diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f9c69fbf5d6ee..8775e1464d068 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -117,7 +117,7 @@ For Eclipse, go to `Preferences->Java->Installed JREs` and add `-ea` to Please follow these formatting guidelines: * Java indent is 4 spaces -* Line width is 100 characters +* Line width is 140 characters * The rest is left to Java coding standards * Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks that is fine to do. * Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. Please attempt to tame your IDE so it doesn't make them and please send a PR against this document with instructions for your IDE if it doesn't contain them. diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index dd9d1781ccd9f..b3c2f4faef8e8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -311,16 +311,9 @@ class BuildPlugin implements Plugin { /** * Returns a closure which can be used with a MavenPom for fixing problems with gradle generated poms. * - * + * The current fixup is to set compile time deps back to compile from runtime (known issue with maven-publish plugin). */ private static Closure fixupDependencies(Project project) { - // TODO: revisit this when upgrading to Gradle 2.14+, see Javadoc comment above return { XmlProvider xml -> // first find if we have dependencies at all, and grab the node NodeList depsNodes = xml.asNode().get('dependencies') diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 4b3867a985dbf..a5845566215b9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -96,7 +96,7 @@ public class PluginBuildPlugin extends BuildPlugin { provided "com.vividsolutions:jts:${project.versions.jts}" provided "org.apache.logging.log4j:log4j-api:${project.versions.log4j}" provided "org.apache.logging.log4j:log4j-core:${project.versions.log4j}" - provided "net.java.dev.jna:jna:${project.versions.jna}" + provided "org.elasticsearch:jna:${project.versions.jna}" } } diff --git a/buildSrc/src/main/resources/checkstyle.xml b/buildSrc/src/main/resources/checkstyle.xml index 85b55a71cf82a..891a85d50a930 100644 --- a/buildSrc/src/main/resources/checkstyle.xml +++ b/buildSrc/src/main/resources/checkstyle.xml @@ -22,7 +22,7 @@ suppress the check there but enforce it everywhere else. This prevents the list from getting longer even if it is unfair. 
--> - + diff --git a/buildSrc/src/main/resources/eclipse.settings/org.eclipse.jdt.core.prefs b/buildSrc/src/main/resources/eclipse.settings/org.eclipse.jdt.core.prefs index e30b8df6cc440..48c93f444ba2a 100644 --- a/buildSrc/src/main/resources/eclipse.settings/org.eclipse.jdt.core.prefs +++ b/buildSrc/src/main/resources/eclipse.settings/org.eclipse.jdt.core.prefs @@ -16,6 +16,6 @@ eclipse.preferences.version=1 # org.eclipse.jdt.core.compiler.problem.potentialNullReference=warning org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning -org.eclipse.jdt.core.formatter.lineSplit=100 +org.eclipse.jdt.core.formatter.lineSplit=140 org.eclipse.jdt.core.formatter.tabulation.char=space org.eclipse.jdt.core.formatter.tabulation.size=4 diff --git a/core/build.gradle b/core/build.gradle index 8ce1e02ecd4ce..72aaca6da1c94 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -86,7 +86,8 @@ dependencies { // to bridge dependencies that are still on Log4j 1 to Log4j 2 compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}", optional - compile "net.java.dev.jna:jna:${versions.jna}" + // repackaged jna with native bits linked against all elastic supported platforms + compile "org.elasticsearch:jna:${versions.jna}" if (isEclipse == false || project.path == ":core-tests") { testCompile("org.elasticsearch.test:framework:${version}") { diff --git a/core/licenses/jna-4.4.0.jar.sha1 b/core/licenses/jna-4.4.0.jar.sha1 index 9655b2c92e8f8..f760fe11e11ee 100644 --- a/core/licenses/jna-4.4.0.jar.sha1 +++ b/core/licenses/jna-4.4.0.jar.sha1 @@ -1 +1 @@ -cb208278274bf12ebdb56c61bd7407e6f774d65a \ No newline at end of file +6edc9b4514969d768039acf43f04210b15658cd7 \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/Build.java b/core/src/main/java/org/elasticsearch/Build.java index a05a412a87027..bef9fafe3ca70 100644 --- a/core/src/main/java/org/elasticsearch/Build.java +++ b/core/src/main/java/org/elasticsearch/Build.java @@ -43,8 +43,10 @@ public class Build { final String date; final boolean isSnapshot; + final String esPrefix = "elasticsearch-" + Version.CURRENT; final URL url = getElasticsearchCodebase(); - if (url.toString().endsWith(".jar")) { + final String urlStr = url.toString(); + if (urlStr.startsWith("file:/") && (urlStr.endsWith(esPrefix + ".jar") || urlStr.endsWith(esPrefix + "-SNAPSHOT.jar"))) { try (JarInputStream jar = new JarInputStream(FileSystemUtils.openFileURLStream(url))) { Manifest manifest = jar.getManifest(); shortHash = manifest.getMainAttributes().getValue("Change"); @@ -54,7 +56,7 @@ public class Build { throw new RuntimeException(e); } } else { - // not running from a jar (unit tests, IDE) + // not running from the official elasticsearch jar file (unit tests, IDE, uber client jar, shadiness) shortHash = "Unknown"; date = "Unknown"; isSnapshot = true; diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index f2308fd93fe54..fad0d403d28b3 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -35,48 +35,6 @@ public class Version implements Comparable { * values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 * indicating a release the (internal) format of the id is there so we can easily do after/before checks on the id */ - public static final int V_2_0_0_ID = 2000099; - public static final Version V_2_0_0 = new Version(V_2_0_0_ID, 
org.apache.lucene.util.Version.LUCENE_5_2_1); - public static final int V_2_0_1_ID = 2000199; - public static final Version V_2_0_1 = new Version(V_2_0_1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1); - public static final int V_2_0_2_ID = 2000299; - public static final Version V_2_0_2 = new Version(V_2_0_2_ID, org.apache.lucene.util.Version.LUCENE_5_2_1); - public static final int V_2_1_0_ID = 2010099; - public static final Version V_2_1_0 = new Version(V_2_1_0_ID, org.apache.lucene.util.Version.LUCENE_5_3_1); - public static final int V_2_1_1_ID = 2010199; - public static final Version V_2_1_1 = new Version(V_2_1_1_ID, org.apache.lucene.util.Version.LUCENE_5_3_1); - public static final int V_2_1_2_ID = 2010299; - public static final Version V_2_1_2 = new Version(V_2_1_2_ID, org.apache.lucene.util.Version.LUCENE_5_3_1); - public static final int V_2_2_0_ID = 2020099; - public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1); - public static final int V_2_2_1_ID = 2020199; - public static final Version V_2_2_1 = new Version(V_2_2_1_ID, org.apache.lucene.util.Version.LUCENE_5_4_1); - public static final int V_2_2_2_ID = 2020299; - public static final Version V_2_2_2 = new Version(V_2_2_2_ID, org.apache.lucene.util.Version.LUCENE_5_4_1); - public static final int V_2_3_0_ID = 2030099; - public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int V_2_3_1_ID = 2030199; - public static final Version V_2_3_1 = new Version(V_2_3_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int V_2_3_2_ID = 2030299; - public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int V_2_3_3_ID = 2030399; - public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int V_2_3_4_ID = 2030499; - public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int V_2_3_5_ID = 2030599; - public static final Version V_2_3_5 = new Version(V_2_3_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int V_2_4_0_ID = 2040099; - public static final Version V_2_4_0 = new Version(V_2_4_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); - public static final int V_2_4_1_ID = 2040199; - public static final Version V_2_4_1 = new Version(V_2_4_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); - public static final int V_2_4_2_ID = 2040299; - public static final Version V_2_4_2 = new Version(V_2_4_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); - public static final int V_2_4_3_ID = 2040399; - public static final Version V_2_4_3 = new Version(V_2_4_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); - public static final int V_2_4_4_ID = 2040499; - public static final Version V_2_4_4 = new Version(V_2_4_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); - public static final int V_2_4_5_ID = 2040599; - public static final Version V_2_4_5 = new Version(V_2_4_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); public static final int V_5_0_0_alpha1_ID = 5000001; public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); public static final int V_5_0_0_alpha2_ID = 5000002; @@ -120,6 +78,8 @@ public class Version implements Comparable { public static final Version V_5_3_1_UNRELEASED = new 
Version(V_5_3_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_2); public static final int V_5_4_0_ID_UNRELEASED = 5040099; public static final Version V_5_4_0_UNRELEASED = new Version(V_5_4_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0); + public static final int V_5_5_0_ID_UNRELEASED = 5050099; + public static final Version V_5_5_0_UNRELEASED = new Version(V_5_5_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0); public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001; public static final Version V_6_0_0_alpha1_UNRELEASED = new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0); @@ -140,6 +100,8 @@ public static Version fromId(int id) { switch (id) { case V_6_0_0_alpha1_ID_UNRELEASED: return V_6_0_0_alpha1_UNRELEASED; + case V_5_5_0_ID_UNRELEASED: + return V_5_5_0_UNRELEASED; case V_5_4_0_ID_UNRELEASED: return V_5_4_0_UNRELEASED; case V_5_3_1_ID_UNRELEASED: @@ -182,48 +144,6 @@ public static Version fromId(int id) { return V_5_0_0_alpha2; case V_5_0_0_alpha1_ID: return V_5_0_0_alpha1; - case V_2_4_5_ID: - return V_2_4_5; - case V_2_4_4_ID: - return V_2_4_4; - case V_2_4_3_ID: - return V_2_4_3; - case V_2_4_2_ID: - return V_2_4_2; - case V_2_4_1_ID: - return V_2_4_1; - case V_2_4_0_ID: - return V_2_4_0; - case V_2_3_5_ID: - return V_2_3_5; - case V_2_3_4_ID: - return V_2_3_4; - case V_2_3_3_ID: - return V_2_3_3; - case V_2_3_2_ID: - return V_2_3_2; - case V_2_3_1_ID: - return V_2_3_1; - case V_2_3_0_ID: - return V_2_3_0; - case V_2_2_2_ID: - return V_2_2_2; - case V_2_2_1_ID: - return V_2_2_1; - case V_2_2_0_ID: - return V_2_2_0; - case V_2_1_2_ID: - return V_2_1_2; - case V_2_1_1_ID: - return V_2_1_1; - case V_2_1_0_ID: - return V_2_1_0; - case V_2_0_2_ID: - return V_2_0_2; - case V_2_0_1_ID: - return V_2_0_1; - case V_2_0_0_ID: - return V_2_0_0; default: return new Version(id, org.apache.lucene.util.Version.LATEST); } diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index c1d0541d4ce10..d52175c9eb4e5 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -37,6 +37,8 @@ import org.elasticsearch.action.admin.cluster.node.tasks.get.TransportGetTaskAction; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; +import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction; +import org.elasticsearch.action.admin.cluster.remote.TransportRemoteInfoAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; @@ -235,6 +237,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestPendingClusterTasksAction; import org.elasticsearch.rest.action.admin.cluster.RestPutRepositoryAction; import org.elasticsearch.rest.action.admin.cluster.RestPutStoredScriptAction; +import org.elasticsearch.rest.action.admin.cluster.RestRemoteClusterInfoAction; import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction; import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction; @@ -400,6 +403,7 @@ public void reg 
actions.register(MainAction.INSTANCE, TransportMainAction.class); actions.register(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class); + actions.register(RemoteInfoAction.INSTANCE, TransportRemoteInfoAction.class); actions.register(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class); actions.register(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class); actions.register(ListTasksAction.INSTANCE, TransportListTasksAction.class); @@ -509,6 +513,7 @@ public void initRestHandlers(Supplier nodesInCluster) { }; registerHandler.accept(new RestMainAction(settings, restController)); registerHandler.accept(new RestNodesInfoAction(settings, restController, settingsFilter)); + registerHandler.accept(new RestRemoteClusterInfoAction(settings, restController)); registerHandler.accept(new RestNodesStatsAction(settings, restController)); registerHandler.accept(new RestNodesHotThreadsAction(settings, restController)); registerHandler.accept(new RestClusterAllocationExplainAction(settings, restController)); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java new file mode 100644 index 0000000000000..aa546c7dffd26 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.remote; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public final class RemoteInfoAction extends Action { + + public static final String NAME = "cluster:monitor/remote/info"; + public static final RemoteInfoAction INSTANCE = new RemoteInfoAction(); + + public RemoteInfoAction() { + super(NAME); + } + + @Override + public RemoteInfoRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RemoteInfoRequestBuilder(client, INSTANCE); + } + + @Override + public RemoteInfoResponse newResponse() { + return new RemoteInfoResponse(); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequest.java new file mode 100644 index 0000000000000..6e41f145b65e7 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequest.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.remote; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; + +public final class RemoteInfoRequest extends ActionRequest { + + @Override + public ActionRequestValidationException validate() { + return null; + } + +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequestBuilder.java new file mode 100644 index 0000000000000..f46f5ecd2d3ca --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequestBuilder.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.remote; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public final class RemoteInfoRequestBuilder extends ActionRequestBuilder { + + public RemoteInfoRequestBuilder(ElasticsearchClient client, RemoteInfoAction action) { + super(client, action, new RemoteInfoRequest()); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java new file mode 100644 index 0000000000000..6d79e23092291 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.remote; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.search.RemoteConnectionInfo; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public final class RemoteInfoResponse extends ActionResponse implements ToXContentObject { + + private List infos; + + RemoteInfoResponse() { + } + + RemoteInfoResponse(Collection infos) { + this.infos = Collections.unmodifiableList(new ArrayList<>(infos)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(infos); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + infos = in.readList(RemoteConnectionInfo::new); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + for (RemoteConnectionInfo info : infos) { + info.toXContent(builder, params); + } + builder.endObject(); + return builder; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java new file mode 100644 index 0000000000000..cdb79a825834b --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.remote; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.RemoteClusterService; +import org.elasticsearch.action.search.SearchTransportService; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; + +public final class TransportRemoteInfoAction extends HandledTransportAction { + + private final RemoteClusterService remoteClusterService; + + @Inject + public TransportRemoteInfoAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + SearchTransportService searchTransportService) { + super(settings, RemoteInfoAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + RemoteInfoRequest::new); + this.remoteClusterService = searchTransportService.getRemoteClusterService(); + } + + @Override + protected void doExecute(RemoteInfoRequest remoteInfoRequest, ActionListener listener) { + remoteClusterService.getRemoteConnectionInfos(ActionListener.wrap(remoteConnectionInfos + -> listener.onResponse(new RemoteInfoResponse(remoteConnectionInfos)), listener::onFailure)); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java index e4c6b34d9c7d7..b92839638d84d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java @@ -126,9 +126,7 @@ public void readFrom(StreamInput in) throws IOException { } } type = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_2_2_0)) { - attributes = (Map) in.readGenericValue(); - } + attributes = (Map) in.readGenericValue(); } @Override @@ -141,9 +139,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVInt(positionLength > 1 ? 
positionLength : null); } out.writeOptionalString(type); - if (out.getVersion().onOrAfter(Version.V_2_2_0)) { - out.writeGenericValue(attributes); - } + out.writeGenericValue(attributes); } } @@ -200,9 +196,7 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < size; i++) { tokens.add(AnalyzeToken.readAnalyzeToken(in)); } - if (in.getVersion().onOrAfter(Version.V_2_2_0)) { - detail = in.readOptionalStreamable(DetailAnalyzeResponse::new); - } + detail = in.readOptionalStreamable(DetailAnalyzeResponse::new); } @Override @@ -216,9 +210,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeVInt(0); } - if (out.getVersion().onOrAfter(Version.V_2_2_0)) { - out.writeOptionalStreamable(detail); - } + out.writeOptionalStreamable(detail); } static final class Fields { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 026946334ac6a..dd39f6c8ca33c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -63,9 +63,4 @@ protected ReplicaResult shardOperationOnReplica(ShardFlushRequest request, Index logger.trace("{} flush request executed on replica", replica.shardId()); return new ReplicaResult(); } - - @Override - protected boolean shouldExecuteReplication(IndexMetaData indexMetaData) { - return true; - } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index d1d8b4078b647..19cc1b134d7fb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -66,9 +66,4 @@ protected ReplicaResult shardOperationOnReplica(BasicReplicationRequest request, logger.trace("{} refresh request executed on replica", replica.shardId()); return new ReplicaResult(); } - - @Override - protected boolean shouldExecuteReplication(IndexMetaData indexMetaData) { - return true; - } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java index c984dbd86775f..94d5342d2c022 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java @@ -317,7 +317,7 @@ void prepareBulkRequest(TimeValue thisBatchStartTime, ScrollableHitSource.Respon /* * If we noop-ed the entire batch then just skip to the next batch or the BulkRequest would fail validation. 
*/ - startNextScroll(thisBatchStartTime, 0); + startNextScroll(thisBatchStartTime, timeValueNanos(System.nanoTime()), 0); return; } request.timeout(mainRequest.getTimeout()); @@ -400,7 +400,7 @@ void onBulkResponse(TimeValue thisBatchStartTime, BulkResponse response) { return; } - startNextScroll(thisBatchStartTime, response.getItems().length); + startNextScroll(thisBatchStartTime, timeValueNanos(System.nanoTime()), response.getItems().length); } catch (Exception t) { finishHim(t); } @@ -412,12 +412,12 @@ void onBulkResponse(TimeValue thisBatchStartTime, BulkResponse response) { * @param lastBatchSize the number of requests sent in the last batch. This is used to calculate the throttling values which are applied * when the scroll returns */ - void startNextScroll(TimeValue lastBatchStartTime, int lastBatchSize) { + void startNextScroll(TimeValue lastBatchStartTime, TimeValue now, int lastBatchSize) { if (task.isCancelled()) { finishHim(null); return; } - TimeValue extraKeepAlive = task.throttleWaitTime(lastBatchStartTime, lastBatchSize); + TimeValue extraKeepAlive = task.throttleWaitTime(lastBatchStartTime, now, lastBatchSize); scrollSource.startNextScroll(extraKeepAlive, response -> { onScrollResponse(lastBatchStartTime, lastBatchSize, response); }); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTask.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTask.java index 1b458caa3d5d6..28f08417abbd3 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTask.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTask.java @@ -178,14 +178,14 @@ void delayPrepareBulkRequest(ThreadPool threadPool, TimeValue lastBatchStartTime AbstractRunnable prepareBulkRequestRunnable) { // Synchronize so we are less likely to schedule the same request twice. 
synchronized (delayedPrepareBulkRequestReference) { - TimeValue delay = throttleWaitTime(lastBatchStartTime, lastBatchSize); + TimeValue delay = throttleWaitTime(lastBatchStartTime, timeValueNanos(System.nanoTime()), lastBatchSize); delayedPrepareBulkRequestReference.set(new DelayedPrepareBulkRequest(threadPool, getRequestsPerSecond(), delay, new RunOnce(prepareBulkRequestRunnable))); } } - TimeValue throttleWaitTime(TimeValue lastBatchStartTime, int lastBatchSize) { - long earliestNextBatchStartTime = lastBatchStartTime.nanos() + (long) perfectlyThrottledBatchTime(lastBatchSize); + TimeValue throttleWaitTime(TimeValue lastBatchStartTime, TimeValue now, int lastBatchSize) { + long earliestNextBatchStartTime = now.nanos() + (long) perfectlyThrottledBatchTime(lastBatchSize); return timeValueNanos(max(0, earliestNextBatchStartTime - System.nanoTime())); } @@ -211,16 +211,12 @@ private void setRequestsPerSecond(float requestsPerSecond) { @Override public void rethrottle(float newRequestsPerSecond) { synchronized (delayedPrepareBulkRequestReference) { - if (logger.isDebugEnabled()) { - logger.debug("[{}]: Rethrottling to [{}] requests per second", getId(), newRequestsPerSecond); - } + logger.debug("[{}]: rethrottling to [{}] requests per second", getId(), newRequestsPerSecond); setRequestsPerSecond(newRequestsPerSecond); DelayedPrepareBulkRequest delayedPrepareBulkRequest = this.delayedPrepareBulkRequestReference.get(); if (delayedPrepareBulkRequest == null) { - if (logger.isDebugEnabled()) { - logger.debug("[{}]: Skipping rescheduling because there is no scheduled task", getId()); - } + logger.debug("[{}]: skipping rescheduling because there is no scheduled task", getId()); // No request has been queued yet so nothing to reschedule. return; } @@ -259,10 +255,8 @@ DelayedPrepareBulkRequest rethrottle(float newRequestsPerSecond) { * The user is attempting to slow the request down. We'll let the change in throttle take effect the next time we delay * prepareBulkRequest. We can't just reschedule the request further out in the future the bulk context might time out. */ - if (logger.isDebugEnabled()) { - logger.debug("[{}]: Skipping rescheduling because the new throttle [{}] is slower than the old one [{}].", getId(), - newRequestsPerSecond, requestsPerSecond); - } + logger.debug("[{}]: skipping rescheduling because the new throttle [{}] is slower than the old one [{}]", getId(), + newRequestsPerSecond, requestsPerSecond); return this; } @@ -270,9 +264,7 @@ DelayedPrepareBulkRequest rethrottle(float newRequestsPerSecond) { // Actually reschedule the task if (false == FutureUtils.cancel(future)) { // Couldn't cancel, probably because the task has finished or been scheduled. Either way we have nothing to do here. - if (logger.isDebugEnabled()) { - logger.debug("[{}]: Skipping rescheduling we couldn't cancel the task.", getId()); - } + logger.debug("[{}]: skipping rescheduling because we couldn't cancel the task", getId()); return this; } @@ -281,9 +273,7 @@ DelayedPrepareBulkRequest rethrottle(float newRequestsPerSecond) { * test it you'll find that requests sneak through. So each request is given a runOnce boolean to prevent that. 
*/ TimeValue newDelay = newDelay(remainingDelay, newRequestsPerSecond); - if (logger.isDebugEnabled()) { - logger.debug("[{}]: Rescheduling for [{}] in the future.", getId(), newDelay); - } + logger.debug("[{}]: rescheduling for [{}] in the future", getId(), newDelay); return new DelayedPrepareBulkRequest(threadPool, requestsPerSecond, newDelay, command); } diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java index 7dfcdcfa10841..6453e4dff3538 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.fieldstats; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.broadcast.BroadcastRequest; @@ -200,9 +199,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(indexConstraint.getProperty().getId()); out.writeByte(indexConstraint.getComparison().getId()); out.writeString(indexConstraint.getValue()); - if (out.getVersion().onOrAfter(Version.V_2_0_1)) { - out.writeOptionalString(indexConstraint.getOptionalFormat()); - } + out.writeOptionalString(indexConstraint.getOptionalFormat()); } out.writeString(level); out.writeBoolean(useCache); diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/IndexConstraint.java b/core/src/main/java/org/elasticsearch/action/fieldstats/IndexConstraint.java index 62eaf207e31da..fe39ba6e3772f 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/IndexConstraint.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/IndexConstraint.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.fieldstats; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; @@ -39,11 +38,7 @@ public class IndexConstraint { this.property = Property.read(input.readByte()); this.comparison = Comparison.read(input.readByte()); this.value = input.readString(); - if (input.getVersion().onOrAfter(Version.V_2_0_1)) { - this.optionalFormat = input.readOptionalString(); - } else { - this.optionalFormat = null; - } + this.optionalFormat = input.readOptionalString(); } public IndexConstraint(String field, Property property, Comparison comparison, String value) { diff --git a/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index ee835fa06be5e..884af4a3af998 100644 --- a/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -68,13 +68,6 @@ protected ShardIterator shards(ClusterState state, InternalRequest request) { @Override protected void resolveRequest(ClusterState state, InternalRequest request) { IndexMetaData indexMeta = state.getMetaData().index(request.concreteIndex()); - if (request.request().realtime && // if the realtime flag is set - request.request().preference() == null && // the preference flag is not already set - indexMeta != null && // and we have the index - indexMeta.isIndexUsingShadowReplicas()) { // and the index uses shadow replicas - // set the preference for the request to use "_primary" automatically - 
request.request().preference(Preference.PRIMARY.type()); - } // update the routing (request#index here is possibly an alias) request.request().routing(state.metaData().resolveIndexRouting(request.request().parent(), request.request().routing(), request.request().index())); // Fail fast on the node that received the request. diff --git a/core/src/main/java/org/elasticsearch/action/search/RemoteClusterConnection.java b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterConnection.java index aea1aab7d3e36..a9739cfe21ac8 100644 --- a/core/src/main/java/org/elasticsearch/action/search/RemoteClusterConnection.java +++ b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterConnection.java @@ -23,6 +23,10 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; @@ -33,6 +37,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -54,8 +59,10 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; @@ -65,6 +72,7 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Predicate; +import java.util.stream.Collectors; /** * Represents a connection to a single remote cluster. 
In contrast to a local cluster a remote cluster is not joined such that the
@@ -521,4 +529,71 @@ boolean isNodeConnected(final DiscoveryNode node) {
         return connectedNodes.contains(node);
     }
+
+    /**
+     * Fetches connection info for this connection
+     */
+    public void getConnectionInfo(ActionListener<RemoteConnectionInfo> listener) {
+        final Optional<DiscoveryNode> anyNode = connectedNodes.stream().findAny();
+        if (anyNode.isPresent() == false) {
+            // not connected we return immediately
+            RemoteConnectionInfo remoteConnectionStats = new RemoteConnectionInfo(clusterAlias,
+                Collections.emptyList(), Collections.emptyList(), maxNumRemoteConnections, 0,
+                RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings));
+            listener.onResponse(remoteConnectionStats);
+        } else {
+            NodesInfoRequest request = new NodesInfoRequest();
+            request.clear();
+            request.http(true);
+
+            transportService.sendRequest(anyNode.get(), NodesInfoAction.NAME, request, new TransportResponseHandler<NodesInfoResponse>() {
+                @Override
+                public NodesInfoResponse newInstance() {
+                    return new NodesInfoResponse();
+                }
+
+                @Override
+                public void handleResponse(NodesInfoResponse response) {
+                    Collection<TransportAddress> httpAddresses = new HashSet<>();
+                    for (NodeInfo info : response.getNodes()) {
+                        if (connectedNodes.contains(info.getNode()) && info.getHttp() != null) {
+                            httpAddresses.add(info.getHttp().getAddress().publishAddress());
+                        }
+                    }
+
+                    if (httpAddresses.size() < maxNumRemoteConnections) {
+                        // just in case none of the connected nodes have http enabled we get other http enabled nodes instead.
+                        for (NodeInfo info : response.getNodes()) {
+                            if (nodePredicate.test(info.getNode()) && info.getHttp() != null) {
+                                httpAddresses.add(info.getHttp().getAddress().publishAddress());
+                            }
+                            if (httpAddresses.size() == maxNumRemoteConnections) {
+                                break; // once we have enough return...
+ } + } + } + RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(clusterAlias, + seedNodes.stream().map(n -> n.getAddress()).collect(Collectors.toList()), new ArrayList<>(httpAddresses), + maxNumRemoteConnections, connectedNodes.size(), + RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings)); + listener.onResponse(remoteConnectionInfo); + } + + @Override + public void handleException(TransportException exp) { + listener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }); + } + + } + + int getNumNodesConnected() { + return connectedNodes.size(); + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java index 089ce57a1146b..34cb5a84da755 100644 --- a/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java +++ b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java @@ -24,9 +24,10 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.common.Booleans; @@ -51,10 +52,12 @@ import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -111,11 +114,13 @@ public final class RemoteClusterService extends AbstractComponent implements Clo private final TransportService transportService; private final int numRemoteConnections; + private final ClusterNameExpressionResolver clusterNameResolver; private volatile Map remoteClusters = Collections.emptyMap(); RemoteClusterService(Settings settings, TransportService transportService) { super(settings); this.transportService = transportService; + this.clusterNameResolver = new ClusterNameExpressionResolver(settings); numRemoteConnections = REMOTE_CONNECTIONS_PER_CLUSTER.get(settings); } @@ -203,25 +208,30 @@ boolean isRemoteNodeConnected(final String remoteCluster, final DiscoveryNode no */ Map> groupClusterIndices(String[] requestIndices, Predicate indexExists) { Map> perClusterIndices = new HashMap<>(); + Set remoteClusterNames = this.remoteClusters.keySet(); for (String index : requestIndices) { int i = index.indexOf(REMOTE_CLUSTER_INDEX_SEPARATOR); - String indexName = index; - String clusterName = LOCAL_CLUSTER_GROUP_KEY; if (i >= 0) { String remoteClusterName = index.substring(0, i); - if (isRemoteClusterRegistered(remoteClusterName)) { + List clusters = clusterNameResolver.resolveClusterNames(remoteClusterNames, remoteClusterName); + if (clusters.isEmpty() == false) { if (indexExists.test(index)) { // we use : as a separator for remote clusters. 
might conflict if there is an index that is actually named // remote_cluster_alias:index_name - for this case we fail the request. the user can easily change the cluster alias // if that happens throw new IllegalArgumentException("Can not filter indices; index " + index + " exists but there is also a remote cluster named: " + remoteClusterName); + } + String indexName = index.substring(i + 1); + for (String clusterName : clusters) { + perClusterIndices.computeIfAbsent(clusterName, k -> new ArrayList<>()).add(indexName); } - indexName = index.substring(i + 1); - clusterName = remoteClusterName; + } else { + perClusterIndices.computeIfAbsent(LOCAL_CLUSTER_GROUP_KEY, k -> new ArrayList<>()).add(index); } + } else { + perClusterIndices.computeIfAbsent(LOCAL_CLUSTER_GROUP_KEY, k -> new ArrayList<>()).add(index); } - perClusterIndices.computeIfAbsent(clusterName, k -> new ArrayList()).add(indexName); } return perClusterIndices; } @@ -413,4 +423,17 @@ void initializeRemoteClusters() { public void close() throws IOException { IOUtils.close(remoteClusters.values()); } + + public void getRemoteConnectionInfos(ActionListener> listener) { + final Map remoteClusters = this.remoteClusters; + if (remoteClusters.isEmpty()) { + listener.onResponse(Collections.emptyList()); + } else { + final GroupedActionListener actionListener = new GroupedActionListener<>(listener, + remoteClusters.size(), Collections.emptyList()); + for (RemoteClusterConnection connection : remoteClusters.values()) { + connection.getConnectionInfo(actionListener); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/RemoteConnectionInfo.java b/core/src/main/java/org/elasticsearch/action/search/RemoteConnectionInfo.java new file mode 100644 index 0000000000000..ff3548d215b59 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/search/RemoteConnectionInfo.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.search; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * This class encapsulates all remote cluster information to be rendered on + * _remote/info requests. 
+ */ +public final class RemoteConnectionInfo implements ToXContent, Writeable { + final List seedNodes; + final List httpAddresses; + final int connectionsPerCluster; + final TimeValue initialConnectionTimeout; + final int numNodesConnected; + final String clusterAlias; + + RemoteConnectionInfo(String clusterAlias, List seedNodes, + List httpAddresses, + int connectionsPerCluster, int numNodesConnected, + TimeValue initialConnectionTimeout) { + this.clusterAlias = clusterAlias; + this.seedNodes = seedNodes; + this.httpAddresses = httpAddresses; + this.connectionsPerCluster = connectionsPerCluster; + this.numNodesConnected = numNodesConnected; + this.initialConnectionTimeout = initialConnectionTimeout; + } + + public RemoteConnectionInfo(StreamInput input) throws IOException { + seedNodes = input.readList(TransportAddress::new); + httpAddresses = input.readList(TransportAddress::new); + connectionsPerCluster = input.readVInt(); + initialConnectionTimeout = new TimeValue(input); + numNodesConnected = input.readVInt(); + clusterAlias = input.readString(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(clusterAlias); + { + builder.startArray("seeds"); + for (TransportAddress addr : seedNodes) { + builder.value(addr.toString()); + } + builder.endArray(); + builder.startArray("http_addresses"); + for (TransportAddress addr : httpAddresses) { + builder.value(addr.toString()); + } + builder.endArray(); + builder.field("connected", numNodesConnected > 0); + builder.field("num_nodes_connected", numNodesConnected); + builder.field("max_connections_per_cluster", connectionsPerCluster); + builder.field("initial_connect_timeout", initialConnectionTimeout); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(seedNodes); + out.writeList(httpAddresses); + out.writeVInt(connectionsPerCluster); + initialConnectionTimeout.writeTo(out); + out.writeVInt(numNodesConnected); + out.writeString(clusterAlias); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RemoteConnectionInfo that = (RemoteConnectionInfo) o; + return connectionsPerCluster == that.connectionsPerCluster && + numNodesConnected == that.numNodesConnected && + Objects.equals(seedNodes, that.seedNodes) && + Objects.equals(httpAddresses, that.httpAddresses) && + Objects.equals(initialConnectionTimeout, that.initialConnectionTimeout) && + Objects.equals(clusterAlias, that.clusterAlias); + } + + @Override + public int hashCode() { + return Objects.hash(seedNodes, httpAddresses, connectionsPerCluster, initialConnectionTimeout, numNodesConnected, clusterAlias); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 008d022a6556f..63a3ad0b62d63 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -60,7 +60,7 @@ public class TransportSearchAction extends HandledTransportAction SHARD_COUNT_LIMIT_SETTING = Setting.longSetting( - "action.search.shard_count.limit", 1000L, 1L, Property.Dynamic, Property.NodeScope); + "action.search.shard_count.limit", Long.MAX_VALUE, 1L, Property.Dynamic, Property.NodeScope); private final ClusterService 
clusterService;
     private final SearchTransportService searchTransportService;
diff --git a/core/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java b/core/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java
new file mode 100644
index 0000000000000..85b418e046cc0
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.common.util.concurrent.CountDown;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * An action listener that delegates its results to another listener once
+ * it has received one or more failures or N results. This allows synchronous
+ * tasks to be forked off in a loop with the same listener and respond to a
+ * higher level listener once all tasks responded.
+ */ +public final class GroupedActionListener implements ActionListener { + private final CountDown countDown; + private final AtomicInteger pos = new AtomicInteger(); + private final AtomicArray roles; + private final ActionListener> delegate; + private final Collection defaults; + private final AtomicReference failure = new AtomicReference<>(); + + /** + * Creates a new listener + * @param delegate the delegate listener + * @param groupSize the group size + */ + public GroupedActionListener(ActionListener> delegate, int groupSize, + Collection defaults) { + roles = new AtomicArray<>(groupSize); + countDown = new CountDown(groupSize); + this.delegate = delegate; + this.defaults = defaults; + } + + @Override + public void onResponse(T element) { + roles.set(pos.incrementAndGet() - 1, element); + if (countDown.countDown()) { + if (failure.get() != null) { + delegate.onFailure(failure.get()); + } else { + List collect = this.roles.asList(); + collect.addAll(defaults); + delegate.onResponse(Collections.unmodifiableList(collect)); + } + } + } + + @Override + public void onFailure(Exception e) { + if (failure.compareAndSet(null, e) == false) { + failure.get().addSuppressed(e); + } + if (countDown.countDown()) { + delegate.onFailure(failure.get()); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 7d627d45318ab..7f63faac49c2e 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -74,7 +74,6 @@ public class ReplicationOperation< */ private final AtomicInteger pendingActions = new AtomicInteger(); private final AtomicInteger successfulShards = new AtomicInteger(); - private final boolean executeOnReplicas; private final Primary primary; private final Replicas replicasProxy; private final AtomicBoolean finished = new AtomicBoolean(); @@ -86,9 +85,8 @@ public class ReplicationOperation< public ReplicationOperation(Request request, Primary primary, ActionListener listener, - boolean executeOnReplicas, Replicas replicas, + Replicas replicas, Supplier clusterStateSupplier, Logger logger, String opType) { - this.executeOnReplicas = executeOnReplicas; this.replicasProxy = replicas; this.primary = primary; this.resultListener = listener; @@ -160,7 +158,7 @@ private void performOnReplicas(ReplicaRequest replicaRequest, List final String localNodeId = primary.routingEntry().currentNodeId(); // If the index gets deleted after primary operation, we skip replication for (final ShardRouting shard : shards) { - if (executeOnReplicas == false || shard.unassigned()) { + if (shard.unassigned()) { if (shard.primary() == false) { totalShards.incrementAndGet(); } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 7190879976811..e9a26778e7006 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -319,11 +319,10 @@ public void handleException(TransportException exp) { } else { setPhase(replicationTask, "primary"); final IndexMetaData indexMetaData = 
clusterService.state().getMetaData().index(request.shardId().getIndex()); - final boolean executeOnReplicas = (indexMetaData == null) || shouldExecuteReplication(indexMetaData); final ActionListener listener = createResponseListener(primaryShardReference); createReplicatedOperation(request, ActionListener.wrap(result -> result.respond(listener), listener::onFailure), - primaryShardReference, executeOnReplicas) + primaryShardReference) .execute(); } } catch (Exception e) { @@ -371,9 +370,9 @@ public void onFailure(Exception e) { protected ReplicationOperation> createReplicatedOperation( Request request, ActionListener> listener, - PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { + PrimaryShardReference primaryShardReference) { return new ReplicationOperation<>(request, primaryShardReference, listener, - executeOnReplicas, replicasProxy, clusterService::state, logger, actionName); + replicasProxy, clusterService::state, logger, actionName); } } @@ -909,14 +908,6 @@ public void onFailure(Exception e) { indexShard.acquirePrimaryOperationLock(onAcquired, executor); } - /** - * Indicated whether this operation should be replicated to shadow replicas or not. If this method returns true the replication phase - * will be skipped. For example writes such as index and delete don't need to be replicated on shadow replicas but refresh and flush do. - */ - protected boolean shouldExecuteReplication(IndexMetaData indexMetaData) { - return indexMetaData.isIndexUsingShadowReplicas() == false; - } - class ShardReference implements Releasable { protected final IndexShard indexShard; diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index d3766cc958cc1..2b47908c35260 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -338,13 +338,12 @@ static void init( INSTANCE.setup(true, environment); - /* TODO: close this once s3 repository doesn't try to read during repository construction try { // any secure settings must be read during node construction IOUtils.close(keystore); } catch (IOException e) { throw new BootstrapException(e); - }*/ + } INSTANCE.start(); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index fda6e6cfdec92..fd050658a3e8b 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -24,11 +24,11 @@ import joptsimple.OptionSpecBuilder; import joptsimple.util.PathConverter; import org.elasticsearch.Build; -import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.env.Environment; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.node.NodeValidationException; @@ -37,7 +37,6 @@ import java.nio.file.Path; import java.security.Permission; import java.util.Arrays; -import java.util.Map; /** * This class starts elasticsearch. 
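A later hunk below, in EnvironmentAwareCommand, makes repeated `-E` settings an error rather than letting the last value win. A standalone sketch of that check, using a simplified `KeyValuePair` stand-in instead of the jopt-simple type:

```java
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

class SettingOptionSketch {

    /** Simplified stand-in for jopt-simple's KeyValuePair. */
    static final class KeyValuePair {
        final String key, value;
        KeyValuePair(String key, String value) { this.key = key; this.value = value; }
    }

    /** Collects -E style pairs, rejecting empty values and duplicate keys. */
    static Map<String, String> collect(Iterable<KeyValuePair> pairs) {
        Map<String, String> settings = new HashMap<>();
        for (KeyValuePair kvp : pairs) {
            if (kvp.value.isEmpty()) {
                throw new IllegalArgumentException("setting [" + kvp.key + "] must not be empty");
            }
            if (settings.containsKey(kvp.key)) {
                throw new IllegalArgumentException(String.format(Locale.ROOT,
                        "setting [%s] already set, saw [%s] and [%s]",
                        kvp.key, settings.get(kvp.key), kvp.value));
            }
            settings.put(kvp.key, kvp.value);
        }
        return settings;
    }
}
```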
@@ -80,6 +79,7 @@ public void checkPermission(Permission perm) { // grant all permissions so that we can later set the security manager to the one that we want } }); + LogConfigurator.registerErrorListener(); final Elasticsearch elasticsearch = new Elasticsearch(); int status = main(args, elasticsearch, Terminal.DEFAULT); if (status != ExitCodes.OK) { diff --git a/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java b/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java index 8372a6b8ab852..79a4fd7329fad 100644 --- a/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java +++ b/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java @@ -45,7 +45,16 @@ protected void execute(Terminal terminal, OptionSet options) throws Exception { final Map settings = new HashMap<>(); for (final KeyValuePair kvp : settingOption.values(options)) { if (kvp.value.isEmpty()) { - throw new UserException(ExitCodes.USAGE, "Setting [" + kvp.key + "] must not be empty"); + throw new UserException(ExitCodes.USAGE, "setting [" + kvp.key + "] must not be empty"); + } + if (settings.containsKey(kvp.key)) { + final String message = String.format( + Locale.ROOT, + "setting [%s] already set, saw [%s] and [%s]", + kvp.key, + settings.get(kvp.key), + kvp.value); + throw new UserException(ExitCodes.USAGE, message); } settings.put(kvp.key, kvp.value); } diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index a9392d3c017de..b0baac6bd9029 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -383,13 +383,6 @@ static void buildShardLevelInfo(Logger logger, ShardStats[] stats, ImmutableOpen if (logger.isTraceEnabled()) { logger.trace("shard: {} size: {}", sid, size); } - if (indexMeta != null && indexMeta.isIndexUsingShadowReplicas()) { - // Shards on a shared filesystem should be considered of size 0 - if (logger.isTraceEnabled()) { - logger.trace("shard: {} is using shadow replicas and will be treated as size 0", sid); - } - size = 0; - } newShardSizes.put(sid, size); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolver.java new file mode 100644 index 0000000000000..2032c2f4ef3ba --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolver.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
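The new ClusterNameExpressionResolver added below maps a cluster expression to the matching remote cluster aliases, either by exact name or by wildcard. A rough usage sketch, assuming the flattened generics are `Set<String>` in and `List<String>` out:

```java
import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver;
import org.elasticsearch.common.settings.Settings;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class ClusterNameResolutionSketch {
    public static void main(String[] args) {
        ClusterNameExpressionResolver resolver = new ClusterNameExpressionResolver(Settings.EMPTY);
        Set<String> remotes = new HashSet<>(Arrays.asList("cluster_one", "cluster_two", "other"));

        resolver.resolveClusterNames(remotes, "cluster_one"); // exact alias   -> [cluster_one]
        resolver.resolveClusterNames(remotes, "cluster_*");   // wildcard      -> cluster_one, cluster_two (order not guaranteed)
        resolver.resolveClusterNames(remotes, "*");           // match-all     -> all registered aliases
        resolver.resolveClusterNames(remotes, "missing");     // no match      -> empty list
    }
}
```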
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Settings; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Resolves cluster names from an expression. The expression must be the exact match of a cluster + * name or must be a wildcard expression. + */ +public final class ClusterNameExpressionResolver extends AbstractComponent { + + private final WildcardExpressionResolver wildcardResolver = new WildcardExpressionResolver(); + + public ClusterNameExpressionResolver(Settings settings) { + super(settings); + } + + /** + * Resolves the provided cluster expression to matching cluster names. This method only + * supports exact or wildcard matches. + * + * @param remoteClusters the aliases for remote clusters + * @param clusterExpression the expressions that can be resolved to cluster names. + * @return the resolved cluster aliases. + */ + public List resolveClusterNames(Set remoteClusters, String clusterExpression) { + if (remoteClusters.contains(clusterExpression)) { + return Collections.singletonList(clusterExpression); + } else if (Regex.isSimpleMatchPattern(clusterExpression)) { + return wildcardResolver.resolve(remoteClusters, clusterExpression); + } else { + return Collections.emptyList(); + } + } + + private static class WildcardExpressionResolver { + + private List resolve(Set remoteClusters, String clusterExpression) { + if (isTrivialWildcard(clusterExpression)) { + return resolveTrivialWildcard(remoteClusters); + } + + Set matches = matches(remoteClusters, clusterExpression); + if (matches.isEmpty()) { + return Collections.emptyList(); + } else { + return new ArrayList<>(matches); + } + } + + private boolean isTrivialWildcard(String clusterExpression) { + return Regex.isMatchAllPattern(clusterExpression); + } + + private List resolveTrivialWildcard(Set remoteClusters) { + return new ArrayList<>(remoteClusters); + } + + private static Set matches(Set remoteClusters, String expression) { + if (expression.indexOf("*") == expression.length() - 1) { + return otherWildcard(remoteClusters, expression); + } else { + return otherWildcard(remoteClusters, expression); + } + } + + private static Set otherWildcard(Set remoteClusters, String expression) { + final String pattern = expression; + return remoteClusters.stream() + .filter(n -> Regex.simpleMatch(pattern, n)) + .collect(Collectors.toSet()); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 713fce2848f4a..67f4d71bd4e1d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -192,18 +192,11 @@ static Setting buildNumberOfShardsSetting() { public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas"; public static final Setting INDEX_NUMBER_OF_REPLICAS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, Property.Dynamic, Property.IndexScope); - public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas"; - public static final Setting INDEX_SHADOW_REPLICAS_SETTING = - Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, Property.IndexScope, Property.Deprecated); public static final String 
SETTING_ROUTING_PARTITION_SIZE = "index.routing_partition_size"; public static final Setting INDEX_ROUTING_PARTITION_SIZE_SETTING = Setting.intSetting(SETTING_ROUTING_PARTITION_SIZE, 1, 1, Property.IndexScope); - public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem"; - public static final Setting INDEX_SHARED_FILESYSTEM_SETTING = - Setting.boolSetting(SETTING_SHARED_FILESYSTEM, INDEX_SHADOW_REPLICAS_SETTING, Property.IndexScope, Property.Deprecated); - public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; public static final Setting INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; public static final String SETTING_READ_ONLY = "index.blocks.read_only"; @@ -240,10 +233,6 @@ static Setting buildNumberOfShardsSetting() { public static final String SETTING_DATA_PATH = "index.data_path"; public static final Setting INDEX_DATA_PATH_SETTING = new Setting<>(SETTING_DATA_PATH, "", Function.identity(), Property.IndexScope); - public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node"; - public static final Setting INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = - Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, - Property.Dynamic, Property.IndexScope, Property.Deprecated); public static final String INDEX_UUID_NA_VALUE = "_na_"; public static final String INDEX_ROUTING_REQUIRE_GROUP_PREFIX = "index.routing.allocation.require"; @@ -1237,35 +1226,6 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti } } - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(IndexMetaData.class)); - - /** - * Returns true iff the given settings indicate that the index - * associated with these settings allocates it's shards on a shared - * filesystem. Otherwise false. The default setting for this - * is the returned value from - * {@link #isIndexUsingShadowReplicas(org.elasticsearch.common.settings.Settings)}. - */ - public boolean isOnSharedFilesystem(Settings settings) { - // don't use the setting directly, not to trigger verbose deprecation logging - return settings.getAsBooleanLenientForPreEs6Indices( - this.indexCreatedVersion, SETTING_SHARED_FILESYSTEM, isIndexUsingShadowReplicas(settings), deprecationLogger); - } - - /** - * Returns true iff the given settings indicate that the index associated - * with these settings uses shadow replicas. Otherwise false. The default - * setting for this is false. - */ - public boolean isIndexUsingShadowReplicas() { - return isIndexUsingShadowReplicas(this.settings); - } - - public boolean isIndexUsingShadowReplicas(Settings settings) { - // don't use the setting directly, not to trigger verbose deprecation logging - return settings.getAsBooleanLenientForPreEs6Indices(this.indexCreatedVersion, SETTING_SHADOW_REPLICAS, false, deprecationLogger); - } - /** * Adds human readable version and creation date settings. 
* This method is used to display the settings in a human readable format in REST API diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 1a878919749d0..2cb93373700f3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -433,10 +433,9 @@ public ClusterState execute(ClusterState currentState) throws Exception { .put(indexMetaData, false) .build(); - String maybeShadowIndicator = indexMetaData.isIndexUsingShadowReplicas() ? "s" : ""; - logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}{}], mappings {}", + logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}], mappings {}", request.index(), request.cause(), templateNames, indexMetaData.getNumberOfShards(), - indexMetaData.getNumberOfReplicas(), maybeShadowIndicator, mappings.keySet()); + indexMetaData.getNumberOfReplicas(), mappings.keySet()); ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); if (!request.blocks().isEmpty()) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 7619b0cc95e16..d80a1c326cff6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -139,8 +139,7 @@ boolean validate(MetaData metaData) { "allocation set " + inSyncAllocationIds); } - if (indexMetaData.isIndexUsingShadowReplicas() == false && // see #20650 - shardRouting.primary() && shardRouting.initializing() && shardRouting.relocating() == false && + if (shardRouting.primary() && shardRouting.initializing() && shardRouting.relocating() == false && RecoverySource.isInitialRecovery(shardRouting.recoverySource().getType()) == false && inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false) throw new IllegalStateException("a primary shard routing " + shardRouting + " is a primary that is recovering from " + diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java index 0f3a8c6f214c6..883b4c22f7fc0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java @@ -69,6 +69,12 @@ public interface RoutingChangesObserver { */ void replicaPromoted(ShardRouting replicaShard); + /** + * Called when an initializing replica is reinitialized. This happens when a primary relocation completes, which + * reinitializes all currently initializing replicas as their recovery source node changes + */ + void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica); + /** * Abstract implementation of {@link RoutingChangesObserver} that does not take any action. 
Useful for subclasses that only override @@ -120,6 +126,11 @@ public void startedPrimaryReinitialized(ShardRouting startedPrimaryShard, ShardR public void replicaPromoted(ShardRouting replicaShard) { } + + @Override + public void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica) { + + } } class DelegatingRoutingChangesObserver implements RoutingChangesObserver { @@ -192,5 +203,12 @@ public void replicaPromoted(ShardRouting replicaShard) { routingChangesObserver.replicaPromoted(replicaShard); } } + + @Override + public void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica) { + for (RoutingChangesObserver routingChangesObserver : routingChangesObservers) { + routingChangesObserver.initializedReplicaReinitialized(oldReplica, reinitializedReplica); + } + } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 45d567b657e9b..3e9303d3d4230 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -451,6 +451,9 @@ public Tuple relocateShard(ShardRouting startedShard, * * Moves the initializing shard to started. If the shard is a relocation target, also removes the relocation source. * + * If the started shard is a primary relocation target, this also reinitializes currently initializing replicas as their + * recovery source changes + * * @return the started shard */ public ShardRouting startShard(Logger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) { @@ -468,6 +471,30 @@ public ShardRouting startShard(Logger logger, ShardRouting initializingShard, Ro + initializingShard + " but was: " + relocationSourceShard.getTargetRelocatingShard(); remove(relocationSourceShard); routingChangesObserver.relocationCompleted(relocationSourceShard); + + // if this is a primary shard with ongoing replica recoveries, reinitialize them as their recovery source changed + if (startedShard.primary()) { + List assignedShards = assignedShards(startedShard.shardId()); + // copy list to prevent ConcurrentModificationException + for (ShardRouting routing : new ArrayList<>(assignedShards)) { + if (routing.initializing() && routing.primary() == false) { + if (routing.isRelocationTarget()) { + // find the relocation source + ShardRouting sourceShard = getByAllocationId(routing.shardId(), routing.allocationId().getRelocationId()); + // cancel relocation and start relocation to same node again + ShardRouting startedReplica = cancelRelocation(sourceShard); + remove(routing); + routingChangesObserver.shardFailed(routing, + new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, "primary changed")); + relocateShard(startedReplica, sourceShard.relocatingNodeId(), + sourceShard.getExpectedShardSize(), routingChangesObserver); + } else { + ShardRouting reinitializedReplica = reinitReplica(routing); + routingChangesObserver.initializedReplicaReinitialized(routing, reinitializedReplica); + } + } + } + } } return startedShard; } @@ -540,9 +567,6 @@ assert getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId if (failedShard.primary()) { // promote active replica to primary if active replica exists (only the case for shadow replicas) ShardRouting activeReplica = activeReplica(failedShard.shardId()); - assert activeReplica == null || indexMetaData.isIndexUsingShadowReplicas() : - 
"initializing primary [" + failedShard + "] with active replicas [" + activeReplica + "] only expected when " + - "using shadow replicas"; if (activeReplica == null) { moveToUnassigned(failedShard, unassignedInfo); } else { @@ -599,10 +623,6 @@ private void promoteReplicaToPrimary(ShardRouting activeReplica, IndexMetaData i assert activeReplica.started() : "replica relocation should have been cancelled: " + activeReplica; ShardRouting primarySwappedCandidate = promoteActiveReplicaShardToPrimary(activeReplica); routingChangesObserver.replicaPromoted(activeReplica); - if (indexMetaData.isIndexUsingShadowReplicas()) { - ShardRouting initializedShard = reinitShadowPrimary(primarySwappedCandidate); - routingChangesObserver.startedPrimaryReinitialized(primarySwappedCandidate, initializedShard); - } } /** @@ -730,6 +750,15 @@ private ShardRouting reinitShadowPrimary(ShardRouting candidate) { return reinitializedShard; } + private ShardRouting reinitReplica(ShardRouting shard) { + assert shard.primary() == false : "shard must be a replica: " + shard; + assert shard.initializing() : "can only reinitialize an initializing replica: " + shard; + assert shard.isRelocationTarget() == false : "replication target cannot be reinitialized: " + shard; + ShardRouting reinitializedShard = shard.reinitializeReplicaShard(); + updateAssigned(shard, reinitializedShard); + return reinitializedShard; + } + private void updateAssigned(ShardRouting oldShard, ShardRouting newShard) { assert oldShard.shardId().equals(newShard.shardId()) : "can only update " + oldShard + " by shard with same shard id but was " + newShard; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 4db922d5aeb93..3a60e5338d798 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -393,6 +393,17 @@ StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.R allocationId, UNAVAILABLE_EXPECTED_SHARD_SIZE); } + /** + * Reinitializes a replica shard, giving it a fresh allocation id + */ + public ShardRouting reinitializeReplicaShard() { + assert state == ShardRoutingState.INITIALIZING : this; + assert primary == false : this; + assert isRelocationTarget() == false : this; + return new ShardRouting(shardId, currentNodeId, null, primary, ShardRoutingState.INITIALIZING, + recoverySource, unassignedInfo, AllocationId.newInitializing(), expectedShardSize); + } + /** * Set the shards state to STARTED. The shards state must be * INITIALIZING or RELOCATING. 
Any relocation will be diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index ff5b8e63d5e16..8974c8a4a9a0b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -41,7 +41,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayAllocator; +import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.Iterator; import java.util.List; import java.util.function.Function; @@ -88,6 +90,9 @@ public ClusterState applyStartedShards(ClusterState clusterState, List(startedShards); + Collections.sort(startedShards, Comparator.comparing(ShardRouting::primary)); applyStartedShards(allocation, startedShards); gatewayAllocator.applyStartedShards(allocation, startedShards); reroute(allocation); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java index 42e80689eeca5..3e465e42b4453 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java @@ -96,6 +96,17 @@ public void replicaPromoted(ShardRouting replicaShard) { setChanged(); } + @Override + public void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica) { + assert oldReplica.initializing() && oldReplica.primary() == false : + "expected initializing replica shard " + oldReplica; + assert reinitializedReplica.initializing() && reinitializedReplica.primary() == false : + "expected reinitialized replica shard " + reinitializedReplica; + assert oldReplica.allocationId().getId().equals(reinitializedReplica.allocationId().getId()) == false : + "expected allocation id to change for reinitialized replica shard (old: " + oldReplica + " new: " + reinitializedReplica + ")"; + setChanged(); + } + /** * Marks the allocation as changed. 
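The AllocationService hunk above now sorts the batch of started shards with `Comparator.comparing(ShardRouting::primary)` before applying it, presumably so that replicas started in the same batch are applied before a primary whose start would reinitialize still-initializing replicas. A tiny sketch of why that ordering puts replicas first, boolean keys sorting `false` before `true`:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

class StartedShardOrderSketch {
    public static void main(String[] args) {
        // Stand-in for ShardRouting::primary flags of a batch of started shards.
        List<Boolean> primaryFlags = new ArrayList<>(Arrays.asList(true, false, true, false));

        // Same ordering as Comparator.comparing(ShardRouting::primary): false < true,
        // so replicas (primary == false) come ahead of primaries.
        primaryFlags.sort(Comparator.naturalOrder());

        System.out.println(primaryFlags); // [false, false, true, true]
    }
}
```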
*/ diff --git a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index 5e20b6c37e3f8..0024d83b80506 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -30,6 +30,10 @@ import org.apache.logging.log4j.core.config.composite.CompositeConfiguration; import org.apache.logging.log4j.core.config.properties.PropertiesConfiguration; import org.apache.logging.log4j.core.config.properties.PropertiesConfigurationFactory; +import org.apache.logging.log4j.status.StatusConsoleListener; +import org.apache.logging.log4j.status.StatusData; +import org.apache.logging.log4j.status.StatusListener; +import org.apache.logging.log4j.status.StatusLogger; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.UserException; import org.elasticsearch.cluster.ClusterName; @@ -51,9 +55,36 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.StreamSupport; public class LogConfigurator { + /* + * We want to detect situations where we touch logging before the configuration is loaded. If we do this, Log4j will status log an error + * message at the error level. With this error listener, we can capture if this happens. More broadly, we can detect any error-level + * status log message which likely indicates that something is broken. The listener is installed immediately on startup, and then when + * we get around to configuring logging we check that no error-level log messages have been logged by the status logger. If they have we + * fail startup and any such messages can be seen on the console. + */ + private static final AtomicBoolean error = new AtomicBoolean(); + private static final StatusListener ERROR_LISTENER = new StatusConsoleListener(Level.ERROR) { + @Override + public void log(StatusData data) { + error.set(true); + super.log(data); + } + }; + + /** + * Registers a listener for status logger errors. This listener should be registered as early as possible to ensure that no errors are + * logged by the status logger before logging is configured. + */ + public static void registerErrorListener() { + error.set(false); + StatusLogger.getLogger().registerListener(ERROR_LISTENER); + } + /** * Configure logging without reading a log4j2.properties file, effectively configuring the * status logger and all loggers to the console. 
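The hunk below wires the error check into `configure`. A schematic sketch of the intended call order at startup; `createEnvironment` is a hypothetical placeholder for the real argument parsing and environment construction:

```java
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.env.Environment;

class StartupLoggingSketch {
    public static void main(String[] args) throws Exception {
        // 1. Install the status-logger error listener before anything touches Log4j.
        LogConfigurator.registerErrorListener();

        // 2. ... parse arguments, load settings, build the Environment ...
        Environment environment = createEnvironment(args); // hypothetical helper

        // 3. configure() first checks that no error-level status message was recorded;
        //    if one was, it throws IllegalStateException and startup fails.
        LogConfigurator.configure(environment);
    }

    // Stand-in for the real environment bootstrap, which is out of scope here.
    static Environment createEnvironment(String[] args) {
        throw new UnsupportedOperationException("illustrative only");
    }
}
```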
@@ -79,9 +110,27 @@ public static void configureWithoutConfig(final Settings settings) { */ public static void configure(final Environment environment) throws IOException, UserException { Objects.requireNonNull(environment); + try { + // we are about to configure logging, check that the status logger did not log any error-level messages + checkErrorListener(); + } finally { + // whether or not the error listener check failed we can remove the listener now + StatusLogger.getLogger().removeListener(ERROR_LISTENER); + } configure(environment.settings(), environment.configFile(), environment.logsFile()); } + private static void checkErrorListener() { + assert errorListenerIsRegistered() : "expected error listener to be registered"; + if (error.get()) { + throw new IllegalStateException("status logger logged an error before logging was configured"); + } + } + + private static boolean errorListenerIsRegistered() { + return StreamSupport.stream(StatusLogger.getLogger().getListeners().spliterator(), false).anyMatch(l -> l == ERROR_LISTENER); + } + private static void configure(final Settings settings, final Path configsPath, final Path logsPath) throws IOException, UserException { Objects.requireNonNull(settings); Objects.requireNonNull(configsPath); diff --git a/core/src/main/java/org/elasticsearch/common/settings/AddFileKeyStoreCommand.java b/core/src/main/java/org/elasticsearch/common/settings/AddFileKeyStoreCommand.java new file mode 100644 index 0000000000000..5ccac9a2ac3fa --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/AddFileKeyStoreCommand.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import java.io.BufferedReader; +import java.io.File; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.List; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.env.Environment; + +/** + * A subcommand for the keystore cli which adds a file setting. 
+ */ +class AddFileKeyStoreCommand extends EnvironmentAwareCommand { + + private final OptionSpec forceOption; + private final OptionSpec arguments; + + AddFileKeyStoreCommand() { + super("Add a file setting to the keystore"); + this.forceOption = parser.acceptsAll(Arrays.asList("f", "force"), "Overwrite existing setting without prompting"); + // jopt simple has issue with multiple non options, so we just get one set of them here + // and convert to File when necessary + // see https://github.com/jopt-simple/jopt-simple/issues/103 + this.arguments = parser.nonOptions("setting [filepath]"); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile()); + if (keystore == null) { + throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found. Use 'create' command to create one."); + } + + keystore.decrypt(new char[0] /* TODO: prompt for password when they are supported */); + + List argumentValues = arguments.values(options); + if (argumentValues.size() == 0) { + throw new UserException(ExitCodes.USAGE, "Missing setting name"); + } + String setting = argumentValues.get(0); + if (keystore.getSettingNames().contains(setting) && options.has(forceOption) == false) { + if (terminal.promptYesNo("Setting " + setting + " already exists. Overwrite?", false) == false) { + terminal.println("Exiting without modifying keystore."); + return; + } + } + + if (argumentValues.size() == 1) { + throw new UserException(ExitCodes.USAGE, "Missing file name"); + } + Path file = getPath(argumentValues.get(1)); + if (Files.exists(file) == false) { + throw new UserException(ExitCodes.IO_ERROR, "File [" + file.toString() + "] does not exist"); + } + if (argumentValues.size() > 2) { + throw new UserException(ExitCodes.USAGE, "Unrecognized extra arguments [" + + String.join(", ", argumentValues.subList(2, argumentValues.size())) + "] after filepath"); + } + keystore.setFile(setting, Files.readAllBytes(file)); + keystore.save(env.configFile()); + } + + @SuppressForbidden(reason="file arg for cli") + private Path getPath(String file) { + return PathUtils.get(file); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index bb7aa223f72dc..8478a79068921 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -368,7 +368,6 @@ public void apply(Settings value, Settings current, Settings previous) { TribeService.TRIBE_NAME_SETTING, NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING, NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING, - NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH, OsService.REFRESH_INTERVAL_SETTING, ProcessService.REFRESH_INTERVAL_SETTING, JvmService.REFRESH_INTERVAL_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index a072b68b2770d..efbe7acf5e1b6 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -70,13 +70,10 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING, 
IndexMetaData.INDEX_ROUTING_PARTITION_SIZE_SETTING, - IndexMetaData.INDEX_SHADOW_REPLICAS_SETTING, - IndexMetaData.INDEX_SHARED_FILESYSTEM_SETTING, IndexMetaData.INDEX_READ_ONLY_SETTING, IndexMetaData.INDEX_BLOCKS_READ_SETTING, IndexMetaData.INDEX_BLOCKS_WRITE_SETTING, IndexMetaData.INDEX_BLOCKS_METADATA_SETTING, - IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING, IndexMetaData.INDEX_PRIORITY_SETTING, IndexMetaData.INDEX_DATA_PATH_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java index 5bded392fdbc9..c2345f2ddd866 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java +++ b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java @@ -32,6 +32,7 @@ private KeyStoreCli() { subcommands.put("create", new CreateKeyStoreCommand()); subcommands.put("list", new ListKeyStoreCommand()); subcommands.put("add", new AddStringKeyStoreCommand()); + subcommands.put("add-file", new AddStringKeyStoreCommand()); subcommands.put("remove", new RemoveSettingKeyStoreCommand()); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index e4dd982512d6b..338987cc714e2 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -25,7 +25,6 @@ import javax.security.auth.DestroyFailedException; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; -import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.nio.CharBuffer; @@ -41,10 +40,14 @@ import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.util.Arrays; +import java.util.Base64; import java.util.Enumeration; +import java.util.HashMap; import java.util.HashSet; import java.util.Locale; +import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.BufferedChecksumIndexInput; @@ -54,7 +57,6 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.ElasticsearchException; /** * A wrapper around a Java KeyStore which provides supplements the keystore with extra metadata. @@ -67,29 +69,52 @@ */ public class KeyStoreWrapper implements SecureSettings { + /** An identifier for the type of data that may be stored in a keystore entry. */ + private enum KeyType { + STRING, + FILE + } + /** The name of the keystore file to read and write. */ private static final String KEYSTORE_FILENAME = "elasticsearch.keystore"; /** The version of the metadata written before the keystore data. */ - private static final int FORMAT_VERSION = 1; + private static final int FORMAT_VERSION = 2; + + /** The oldest metadata format version that can be read. */ + private static final int MIN_FORMAT_VERSION = 1; /** The keystore type for a newly created keystore. */ private static final String NEW_KEYSTORE_TYPE = "PKCS12"; - /** The algorithm used to store password for a newly created keystore. */ - private static final String NEW_KEYSTORE_SECRET_KEY_ALGO = "PBE";//"PBEWithHmacSHA256AndAES_128"; + /** The algorithm used to store string setting contents. 
*/ + private static final String NEW_KEYSTORE_STRING_KEY_ALGO = "PBE"; + + /** The algorithm used to store file setting contents. */ + private static final String NEW_KEYSTORE_FILE_KEY_ALGO = "PBE"; /** An encoder to check whether string values are ascii. */ private static final CharsetEncoder ASCII_ENCODER = StandardCharsets.US_ASCII.newEncoder(); + /** The metadata format version used to read the current keystore wrapper. */ + private final int formatVersion; + /** True iff the keystore has a password needed to read. */ private final boolean hasPassword; /** The type of the keystore, as passed to {@link java.security.KeyStore#getInstance(String)} */ private final String type; - /** A factory necessary for constructing instances of secrets in a {@link KeyStore}. */ - private final SecretKeyFactory secretFactory; + /** A factory necessary for constructing instances of string secrets in a {@link KeyStore}. */ + private final SecretKeyFactory stringFactory; + + /** A factory necessary for constructing instances of file secrets in a {@link KeyStore}. */ + private final SecretKeyFactory fileFactory; + + /** + * The settings that exist in the keystore, mapped to their type of data. + */ + private final Map settingTypes; /** The raw bytes of the encrypted keystore. */ private final byte[] keystoreBytes; @@ -100,17 +125,19 @@ public class KeyStoreWrapper implements SecureSettings { /** The password for the keystore. See {@link #decrypt(char[])}. */ private final SetOnce keystorePassword = new SetOnce<>(); - /** The setting names contained in the loaded keystore. */ - private final Set settingNames = new HashSet<>(); - - private KeyStoreWrapper(boolean hasPassword, String type, String secretKeyAlgo, byte[] keystoreBytes) { + private KeyStoreWrapper(int formatVersion, boolean hasPassword, String type, + String stringKeyAlgo, String fileKeyAlgo, + Map settingTypes, byte[] keystoreBytes) { + this.formatVersion = formatVersion; this.hasPassword = hasPassword; this.type = type; try { - secretFactory = SecretKeyFactory.getInstance(secretKeyAlgo); + stringFactory = SecretKeyFactory.getInstance(stringKeyAlgo); + fileFactory = SecretKeyFactory.getInstance(fileKeyAlgo); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } + this.settingTypes = settingTypes; this.keystoreBytes = keystoreBytes; } @@ -121,7 +148,8 @@ static Path keystorePath(Path configDir) { /** Constructs a new keystore with the given password. 
*/ static KeyStoreWrapper create(char[] password) throws Exception { - KeyStoreWrapper wrapper = new KeyStoreWrapper(password.length != 0, NEW_KEYSTORE_TYPE, NEW_KEYSTORE_SECRET_KEY_ALGO, null); + KeyStoreWrapper wrapper = new KeyStoreWrapper(FORMAT_VERSION, password.length != 0, NEW_KEYSTORE_TYPE, + NEW_KEYSTORE_STRING_KEY_ALGO, NEW_KEYSTORE_FILE_KEY_ALGO, new HashMap<>(), null); KeyStore keyStore = KeyStore.getInstance(NEW_KEYSTORE_TYPE); keyStore.load(null, null); wrapper.keystore.set(keyStore); @@ -144,7 +172,7 @@ public static KeyStoreWrapper load(Path configDir) throws IOException { SimpleFSDirectory directory = new SimpleFSDirectory(configDir); try (IndexInput indexInput = directory.openInput(KEYSTORE_FILENAME, IOContext.READONCE)) { ChecksumIndexInput input = new BufferedChecksumIndexInput(indexInput); - CodecUtil.checkHeader(input, KEYSTORE_FILENAME, FORMAT_VERSION, FORMAT_VERSION); + int formatVersion = CodecUtil.checkHeader(input, KEYSTORE_FILENAME, MIN_FORMAT_VERSION, FORMAT_VERSION); byte hasPasswordByte = input.readByte(); boolean hasPassword = hasPasswordByte == 1; if (hasPassword == false && hasPasswordByte != 0) { @@ -152,11 +180,25 @@ public static KeyStoreWrapper load(Path configDir) throws IOException { + String.format(Locale.ROOT, "%02x", hasPasswordByte)); } String type = input.readString(); - String secretKeyAlgo = input.readString(); + String stringKeyAlgo = input.readString(); + final String fileKeyAlgo; + if (formatVersion >= 2) { + fileKeyAlgo = input.readString(); + } else { + fileKeyAlgo = NEW_KEYSTORE_FILE_KEY_ALGO; + } + final Map settingTypes; + if (formatVersion >= 2) { + settingTypes = input.readMapOfStrings().entrySet().stream().collect(Collectors.toMap( + Map.Entry::getKey, + e -> KeyType.valueOf(e.getValue()))); + } else { + settingTypes = new HashMap<>(); + } byte[] keystoreBytes = new byte[input.readInt()]; input.readBytes(keystoreBytes, 0, keystoreBytes.length); CodecUtil.checkFooter(input); - return new KeyStoreWrapper(hasPassword, type, secretKeyAlgo, keystoreBytes); + return new KeyStoreWrapper(formatVersion, hasPassword, type, stringKeyAlgo, fileKeyAlgo, settingTypes, keystoreBytes); } } @@ -189,10 +231,24 @@ public void decrypt(char[] password) throws GeneralSecurityException, IOExceptio keystorePassword.set(new KeyStore.PasswordProtection(password)); Arrays.fill(password, '\0'); - // convert keystore aliases enum into a set for easy lookup + Enumeration aliases = keystore.get().aliases(); - while (aliases.hasMoreElements()) { - settingNames.add(aliases.nextElement()); + if (formatVersion == 1) { + while (aliases.hasMoreElements()) { + settingTypes.put(aliases.nextElement(), KeyType.STRING); + } + } else { + // verify integrity: keys in keystore match what the metadata thinks exist + Set expectedSettings = new HashSet<>(settingTypes.keySet()); + while (aliases.hasMoreElements()) { + String settingName = aliases.nextElement(); + if (expectedSettings.remove(settingName) == false) { + throw new SecurityException("Keystore has been corrupted or tampered with"); + } + } + if (expectedSettings.isEmpty() == false) { + throw new SecurityException("Keystore has been corrupted or tampered with"); + } } } @@ -206,8 +262,19 @@ void save(Path configDir) throws Exception { try (IndexOutput output = directory.createOutput(tmpFile, IOContext.DEFAULT)) { CodecUtil.writeHeader(output, KEYSTORE_FILENAME, FORMAT_VERSION); output.writeByte(password.length == 0 ? 
(byte)0 : (byte)1); - output.writeString(type); - output.writeString(secretFactory.getAlgorithm()); + output.writeString(NEW_KEYSTORE_TYPE); + output.writeString(NEW_KEYSTORE_STRING_KEY_ALGO); + output.writeString(NEW_KEYSTORE_FILE_KEY_ALGO); + output.writeMapOfStrings(settingTypes.entrySet().stream().collect(Collectors.toMap( + Map.Entry::getKey, + e -> e.getValue().name()))); + + // TODO: in the future if we ever change any algorithms used above, we need + // to create a new KeyStore here instead of using the existing one, so that + // the encoded material inside the keystore is updated + assert type.equals(NEW_KEYSTORE_TYPE) : "keystore type changed"; + assert stringFactory.getAlgorithm().equals(NEW_KEYSTORE_STRING_KEY_ALGO) : "string pbe algo changed"; + assert fileFactory.getAlgorithm().equals(NEW_KEYSTORE_FILE_KEY_ALGO) : "file pbe algo changed"; ByteArrayOutputStream keystoreBytesStream = new ByteArrayOutputStream(); keystore.get().store(keystoreBytesStream, password); @@ -228,25 +295,51 @@ void save(Path configDir) throws Exception { @Override public Set getSettingNames() { - return settingNames; + return settingTypes.keySet(); } // TODO: make settings accessible only to code that registered the setting - /** Retrieve a string setting. The {@link SecureString} should be closed once it is used. */ @Override public SecureString getString(String setting) throws GeneralSecurityException { KeyStore.Entry entry = keystore.get().getEntry(setting, keystorePassword.get()); - if (entry instanceof KeyStore.SecretKeyEntry == false) { + if (settingTypes.get(setting) != KeyType.STRING || + entry instanceof KeyStore.SecretKeyEntry == false) { throw new IllegalStateException("Secret setting " + setting + " is not a string"); } // TODO: only allow getting a setting once? KeyStore.SecretKeyEntry secretKeyEntry = (KeyStore.SecretKeyEntry) entry; - PBEKeySpec keySpec = (PBEKeySpec) secretFactory.getKeySpec(secretKeyEntry.getSecretKey(), PBEKeySpec.class); + PBEKeySpec keySpec = (PBEKeySpec) stringFactory.getKeySpec(secretKeyEntry.getSecretKey(), PBEKeySpec.class); SecureString value = new SecureString(keySpec.getPassword()); keySpec.clearPassword(); return value; } + @Override + public InputStream getFile(String setting) throws GeneralSecurityException { + KeyStore.Entry entry = keystore.get().getEntry(setting, keystorePassword.get()); + if (settingTypes.get(setting) != KeyType.FILE || + entry instanceof KeyStore.SecretKeyEntry == false) { + throw new IllegalStateException("Secret setting " + setting + " is not a file"); + } + KeyStore.SecretKeyEntry secretKeyEntry = (KeyStore.SecretKeyEntry) entry; + PBEKeySpec keySpec = (PBEKeySpec) fileFactory.getKeySpec(secretKeyEntry.getSecretKey(), PBEKeySpec.class); + // The PBE keyspec gives us chars, we first convert to bytes, then decode base64 inline. + char[] chars = keySpec.getPassword(); + byte[] bytes = new byte[chars.length]; + for (int i = 0; i < bytes.length; ++i) { + bytes[i] = (byte)chars[i]; // PBE only stores the lower 8 bits, so this narrowing is ok + } + keySpec.clearPassword(); // wipe the original copy + InputStream bytesStream = new ByteArrayInputStream(bytes) { + @Override + public void close() throws IOException { + super.close(); + Arrays.fill(bytes, (byte)0); // wipe our second copy when the stream is exhausted + } + }; + return Base64.getDecoder().wrap(bytesStream); + } + /** * Set a string setting. 
* @@ -256,15 +349,27 @@ void setString(String setting, char[] value) throws GeneralSecurityException { if (ASCII_ENCODER.canEncode(CharBuffer.wrap(value)) == false) { throw new IllegalArgumentException("Value must be ascii"); } - SecretKey secretKey = secretFactory.generateSecret(new PBEKeySpec(value)); + SecretKey secretKey = stringFactory.generateSecret(new PBEKeySpec(value)); + keystore.get().setEntry(setting, new KeyStore.SecretKeyEntry(secretKey), keystorePassword.get()); + settingTypes.put(setting, KeyType.STRING); + } + + /** Set a file setting. */ + void setFile(String setting, byte[] bytes) throws GeneralSecurityException { + bytes = Base64.getEncoder().encode(bytes); + char[] chars = new char[bytes.length]; + for (int i = 0; i < chars.length; ++i) { + chars[i] = (char)bytes[i]; // PBE only stores the lower 8 bits, so this narrowing is ok + } + SecretKey secretKey = stringFactory.generateSecret(new PBEKeySpec(chars)); keystore.get().setEntry(setting, new KeyStore.SecretKeyEntry(secretKey), keystorePassword.get()); - settingNames.add(setting); + settingTypes.put(setting, KeyType.FILE); } /** Remove the given setting from the keystore. */ void remove(String setting) throws KeyStoreException { keystore.get().deleteEntry(setting); - settingNames.remove(setting); + settingTypes.remove(setting); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java b/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java index a9e4effb0d910..2efb36696c5f7 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.settings; +import java.io.InputStream; import java.security.GeneralSecurityException; import java.util.Arrays; import java.util.HashSet; @@ -137,5 +138,26 @@ SecureString getFallback(Settings settings) { }; } + /** + * A setting which contains a file. Reading the setting opens an input stream to the file. + * + * This may be any sensitive file, e.g. a set of credentials normally in plaintext. + */ + public static Setting secureFile(String name, Setting fallback, + Property... properties) { + return new SecureSetting(name, properties) { + @Override + protected InputStream getSecret(SecureSettings secureSettings) throws GeneralSecurityException { + return secureSettings.getFile(getKey()); + } + @Override + InputStream getFallback(Settings settings) { + if (fallback != null) { + return fallback.get(settings); + } + return null; + } + }; + } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java b/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java index f70986864662d..c5a364f54731c 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.settings; import java.io.Closeable; +import java.io.InputStream; import java.security.GeneralSecurityException; import java.util.Set; @@ -36,4 +37,7 @@ public interface SecureSettings extends Closeable { /** Return a string setting. The {@link SecureString} should be closed once it is used. */ SecureString getString(String setting) throws GeneralSecurityException; + + /** Return a file setting. The {@link InputStream} should be closed once it is used. 
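The `setFile`/`getFile` pair above stores arbitrary bytes in a PBE-backed keystore entry by base64-encoding them and widening each byte to a char, then reversing that on read. A standalone sketch of just that round trip, with the KeyStore and PBEKeySpec plumbing omitted:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Base64;

class FileSettingEncodingSketch {

    /** Encode raw bytes the way setFile does: base64 first, then one char per byte. */
    static char[] encode(byte[] raw) {
        byte[] base64 = Base64.getEncoder().encode(raw);
        char[] chars = new char[base64.length];
        for (int i = 0; i < chars.length; ++i) {
            chars[i] = (char) base64[i]; // base64 output is ASCII, so the low 8 bits suffice
        }
        return chars;
    }

    /** Decode the stored chars the way getFile does: narrow to bytes, then base64-decode. */
    static InputStream decode(char[] chars) {
        byte[] bytes = new byte[chars.length];
        for (int i = 0; i < bytes.length; ++i) {
            bytes[i] = (byte) chars[i];
        }
        return Base64.getDecoder().wrap(new ByteArrayInputStream(bytes));
    }

    public static void main(String[] args) throws IOException {
        byte[] original = "some credentials file contents\n".getBytes(StandardCharsets.UTF_8);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (InputStream in = decode(encode(original))) {
            int b;
            while ((b = in.read()) != -1) {
                out.write(b);
            }
        }
        System.out.println(Arrays.equals(original, out.toByteArray())); // true
    }
}
```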
*/ + InputStream getFile(String setting) throws GeneralSecurityException; } diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index db1cf44db2234..b4ab00267c524 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -57,6 +57,7 @@ import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; @@ -442,6 +443,20 @@ public String[] getAsArray(String settingPrefix, String[] defaultArray) throws S public String[] getAsArray(String settingPrefix, String[] defaultArray, Boolean commaDelimited) throws SettingsException { List result = new ArrayList<>(); + final String valueFromPrefix = get(settingPrefix); + final String valueFromPreifx0 = get(settingPrefix + ".0"); + + if (valueFromPrefix != null && valueFromPreifx0 != null) { + final String message = String.format( + Locale.ROOT, + "settings object contains values for [%s=%s] and [%s=%s]", + settingPrefix, + valueFromPrefix, + settingPrefix + ".0", + valueFromPreifx0); + throw new IllegalStateException(message); + } + if (get(settingPrefix) != null) { if (commaDelimited) { String[] strings = Strings.splitStringByCommaToArray(get(settingPrefix)); @@ -1294,6 +1309,11 @@ public SecureString getString(String setting) throws GeneralSecurityException{ return delegate.getString(keyTransform.apply(setting)); } + @Override + public InputStream getFile(String setting) throws GeneralSecurityException{ + return delegate.getFile(keyTransform.apply(setting)); + } + @Override public void close() throws IOException { delegate.close(); diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index e531408b57aa7..ab969b17d499b 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -157,13 +157,6 @@ public String toString() { public static final Setting MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 1, 1, Property.NodeScope); - /** - * If true automatically append node lock id to custom data paths. - */ - public static final Setting ADD_NODE_LOCK_ID_TO_CUSTOM_PATH = - Setting.boolSetting("node.add_lock_id_to_custom_path", true, Property.NodeScope); - - /** * Seed for determining a persisted unique uuid of this node. If the node has already a persisted uuid on disk, * this seed will be ignored and the uuid from disk will be reused. 
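The `getAsArray` change above rejects a settings object that provides both the flat form and the numbered array form of the same key, where previously one form simply took precedence. A small sketch of the new failure mode; the unicast-hosts key is only an example:

```java
import org.elasticsearch.common.settings.Settings;

class ArraySettingConflictSketch {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
            .put("discovery.zen.ping.unicast.hosts", "10.0.0.1")    // flat form
            .put("discovery.zen.ping.unicast.hosts.0", "10.0.0.2")  // array form
            .build();

        // Now throws IllegalStateException: settings object contains values for
        // [discovery.zen.ping.unicast.hosts=10.0.0.1] and [discovery.zen.ping.unicast.hosts.0=10.0.0.2]
        settings.getAsArray("discovery.zen.ping.unicast.hosts");
    }
}
```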
@@ -922,11 +915,7 @@ public Path resolveBaseCustomLocation(IndexSettings indexSettings) { if (customDataDir != null) { // This assert is because this should be caught by MetaDataCreateIndexService assert sharedDataPath != null; - if (ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.get(indexSettings.getNodeSettings())) { - return sharedDataPath.resolve(customDataDir).resolve(Integer.toString(this.nodeLockId)); - } else { - return sharedDataPath.resolve(customDataDir); - } + return sharedDataPath.resolve(customDataDir).resolve(Integer.toString(this.nodeLockId)); } else { throw new IllegalArgumentException("no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available"); } diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index 717453d202626..c66c00728a715 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -106,11 +106,10 @@ public AllocateUnassignedDecision makeAllocationDecision(final ShardRouting unas final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(unassignedShard.index()); final Set inSyncAllocationIds = indexMetaData.inSyncAllocationIds(unassignedShard.id()); final boolean snapshotRestore = unassignedShard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT; - final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData); assert inSyncAllocationIds.isEmpty() == false; // use in-sync allocation ids to select nodes - final NodeShardsResult nodeShardsResult = buildNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode, + final NodeShardsResult nodeShardsResult = buildNodeShardsResult(unassignedShard, snapshotRestore, allocation.getIgnoreNodes(unassignedShard.shardId()), inSyncAllocationIds, shardState, logger); final boolean enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0; logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", unassignedShard.index(), @@ -122,10 +121,6 @@ public AllocateUnassignedDecision makeAllocationDecision(final ShardRouting unas logger.debug("[{}][{}]: missing local data, will restore from [{}]", unassignedShard.index(), unassignedShard.id(), unassignedShard.recoverySource()); return AllocateUnassignedDecision.NOT_TAKEN; - } else if (recoverOnAnyNode) { - // let BalancedShardsAllocator take care of allocating this shard - logger.debug("[{}][{}]: missing local data, recover from any node", unassignedShard.index(), unassignedShard.id()); - return AllocateUnassignedDecision.NOT_TAKEN; } else { // We have a shard that was previously allocated, but we could not find a valid shard copy to allocate the primary. 
// We could just be waiting for the node that holds the primary to start back up, in which case the allocation for @@ -331,19 +326,6 @@ private NodesToAllocate buildNodesToAllocate(RoutingAllocation allocation, Collections.unmodifiableList(noNodeShards)); } - /** - * Return {@code true} if the index is configured to allow shards to be - * recovered on any node - */ - private boolean recoverOnAnyNode(IndexMetaData metaData) { - // don't use the setting directly, not to trigger verbose deprecation logging - return (metaData.isOnSharedFilesystem(metaData.getSettings()) || metaData.isOnSharedFilesystem(this.settings)) - && (metaData.getSettings().getAsBooleanLenientForPreEs6Indices( - metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, deprecationLogger) || - this.settings.getAsBooleanLenientForPreEs6Indices - (metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, deprecationLogger)); - } - protected abstract FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation); private static class NodeShardsResult { diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index ee35993c01e79..e528dde7179b9 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -55,7 +55,6 @@ import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; -import org.elasticsearch.index.shard.ShadowIndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.shard.ShardPath; @@ -343,8 +342,6 @@ public synchronized IndexShard createShard(ShardRouting routing) throws IOExcept logger.debug("creating shard_id {}", shardId); // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary. 
- final boolean canDeleteShardContent = this.indexSettings.isOnSharedFilesystem() == false || - (primary && this.indexSettings.isOnSharedFilesystem()); final Engine.Warmer engineWarmer = (searcher) -> { IndexShard shard = getShardOrNull(shardId.getId()); if (shard != null) { @@ -352,18 +349,11 @@ public synchronized IndexShard createShard(ShardRouting routing) throws IOExcept } }; store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, - new StoreCloseListener(shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId))); - if (useShadowEngine(primary, this.indexSettings)) { - indexShard = new ShadowIndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService, - indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer, - searchOperationListeners); - // no indexing listeners - shadow engines don't index - } else { - indexShard = new IndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService, + new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId))); + indexShard = new IndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer, () -> globalCheckpointSyncer.accept(shardId), searchOperationListeners, indexingOperationListeners); - } eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap(); @@ -381,10 +371,6 @@ public synchronized IndexShard createShard(ShardRouting routing) throws IOExcept } } - static boolean useShadowEngine(boolean primary, IndexSettings indexSettings) { - return primary == false && indexSettings.isShadowReplicaIndex(); - } - @Override public synchronized void removeShard(int shardId, String reason) { final ShardId sId = new ShardId(index(), shardId); @@ -438,16 +424,14 @@ private void closeShard(String reason, ShardId sId, IndexShard indexShard, Store } - private void onShardClose(ShardLock lock, boolean ownsShard) { + private void onShardClose(ShardLock lock) { if (deleted.get()) { // we remove that shards content if this index has been deleted try { - if (ownsShard) { - try { - eventListener.beforeIndexShardDeleted(lock.getShardId(), indexSettings.getSettings()); - } finally { - shardStoreDeleter.deleteShardStore("delete index", lock, indexSettings); - eventListener.afterIndexShardDeleted(lock.getShardId(), indexSettings.getSettings()); - } + try { + eventListener.beforeIndexShardDeleted(lock.getShardId(), indexSettings.getSettings()); + } finally { + shardStoreDeleter.deleteShardStore("delete index", lock, indexSettings); + eventListener.afterIndexShardDeleted(lock.getShardId(), indexSettings.getSettings()); } } catch (IOException e) { shardStoreDeleter.addPendingDelete(lock.getShardId(), indexSettings); @@ -514,12 +498,10 @@ public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { private class StoreCloseListener implements Store.OnClose { private final ShardId shardId; - private final boolean ownsShard; private final Closeable[] toClose; - StoreCloseListener(ShardId shardId, boolean ownsShard, Closeable... toClose) { + StoreCloseListener(ShardId shardId, Closeable... 
toClose) { this.shardId = shardId; - this.ownsShard = ownsShard; this.toClose = toClose; } @@ -527,7 +509,7 @@ private class StoreCloseListener implements Store.OnClose { public void handle(ShardLock lock) { try { assert lock.getShardId().equals(shardId) : "shard id mismatch, expected: " + shardId + " but got: " + lock.getShardId(); - onShardClose(lock, ownsShard); + onShardClose(lock); } finally { try { IOUtils.close(toClose); diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index 4ae16255d5e97..011229256af65 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -160,7 +160,6 @@ public final class IndexSettings { private final String nodeName; private final Settings nodeSettings; private final int numberOfShards; - private final boolean isShadowReplicaIndex; // volatile fields are updated via #updateIndexMetaData(IndexMetaData) under lock private volatile Settings settings; private volatile IndexMetaData indexMetaData; @@ -257,7 +256,6 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti nodeName = Node.NODE_NAME_SETTING.get(settings); this.indexMetaData = indexMetaData; numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null); - isShadowReplicaIndex = indexMetaData.isIndexUsingShadowReplicas(settings); this.defaultField = DEFAULT_FIELD_SETTING.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); @@ -359,15 +357,6 @@ public String customDataPath() { return settings.get(IndexMetaData.SETTING_DATA_PATH); } - /** - * Returns true iff the given settings indicate that the index - * associated with these settings allocates it's shards on a shared - * filesystem. - */ - public boolean isOnSharedFilesystem() { - return indexMetaData.isOnSharedFilesystem(getSettings()); - } - /** * Returns the version the index was created on. * @see Version#indexCreated(Settings) @@ -400,12 +389,6 @@ public IndexMetaData getIndexMetaData() { */ public int getNumberOfReplicas() { return settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null); } - /** - * Returns true iff this index uses shadow replicas. - * @see IndexMetaData#isIndexUsingShadowReplicas(Settings) - */ - public boolean isShadowReplicaIndex() { return isShadowReplicaIndex; } - /** * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings. 
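(Reviewer note, not part of the patch: the IndexService/IndexSettings hunks above drop the shadow-replica and shared-filesystem branches, so shard creation always builds a regular IndexShard and the store-close path always owns, and may delete, the shard contents once the index is deleted. The following is a minimal, hypothetical Java sketch of that simplified ownership flow; the OnClose interface, storeCloseListener(), and deleteShardStore() names here are illustrative stand-ins, not the real Elasticsearch types.)

// Illustrative sketch only: simplified stand-ins for the real IndexService close path.
import java.util.concurrent.atomic.AtomicBoolean;

final class ShardLifecycleSketch {

    // Hypothetical stand-in for Store.OnClose after the patch: no "ownsShard" flag is
    // threaded through any more, because every shard owns its own files.
    interface OnClose {
        void handle(String shardId);
    }

    private final AtomicBoolean indexDeleted = new AtomicBoolean(false);

    // Before the patch the listener carried an ownsShard flag and shadow replicas skipped
    // deletion; now the close path is unconditional once the index has been deleted.
    OnClose storeCloseListener() {
        return shardId -> {
            if (indexDeleted.get()) {
                deleteShardStore(shardId); // always allowed: this node owns the shard files
            }
        };
    }

    void deleteIndex() {
        indexDeleted.set(true);
    }

    private void deleteShardStore(String shardId) {
        System.out.println("deleting store for shard " + shardId);
    }

    public static void main(String[] args) {
        ShardLifecycleSketch sketch = new ShardLifecycleSketch();
        OnClose onClose = sketch.storeCloseListener();
        onClose.handle("[index][0]");   // index not deleted: nothing happens
        sketch.deleteIndex();
        onClose.handle("[index][0]");   // index deleted: store contents are removed
    }
}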
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java index 63861e8084956..282edaeaf73fa 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.analysis; -import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.TextFieldMapper; @@ -78,19 +77,6 @@ public void build(final Map tokenizers, final Map searcherFacotry) throws EngineException { - // There is no translog, so we can get it directly from the searcher - return getFromSearcher(get, searcherFacotry); - } - - @Override - public Translog getTranslog() { - throw new UnsupportedOperationException("shadow engines don't have translogs"); - } - - @Override - public List segments(boolean verbose) { - try (ReleasableLock lock = readLock.acquire()) { - Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose); - for (int i = 0; i < segmentsArr.length; i++) { - // hard code all segments as committed, because they are in - // order for the shadow replica to see them - segmentsArr[i].committed = true; - } - return Arrays.asList(segmentsArr); - } - } - - @Override - public void refresh(String source) throws EngineException { - // we obtain a read lock here, since we don't want a flush to happen while we are refreshing - // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) - try (ReleasableLock lock = readLock.acquire()) { - ensureOpen(); - searcherManager.maybeRefreshBlocking(); - } catch (AlreadyClosedException e) { - throw e; - } catch (Exception e) { - try { - failEngine("refresh failed", e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - throw new RefreshFailedEngineException(shardId, e); - } - } - - @Override - public IndexCommit acquireIndexCommit(boolean flushFirst) throws EngineException { - throw new UnsupportedOperationException("Can not take snapshot from a shadow engine"); - } - - @Override - protected SearcherManager getSearcherManager() { - return searcherManager; - } - - @Override - protected void closeNoLock(String reason) { - if (isClosed.compareAndSet(false, true)) { - try { - logger.debug("shadow replica close searcher manager refCount: {}", store.refCount()); - IOUtils.close(searcherManager); - } catch (Exception e) { - logger.warn("shadow replica failed to close searcher manager", e); - } finally { - store.decRef(); - } - } - } - - @Override - protected SegmentInfos getLastCommittedSegmentInfos() { - return lastCommittedSegmentInfos; - } - - @Override - public long getIndexBufferRAMBytesUsed() { - // No IndexWriter nor version map - throw new UnsupportedOperationException("ShadowEngine has no IndexWriter"); - } - - @Override - public void writeIndexingBuffer() { - // No indexing buffer - throw new UnsupportedOperationException("ShadowEngine has no IndexWriter"); - } - - @Override - public void activateThrottling() { - throw new UnsupportedOperationException("ShadowEngine has no IndexWriter"); - } - - @Override - public void deactivateThrottling() { - throw new UnsupportedOperationException("ShadowEngine has no IndexWriter"); - } - - @Override - public SequenceNumbersService seqNoService() { - throw new UnsupportedOperationException("ShadowEngine doesn't 
track sequence numbers"); - } - - @Override - public boolean isThrottled() { - return false; - } - - @Override - public long getIndexThrottleTimeInMillis() { - return 0L; - } - - @Override - public Engine recoverFromTranslog() throws IOException { - throw new UnsupportedOperationException("can't recover on a shadow engine"); - } - -} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index 3855489efe379..1ab84eda6393d 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -528,14 +528,10 @@ private void parse(ParseContext parseContext, Token token, XContentParser parser if (currentToken == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); contextMapping = contextMappings.get(fieldName); - } else if (currentToken == XContentParser.Token.VALUE_STRING - || currentToken == XContentParser.Token.START_ARRAY - || currentToken == XContentParser.Token.START_OBJECT) { + } else { assert fieldName != null; assert !contextsMap.containsKey(fieldName); contextsMap.put(fieldName, contextMapping.parseContext(parseContext, parser)); - } else { - throw new IllegalArgumentException("contexts must be an object or an array , but was [" + currentToken + "]"); } } } else { diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 4045d968c5c59..f006f056f93a2 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; @@ -286,9 +285,8 @@ public boolean ignoreUnmapped() { return ignoreUnmapped; } - QueryValidationException checkLatLon(boolean indexCreatedBeforeV2_0) { - // validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes - if (GeoValidationMethod.isIgnoreMalformed(validationMethod) || indexCreatedBeforeV2_0) { + QueryValidationException checkLatLon() { + if (GeoValidationMethod.isIgnoreMalformed(validationMethod)) { return null; } @@ -327,15 +325,14 @@ public Query doToQuery(QueryShardContext context) { throw new QueryShardException(context, "field [" + fieldName + "] is not a geo_point field"); } - QueryValidationException exception = checkLatLon(context.indexVersionCreated().before(Version.V_2_0_0)); + QueryValidationException exception = checkLatLon(); if (exception != null) { throw new QueryShardException(context, "couldn't validate latitude/ longitude values", exception); } GeoPoint luceneTopLeft = new GeoPoint(topLeft); GeoPoint luceneBottomRight = new GeoPoint(bottomRight); - final Version indexVersionCreated = context.indexVersionCreated(); - if (indexVersionCreated.onOrAfter(Version.V_2_2_0) || GeoValidationMethod.isCoerce(validationMethod)) { + if (GeoValidationMethod.isCoerce(validationMethod)) { // Special case: if the difference between the left and right is 360 and the right is greater than the left, we 
are asking for // the complete longitude range so need to set longitude to the complete longitude range double right = luceneBottomRight.getLon(); diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java index 95b0c46479615..41aea1be12af4 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -241,13 +240,12 @@ protected Query doToQuery(QueryShardContext shardContext) throws IOException { throw new QueryShardException(shardContext, "field [" + fieldName + "] is not a geo_point field"); } - final Version indexVersionCreated = shardContext.indexVersionCreated(); - QueryValidationException exception = checkLatLon(shardContext.indexVersionCreated().before(Version.V_2_0_0)); + QueryValidationException exception = checkLatLon(); if (exception != null) { throw new QueryShardException(shardContext, "couldn't validate latitude/ longitude values", exception); } - if (indexVersionCreated.onOrAfter(Version.V_2_2_0) || GeoValidationMethod.isCoerce(validationMethod)) { + if (GeoValidationMethod.isCoerce(validationMethod)) { GeoUtils.normalizePoint(center, true, true); } @@ -389,9 +387,8 @@ protected boolean doEquals(GeoDistanceQueryBuilder other) { Objects.equals(ignoreUnmapped, other.ignoreUnmapped); } - private QueryValidationException checkLatLon(boolean indexCreatedBeforeV2_0) { - // validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes - if (GeoValidationMethod.isIgnoreMalformed(validationMethod) || indexCreatedBeforeV2_0) { + private QueryValidationException checkLatLon() { + if (GeoValidationMethod.isIgnoreMalformed(validationMethod)) { return null; } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 32d3d4d4bf8ee..1dee58ced002b 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -221,10 +221,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl */ private final AtomicBoolean active = new AtomicBoolean(); /** - * Allows for the registration of listeners that are called when a change becomes visible for search. This is nullable because - * {@linkplain ShadowIndexShard} doesn't support this. + * Allows for the registration of listeners that are called when a change becomes visible for search. */ - @Nullable private final RefreshListeners refreshListeners; public IndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, @@ -416,6 +414,9 @@ public void updateRoutingEntry(ShardRouting newRouting) throws IOException { // active primaries. 
throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state()); } + assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || + state == IndexShardState.CLOSED : + "routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state; this.shardRouting = newRouting; persistMetadata(newRouting, currentRouting); } @@ -498,6 +499,7 @@ public IndexShardState state() { * @return the previous shard state */ private IndexShardState changeState(IndexShardState newState, String reason) { + assert Thread.holdsLock(mutex); logger.debug("state: [{}]->[{}], reason [{}]", state, newState, reason); IndexShardState previousState = state; state = newState; @@ -1921,9 +1923,9 @@ public void onAfter() { } /** - * Build {@linkplain RefreshListeners} for this shard. Protected so {@linkplain ShadowIndexShard} can override it to return null. + * Build {@linkplain RefreshListeners} for this shard. */ - protected RefreshListeners buildRefreshListeners() { + private RefreshListeners buildRefreshListeners() { return new RefreshListeners( indexSettings::getMaxRefreshListeners, () -> refresh("too_many_listeners"), diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java deleted file mode 100644 index 638c2fe27838d..0000000000000 --- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.index.shard; - -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.cache.IndexCache; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.EngineConfig; -import org.elasticsearch.index.engine.EngineFactory; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.seqno.SeqNoStats; -import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TranslogStats; -import org.elasticsearch.threadpool.ThreadPool; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.function.Consumer; - -/** - * ShadowIndexShard extends {@link IndexShard} to add file synchronization - * from the primary when a flush happens. It also ensures that a replica being - * promoted to a primary causes the shard to fail, kicking off a re-allocation - * of the primary shard. - */ -public final class ShadowIndexShard extends IndexShard { - - public ShadowIndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, - MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, - @Nullable EngineFactory engineFactory, IndexEventListener indexEventListener, IndexSearcherWrapper wrapper, - ThreadPool threadPool, BigArrays bigArrays, Engine.Warmer engineWarmer, - List searchOperationListeners) throws IOException { - super(shardRouting, indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldDataService, engineFactory, - indexEventListener, wrapper, threadPool, bigArrays, engineWarmer, () -> { - }, searchOperationListeners, Collections.emptyList()); - } - - /** - * In addition to the regular accounting done in - * {@link IndexShard#updateRoutingEntry(ShardRouting)}, - * if this shadow replica needs to be promoted to a primary, the shard is - * failed in order to allow a new primary to be re-allocated. - */ - @Override - public void updateRoutingEntry(ShardRouting newRouting) throws IOException { - if (newRouting.primary()) {// becoming a primary - throw new IllegalStateException("can't promote shard to primary"); - } - super.updateRoutingEntry(newRouting); - } - - @Override - public MergeStats mergeStats() { - return new MergeStats(); - } - - @Override - public SeqNoStats seqNoStats() { - return null; - } - - @Override - public boolean canIndex() { - return false; - } - - @Override - protected Engine newEngine(EngineConfig config) { - assert this.shardRouting.primary() == false; - assert config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG; - return engineFactory.newReadOnlyEngine(config); - } - - @Override - protected RefreshListeners buildRefreshListeners() { - // ShadowEngine doesn't have a translog so it shouldn't try to support RefreshListeners. 
- return null; - } - - @Override - public boolean shouldFlush() { - // we don't need to flush since we don't write - all dominated by the primary - return false; - } - - @Override - public TranslogStats translogStats() { - return null; // shadow engine has no translog - } - - @Override - public void updateGlobalCheckpointOnReplica(long checkpoint) { - } - - @Override - public long getLocalCheckpoint() { - return -1; - } - - @Override - public long getGlobalCheckpoint() { - return -1; - } - - @Override - public void addRefreshListener(Translog.Location location, Consumer listener) { - throw new UnsupportedOperationException("Can't listen for a refresh on a shadow engine because it doesn't have a translog"); - } - - @Override - public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException { - throw new UnsupportedOperationException("can't snapshot the directory as the primary may change it underneath us"); - } - - @Override - protected void onNewEngine(Engine newEngine) { - // nothing to do here - the superclass sets the translog on some listeners but we don't have such a thing - } - -} diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 37b728d43d622..f3597d7e5c9ea 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -331,14 +331,7 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { } else if (writtenBy == null) { throw new ElasticsearchParseException("missing or invalid written_by [" + writtenByStr + "]"); } else if (checksum == null) { - if (physicalName.startsWith("segments_") - && writtenBy.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION) == false) { - // its possible the checksum is null for segments_N files that belong to a shard with no data, - // so we will assign it _na_ for now and try to get the checksum from the file itself later - checksum = UNKNOWN_CHECKSUM; - } else { - throw new ElasticsearchParseException("missing checksum for name [" + name + "]"); - } + throw new ElasticsearchParseException("missing checksum for name [" + name + "]"); } return new FileInfo(name, new StoreFileMetaData(physicalName, length, checksum, writtenBy, metaHash), partSize); } diff --git a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index bf8e8466dae8b..fc60543006648 100644 --- a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -28,7 +28,6 @@ import org.apache.lucene.store.NativeFSLockFactory; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.store.SimpleFSLockFactory; -import org.apache.lucene.store.SleepingLockWrapper; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -74,9 +73,6 @@ public Directory newDirectory() throws IOException { Set preLoadExtensions = new HashSet<>( indexSettings.getValue(IndexModule.INDEX_STORE_PRE_LOAD_SETTING)); wrapped = setPreload(wrapped, location, lockFactory, preLoadExtensions); - if (indexSettings.isOnSharedFilesystem()) { - wrapped = new 
SleepingLockWrapper(wrapped, 5000); - } return wrapped; } diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index 51516c3ddedaa..e6e46e00ac02b 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -449,7 +449,6 @@ public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaDat boolean success = false; try { assert metadata.writtenBy() != null; - assert metadata.writtenBy().onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION); output = new LuceneVerifyingIndexOutput(metadata, output); success = true; } finally { @@ -468,7 +467,6 @@ public static void verify(IndexOutput output) throws IOException { public IndexInput openVerifyingInput(String filename, IOContext context, StoreFileMetaData metadata) throws IOException { assert metadata.writtenBy() != null; - assert metadata.writtenBy().onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION); return new VerifyingIndexInput(directory().openInput(filename, context)); } @@ -813,22 +811,14 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg maxVersion = version; } for (String file : info.files()) { - if (version.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION)) { - checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file))); - } else { - throw new IllegalStateException("version must be onOrAfter: " + StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION + " but was: " + version); - } + checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file))); } } if (maxVersion == null) { - maxVersion = StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION; + maxVersion = org.elasticsearch.Version.CURRENT.minimumIndexCompatibilityVersion().luceneVersion; } final String segmentsFile = segmentCommitInfos.getSegmentsFileName(); - if (maxVersion.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION)) { - checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); - } else { - throw new IllegalStateException("version must be onOrAfter: " + StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION + " but was: " + maxVersion); - } + checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); } catch (CorruptIndexException | IndexNotFoundException | IndexFormatTooOldException | IndexFormatTooNewException ex) { // we either know the index is corrupted or it's just not there throw ex; diff --git a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java index c284ad8313c0f..908063173c28c 100644 --- a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java +++ b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java @@ -27,12 +27,11 @@ import org.elasticsearch.common.lucene.Lucene; import java.io.IOException; +import java.text.ParseException; import java.util.Objects; public class StoreFileMetaData implements Writeable { - public static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_5_0_0; - private final String name; // the actual file size on "disk", if compressed, the compressed size @@ -44,20 +43,11 @@ public class StoreFileMetaData implements Writeable { private final BytesRef hash; - public StoreFileMetaData(String name, long 
length, String checksum) { - this(name, length, checksum, FIRST_LUCENE_CHECKSUM_VERSION); - } - public StoreFileMetaData(String name, long length, String checksum, Version writtenBy) { this(name, length, checksum, writtenBy, null); } public StoreFileMetaData(String name, long length, String checksum, Version writtenBy, BytesRef hash) { - // its possible here to have a _na_ checksum or an unsupported writtenBy version, if the - // file is a segments_N file, but that is fine in the case of a segments_N file because - // we handle that case upstream - assert name.startsWith("segments_") || (writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) : - "index version less that " + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy; this.name = Objects.requireNonNull(name, "name must not be null"); this.length = length; this.checksum = Objects.requireNonNull(checksum, "checksum must not be null"); @@ -72,8 +62,11 @@ public StoreFileMetaData(StreamInput in) throws IOException { name = in.readString(); length = in.readVLong(); checksum = in.readString(); - // TODO Why not Version.parse? - writtenBy = Lucene.parseVersionLenient(in.readString(), FIRST_LUCENE_CHECKSUM_VERSION); + try { + writtenBy = Version.parse(in.readString()); + } catch (ParseException e) { + throw new AssertionError(e); + } hash = in.readBytesRef(); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 7bf80cc19861a..bde4438158c1b 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -424,11 +424,11 @@ private synchronized IndexService createIndexService(final String reason, IndexingOperationListener... indexingOperationListeners) throws IOException { final Index index = indexMetaData.getIndex(); final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopeSetting); - logger.debug("creating Index [{}], shards [{}]/[{}{}] - reason [{}]", + logger.debug("creating Index [{}], shards [{}]/[{}] - reason [{}]", indexMetaData.getIndex(), idxSettings.getNumberOfShards(), idxSettings.getNumberOfReplicas(), - idxSettings.isShadowReplicaIndex() ? 
"s" : "", reason); + reason); final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry); for (IndexingOperationListener operationListener : indexingOperationListeners) { @@ -732,16 +732,11 @@ public void deleteShardStore(String reason, ShardId shardId, ClusterState cluste * @return true if the index can be deleted on this node */ public boolean canDeleteIndexContents(Index index, IndexSettings indexSettings) { - // index contents can be deleted if the index is not on a shared file system, - // or if its on a shared file system but its an already closed index (so all - // its resources have already been relinquished) - if (indexSettings.isOnSharedFilesystem() == false || indexSettings.getIndexMetaData().getState() == IndexMetaData.State.CLOSE) { - final IndexService indexService = indexService(index); - if (indexService == null && nodeEnv.hasNodeFile()) { - return true; - } - } else { - logger.trace("{} skipping index directory deletion due to shadow replicas", index); + // index contents can be deleted if its an already closed index (so all its resources have + // already been relinquished) + final IndexService indexService = indexService(index); + if (indexService == null && nodeEnv.hasNodeFile()) { + return true; } return false; } @@ -789,7 +784,6 @@ public enum ShardDeletionCheckResult { FOLDER_FOUND_CAN_DELETE, // shard data exists and can be deleted STILL_ALLOCATED, // the shard is still allocated / active on this node NO_FOLDER_FOUND, // the shards data locations do not exist - SHARED_FILE_SYSTEM, // the shard is located on shared and should not be deleted NO_LOCAL_STORAGE // node does not have local storage (see DiscoveryNode.nodeRequiresLocalStorage) } @@ -802,30 +796,25 @@ public enum ShardDeletionCheckResult { public ShardDeletionCheckResult canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) { assert shardId.getIndex().equals(indexSettings.getIndex()); final IndexService indexService = indexService(shardId.getIndex()); - if (indexSettings.isOnSharedFilesystem() == false) { - if (nodeEnv.hasNodeFile()) { - final boolean isAllocated = indexService != null && indexService.hasShard(shardId.id()); - if (isAllocated) { - return ShardDeletionCheckResult.STILL_ALLOCATED; // we are allocated - can't delete the shard - } else if (indexSettings.hasCustomDataPath()) { - // lets see if it's on a custom path (return false if the shared doesn't exist) - // we don't need to delete anything that is not there - return Files.exists(nodeEnv.resolveCustomLocation(indexSettings, shardId)) ? + if (nodeEnv.hasNodeFile()) { + final boolean isAllocated = indexService != null && indexService.hasShard(shardId.id()); + if (isAllocated) { + return ShardDeletionCheckResult.STILL_ALLOCATED; // we are allocated - can't delete the shard + } else if (indexSettings.hasCustomDataPath()) { + // lets see if it's on a custom path (return false if the shared doesn't exist) + // we don't need to delete anything that is not there + return Files.exists(nodeEnv.resolveCustomLocation(indexSettings, shardId)) ? ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE : ShardDeletionCheckResult.NO_FOLDER_FOUND; - } else { - // lets see if it's path is available (return false if the shared doesn't exist) - // we don't need to delete anything that is not there - return FileSystemUtils.exists(nodeEnv.availableShardPaths(shardId)) ? 
+ } else { + // lets see if it's path is available (return false if the shared doesn't exist) + // we don't need to delete anything that is not there + return FileSystemUtils.exists(nodeEnv.availableShardPaths(shardId)) ? ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE : ShardDeletionCheckResult.NO_FOLDER_FOUND; - } - } else { - return ShardDeletionCheckResult.NO_LOCAL_STORAGE; - } + } } else { - logger.trace("{} skipping shard directory deletion due to shadow replicas", shardId); - return ShardDeletionCheckResult.SHARED_FILE_SYSTEM; + return ShardDeletionCheckResult.NO_LOCAL_STORAGE; } } diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 2307a71171411..663cdece6acb8 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -403,20 +403,6 @@ private void removeShards(final ClusterState state) { // state may result in a new shard being initialized while having the same allocation id as the currently started shard. logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting); indexService.removeShard(shardId.id(), "removing shard (stale copy)"); - } else { - // remove shards where recovery source has changed. This re-initializes shards later in createOrUpdateShards - if (newShardRouting.recoverySource() != null && newShardRouting.recoverySource().getType() == Type.PEER) { - RecoveryState recoveryState = shard.recoveryState(); - final DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, newShardRouting); - if (recoveryState.getSourceNode().equals(sourceNode) == false) { - if (recoveryTargetService.cancelRecoveriesForShard(shardId, "recovery source node changed")) { - // getting here means that the shard was still recovering - logger.debug("{} removing shard (recovery source changed), current [{}], global [{}], shard [{}])", - shardId, recoveryState.getSourceNode(), sourceNode, newShardRouting); - indexService.removeShard(shardId.id(), "removing shard (recovery source node changed)"); - } - } - } } } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index e2113957690cd..93de86193b5c9 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -197,13 +197,8 @@ private RecoverySourceHandler createRecoverySourceHandler(StartRecoveryRequest r new RemoteRecoveryTargetHandler(request.recoveryId(), request.shardId(), targetAllocationId, transportService, request.targetNode(), recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime)); Supplier currentClusterStateVersionSupplier = () -> clusterService.state().getVersion(); - if (shard.indexSettings().isOnSharedFilesystem()) { - handler = new SharedFSRecoverySourceHandler(shard, recoveryTarget, request, currentClusterStateVersionSupplier, - this::delayNewRecoveries, settings); - } else { - handler = new RecoverySourceHandler(shard, recoveryTarget, request, currentClusterStateVersionSupplier, + handler = new RecoverySourceHandler(shard, recoveryTarget, request, currentClusterStateVersionSupplier, 
this::delayNewRecoveries, recoverySettings.getChunkSize().bytesAsInt(), settings); - } return handler; } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 631c18de97f74..a93cdd51e3842 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -126,17 +126,6 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh } } - /** - * Cancel all ongoing recoveries for the given shard. - * - * @param reason reason for cancellation - * @param shardId shard ID for which to cancel recoveries - * @return {@code true} if a recovery was cancelled - */ - public boolean cancelRecoveriesForShard(ShardId shardId, String reason) { - return onGoingRecoveries.cancelRecoveriesForShard(shardId, reason); - } - public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourceNode, final RecoveryListener listener) { // create a new recovery status, and process... final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout()); @@ -297,13 +286,7 @@ public RecoveryResponse newInstance() { */ private Store.MetadataSnapshot getStoreMetadataSnapshot(final RecoveryTarget recoveryTarget) { try { - if (recoveryTarget.indexShard().indexSettings().isOnSharedFilesystem()) { - // we are not going to copy any files, so don't bother listing files, potentially running into concurrency issues with the - // primary changing files underneath us - return Store.MetadataSnapshot.EMPTY; - } else { - return recoveryTarget.indexShard().snapshotStoreMetadata(); - } + return recoveryTarget.indexShard().snapshotStoreMetadata(); } catch (final org.apache.lucene.index.IndexNotFoundException e) { // happens on an empty folder. no need to log logger.trace("{} shard folder empty, recovering all files", recoveryTarget); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java deleted file mode 100644 index fdf0de32f2f8d..0000000000000 --- a/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.indices.recovery; - -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.translog.Translog; - -import java.io.IOException; -import java.util.function.Function; -import java.util.function.Supplier; - -/** - * A recovery handler that skips phase one as well as sending the translog snapshot. - */ -public class SharedFSRecoverySourceHandler extends RecoverySourceHandler { - - private final IndexShard shard; - private final StartRecoveryRequest request; - - SharedFSRecoverySourceHandler(IndexShard shard, RecoveryTargetHandler recoveryTarget, StartRecoveryRequest request, - Supplier currentClusterStateVersionSupplier, - Function delayNewRecoveries, Settings nodeSettings) { - super(shard, recoveryTarget, request, currentClusterStateVersionSupplier, delayNewRecoveries, -1, nodeSettings); - this.shard = shard; - this.request = request; - } - - @Override - public RecoveryResponse recoverToTarget() throws IOException { - boolean engineClosed = false; - try { - logger.trace("recovery [phase1]: skipping phase1 for shared filesystem"); - final long maxUnsafeAutoIdTimestamp = shard.segmentStats(false).getMaxUnsafeAutoIdTimestamp(); - if (request.isPrimaryRelocation()) { - logger.debug("[phase1] closing engine on primary for shared filesystem recovery"); - try { - // if we relocate we need to close the engine in order to open a new - // IndexWriter on the other end of the relocation - engineClosed = true; - shard.flushAndCloseEngine(); - } catch (IOException e) { - logger.warn("close engine failed", e); - shard.failShard("failed to close engine (phase1)", e); - } - } - prepareTargetForTranslog(0, maxUnsafeAutoIdTimestamp); - finalizeRecovery(); - return response; - } catch (Exception e) { - if (engineClosed) { - // If the relocation fails then the primary is closed and can't be - // used anymore... 
(because it's closed) that's a problem, so in - // that case, fail the shard to reallocate a new IndexShard and - // create a new IndexWriter - logger.info("recovery failed for primary shadow shard, failing shard"); - // pass the failure as null, as we want to ensure the store is not marked as corrupted - shard.failShard("primary relocation failed on shared filesystem", e); - } else { - logger.info("recovery failed on shared filesystem", e); - } - throw e; - } - } - - @Override - protected int sendSnapshot(final long startingSeqNo, final Translog.Snapshot snapshot) { - logger.trace("skipping recovery of translog snapshot on shared filesystem"); - return 0; - } - -} diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 4f0ee0e11e4f0..9c9731bc15508 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -173,9 +173,6 @@ public void clusterChanged(ClusterChangedEvent event) { case STILL_ALLOCATED: // nothing to do break; - case SHARED_FILE_SYSTEM: - // nothing to do - break; default: assert false : "unknown shard deletion check result: " + shardDeletionCheckResult; } diff --git a/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java b/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java index c61ffff7311a4..5261268bd4bd3 100644 --- a/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java +++ b/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java @@ -19,6 +19,14 @@ package org.elasticsearch.node; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.env.Environment; + import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; @@ -32,14 +40,6 @@ import java.util.function.Predicate; import java.util.function.UnaryOperator; -import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.env.Environment; - import static org.elasticsearch.common.Strings.cleanPath; public class InternalSettingsPreparer { @@ -125,14 +125,21 @@ public static Environment prepareEnvironment(Settings input, Terminal terminal, } /** - * Initializes the builder with the given input settings, and loads system properties settings if allowed. - * If loadDefaults is true, system property default settings are loaded. + * Initializes the builder with the given input settings, and applies settings and default settings from the specified map (these + * settings typically come from the command line). The default settings are applied only if the setting does not exist in the specified + * output. 
+ * + * @param output the settings builder to apply the input and default settings to + * @param input the input settings + * @param esSettings a map from which to apply settings and default settings */ - private static void initializeSettings(Settings.Builder output, Settings input, Map esSettings) { + static void initializeSettings(final Settings.Builder output, final Settings input, final Map esSettings) { output.put(input); output.putProperties(esSettings, - PROPERTY_DEFAULTS_PREDICATE.and(key -> output.get(STRIP_PROPERTY_DEFAULTS_PREFIX.apply(key)) == null), - STRIP_PROPERTY_DEFAULTS_PREFIX); + PROPERTY_DEFAULTS_PREDICATE + .and(key -> output.get(STRIP_PROPERTY_DEFAULTS_PREFIX.apply(key)) == null) + .and(key -> output.get(STRIP_PROPERTY_DEFAULTS_PREFIX.apply(key) + ".0") == null), + STRIP_PROPERTY_DEFAULTS_PREFIX); output.putProperties(esSettings, PROPERTY_DEFAULTS_PREDICATE.negate(), Function.identity()); output.replacePropertyPlaceholders(); } diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 00e00b745a09b..bf65f5b94419a 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -406,6 +406,8 @@ protected Node(final Environment environment, Collection final Transport transport = networkModule.getTransportSupplier().get(); final TransportService transportService = newTransportService(settings, transport, threadPool, networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings()); + final SearchTransportService searchTransportService = new SearchTransportService(settings, + settingsModule.getClusterSettings(), transportService); final Consumer httpBind; final HttpServerTransport httpServerTransport; if (networkModule.isHttpEnabled()) { @@ -447,8 +449,7 @@ protected Node(final Environment environment, Collection b.bind(IndicesService.class).toInstance(indicesService); b.bind(SearchService.class).toInstance(newSearchService(clusterService, indicesService, threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase())); - b.bind(SearchTransportService.class).toInstance(new SearchTransportService(settings, - settingsModule.getClusterSettings(), transportService)); + b.bind(SearchTransportService.class).toInstance(searchTransportService); b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(settings, bigArrays, scriptModule.getScriptService())); b.bind(Transport.class).toInstance(transport); diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java index 09f78d36b2b37..874c338ff8964 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -270,9 +270,6 @@ static Set getModuleBundles(Path modulesDirectory) throws IOException { Set bundles = new LinkedHashSet<>(); try (DirectoryStream stream = Files.newDirectoryStream(modulesDirectory)) { for (Path module : stream) { - if (FileSystemUtils.isHidden(module)) { - continue; // skip over .DS_Store etc - } PluginInfo info = PluginInfo.readFromProperties(module); Set urls = new LinkedHashSet<>(); // gather urls for jar files diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java new file mode 100644 index 
0000000000000..c15b2553e5de6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction; +import org.elasticsearch.action.admin.cluster.remote.RemoteInfoRequest; +import org.elasticsearch.action.admin.cluster.remote.RemoteInfoResponse; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public final class RestRemoteClusterInfoAction extends BaseRestHandler { + + public RestRemoteClusterInfoAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, "_remote/info", this); + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) + throws IOException { + return channel -> client.execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest(), + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(RemoteInfoResponse response, XContentBuilder builder) throws Exception { + response.toXContent(builder, request); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } + @Override + public boolean canTripCircuitBreaker() { + return false; + } +} diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index 8ce4ec0f8dc9a..e74510012383e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -190,18 +190,10 @@ private Table buildTable(RestRequest request, ClusterStateResponse state, Indice table.addCell(shard.id()); IndexMetaData indexMeta = state.getState().getMetaData().getIndexSafe(shard.index()); - boolean usesShadowReplicas = false; - if (indexMeta != null) { - usesShadowReplicas = indexMeta.isIndexUsingShadowReplicas(); - } if (shard.primary()) { table.addCell("p"); } else { - if (usesShadowReplicas) { - table.addCell("s"); - } else { - table.addCell("r"); - } + table.addCell("r"); } table.addCell(shard.state()); 
table.addCell(commonStats == null ? null : commonStats.getDocs().getCount()); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java index 7f10a1a892981..181c9704e3e49 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java @@ -31,7 +31,7 @@ * Result of the {@link NestedAggregator}. */ public class InternalNested extends InternalSingleBucketAggregation implements Nested { - public InternalNested(String name, long docCount, InternalAggregations aggregations, List pipelineAggregators, + InternalNested(String name, long docCount, InternalAggregations aggregations, List pipelineAggregators, Map metaData) { super(name, docCount, aggregations, pipelineAggregators, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java index 3df4b28993bbc..004c88d43f0e0 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -44,14 +44,14 @@ import java.util.List; import java.util.Map; -public class NestedAggregator extends SingleBucketAggregator { +class NestedAggregator extends SingleBucketAggregator { static final ParseField PATH_FIELD = new ParseField("path"); private final BitSetProducer parentFilter; private final Query childFilter; - public NestedAggregator(String name, AggregatorFactories factories, ObjectMapper parentObjectMapper, ObjectMapper childObjectMapper, + NestedAggregator(String name, AggregatorFactories factories, ObjectMapper parentObjectMapper, ObjectMapper childObjectMapper, SearchContext context, Aggregator parentAggregator, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, context, parentAggregator, pipelineAggregators, metaData); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java index 0ca0ef0a71e91..b491bf8ff0dc4 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java @@ -32,12 +32,12 @@ import java.util.List; import java.util.Map; -public class NestedAggregatorFactory extends AggregatorFactory { +class NestedAggregatorFactory extends AggregatorFactory { private final ObjectMapper parentObjectMapper; private final ObjectMapper childObjectMapper; - public NestedAggregatorFactory(String name, ObjectMapper parentObjectMapper, ObjectMapper childObjectMapper, + NestedAggregatorFactory(String name, ObjectMapper parentObjectMapper, ObjectMapper childObjectMapper, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { super(name, context, parent, subFactories, metaData); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java index 5c76328610dce..4edde7f9bc6d8 100644 
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java @@ -19,26 +19,18 @@ package org.elasticsearch.search.fetch.subphase; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.ConstantScoreScorer; -import org.apache.lucene.search.ConstantScoreWeight; -import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.DocValuesTermsQuery; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopScoreDocCollector; -import org.apache.lucene.search.Weight; import org.apache.lucene.search.join.BitSetProducer; -import org.apache.lucene.util.BitSet; +import org.apache.lucene.search.join.ParentChildrenBlockJoinQuery; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.Queries; @@ -48,9 +40,9 @@ import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.fetch.FetchSubPhase; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SubSearchContext; @@ -131,7 +123,8 @@ public TopDocs topDocs(SearchContext context, FetchSubPhase.HitContext hitContex } BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter); Query childFilter = childObjectMapper.nestedTypeFilter(); - Query q = Queries.filtered(query(), new NestedChildrenQuery(parentFilter, childFilter, hitContext)); + int parentDocId = hitContext.readerContext().docBase + hitContext.docId(); + Query q = Queries.filtered(query(), new ParentChildrenBlockJoinQuery(parentFilter, childFilter, parentDocId)); if (size() == 0) { return new TopDocs(context.searcher().count(q), Lucene.EMPTY_SCORE_DOCS, 0); @@ -156,120 +149,6 @@ public TopDocs topDocs(SearchContext context, FetchSubPhase.HitContext hitContex } } - // A filter that only emits the nested children docs of a specific nested parent doc - static class NestedChildrenQuery extends Query { - - private final BitSetProducer parentFilter; - private final Query childFilter; - private final int docId; - private final LeafReader leafReader; - - NestedChildrenQuery(BitSetProducer parentFilter, Query childFilter, FetchSubPhase.HitContext hitContext) { - this.parentFilter = parentFilter; - this.childFilter = childFilter; - this.docId = hitContext.docId(); - this.leafReader = hitContext.readerContext().reader(); - } - - @Override - public boolean equals(Object obj) { - if (sameClassAs(obj) == false) { - return false; - } - NestedChildrenQuery other = (NestedChildrenQuery) obj; - return parentFilter.equals(other.parentFilter) - && childFilter.equals(other.childFilter) - && docId == other.docId - && leafReader.getCoreCacheKey() == other.leafReader.getCoreCacheKey(); - } - - 
@Override - public int hashCode() { - int hash = classHash(); - hash = 31 * hash + parentFilter.hashCode(); - hash = 31 * hash + childFilter.hashCode(); - hash = 31 * hash + docId; - hash = 31 * hash + leafReader.getCoreCacheKey().hashCode(); - return hash; - } - - @Override - public String toString(String field) { - return "NestedChildren(parent=" + parentFilter + ",child=" + childFilter + ")"; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { - final Weight childWeight = childFilter.createWeight(searcher, false); - return new ConstantScoreWeight(this) { - @Override - public Scorer scorer(LeafReaderContext context) throws IOException { - // Nested docs only reside in a single segment, so no need to evaluate all segments - if (!context.reader().getCoreCacheKey().equals(leafReader.getCoreCacheKey())) { - return null; - } - - // If docId == 0 then we a parent doc doesn't have child docs, because child docs are stored - // before the parent doc and because parent doc is 0 we can safely assume that there are no child docs. - if (docId == 0) { - return null; - } - - final BitSet parents = parentFilter.getBitSet(context); - final int firstChildDocId = parents.prevSetBit(docId - 1) + 1; - // A parent doc doesn't have child docs, so we can early exit here: - if (firstChildDocId == docId) { - return null; - } - - final Scorer childrenScorer = childWeight.scorer(context); - if (childrenScorer == null) { - return null; - } - DocIdSetIterator childrenIterator = childrenScorer.iterator(); - final DocIdSetIterator it = new DocIdSetIterator() { - - int doc = -1; - - @Override - public int docID() { - return doc; - } - - @Override - public int nextDoc() throws IOException { - return advance(doc + 1); - } - - @Override - public int advance(int target) throws IOException { - target = Math.max(firstChildDocId, target); - if (target >= docId) { - // We're outside the child nested scope, so it is done - return doc = NO_MORE_DOCS; - } else { - int advanced = childrenIterator.advance(target); - if (advanced >= docId) { - // We're outside the child nested scope, so it is done - return doc = NO_MORE_DOCS; - } else { - return doc = advanced; - } - } - } - - @Override - public long cost() { - return Math.min(childrenIterator.cost(), docId - firstChildDocId); - } - - }; - return new ConstantScoreScorer(this, score(), it); - } - }; - } - } - } public static final class ParentChildInnerHits extends BaseInnerHits { diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index a896039f7a324..ddb4edec99050 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.util.BitSet; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.GeoDistance; @@ -491,12 +490,11 @@ public static GeoDistanceSortBuilder fromXContent(QueryParseContext context, Str @Override public SortFieldAndFormat build(QueryShardContext context) throws IOException { - final boolean indexCreatedBeforeV2_0 = context.indexVersionCreated().before(Version.V_2_0_0); // validation was not available prior to 2.x, so to support bwc 
percolation queries we only ignore_malformed // on 2.x created indexes GeoPoint[] localPoints = points.toArray(new GeoPoint[points.size()]); - if (!indexCreatedBeforeV2_0 && !GeoValidationMethod.isIgnoreMalformed(validation)) { + if (GeoValidationMethod.isIgnoreMalformed(validation) == false) { for (GeoPoint point : localPoints) { if (GeoUtils.isValidLatitude(point.lat()) == false) { throw new ElasticsearchParseException( diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryContextMapping.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryContextMapping.java index 150b7bf4f907a..38e31ec92a401 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryContextMapping.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryContextMapping.java @@ -107,21 +107,24 @@ protected XContentBuilder toInnerXContent(XContentBuilder builder, Params params * */ @Override - public Set parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException { + public Set parseContext(ParseContext parseContext, XContentParser parser) + throws IOException, ElasticsearchParseException { final Set contexts = new HashSet<>(); Token token = parser.currentToken(); - if (token == Token.VALUE_STRING) { + if (token == Token.VALUE_STRING || token == Token.VALUE_NUMBER || token == Token.VALUE_BOOLEAN) { contexts.add(parser.text()); } else if (token == Token.START_ARRAY) { while ((token = parser.nextToken()) != Token.END_ARRAY) { - if (token == Token.VALUE_STRING) { + if (token == Token.VALUE_STRING || token == Token.VALUE_NUMBER || token == Token.VALUE_BOOLEAN) { contexts.add(parser.text()); } else { - throw new ElasticsearchParseException("context array must have string values"); + throw new ElasticsearchParseException( + "context array must have string, number or boolean values, but was [" + token + "]"); } } } else { - throw new ElasticsearchParseException("contexts must be a string or a list of strings"); + throw new ElasticsearchParseException( + "contexts must be a string, number or boolean or a list of string, number or boolean, but was [" + token + "]"); } return contexts; } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java index 59f59075bd38a..51b740a3529e9 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java @@ -21,6 +21,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -98,7 +99,8 @@ public int hashCode() { private static ObjectParser CATEGORY_PARSER = new ObjectParser<>(NAME, null); static { - CATEGORY_PARSER.declareString(Builder::setCategory, new ParseField(CONTEXT_VALUE)); + CATEGORY_PARSER.declareField(Builder::setCategory, XContentParser::text, new ParseField(CONTEXT_VALUE), + ObjectParser.ValueType.VALUE); CATEGORY_PARSER.declareInt(Builder::setBoost, new ParseField(CONTEXT_BOOST)); CATEGORY_PARSER.declareBoolean(Builder::setPrefix, new 
ParseField(CONTEXT_PREFIX)); } @@ -108,11 +110,16 @@ public static CategoryQueryContext fromXContent(QueryParseContext context) throw XContentParser.Token token = parser.currentToken(); Builder builder = builder(); if (token == XContentParser.Token.START_OBJECT) { - CATEGORY_PARSER.parse(parser, builder, null); - } else if (token == XContentParser.Token.VALUE_STRING) { + try { + CATEGORY_PARSER.parse(parser, builder, null); + } catch(ParsingException e) { + throw new ElasticsearchParseException("category context must be a string, number or boolean"); + } + } else if (token == XContentParser.Token.VALUE_STRING || token == XContentParser.Token.VALUE_BOOLEAN + || token == XContentParser.Token.VALUE_NUMBER) { builder.setCategory(parser.text()); } else { - throw new ElasticsearchParseException("category context must be an object or string"); + throw new ElasticsearchParseException("category context must be an object, string, number or boolean"); } return builder.build(); } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java index f41273662a4c8..273138bbb7969 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java @@ -109,13 +109,14 @@ public final List parseQueryContext(QueryParseContext cont List queryContexts = new ArrayList<>(); XContentParser parser = context.parser(); Token token = parser.nextToken(); - if (token == Token.START_OBJECT || token == Token.VALUE_STRING) { - queryContexts.add(fromXContent(context)); - } else if (token == Token.START_ARRAY) { + if (token == Token.START_ARRAY) { while (parser.nextToken() != Token.END_ARRAY) { queryContexts.add(fromXContent(context)); } + } else { + queryContexts.add(fromXContent(context)); } + return toInternalQueryContexts(queryContexts); } diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index 35c36bee64352..c84e069f80ae7 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -33,44 +33,43 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.Version.V_2_2_0; -import static org.elasticsearch.Version.V_5_0_0_alpha1; +import static org.elasticsearch.Version.V_5_3_0_UNRELEASED; +import static org.elasticsearch.Version.V_6_0_0_alpha1_UNRELEASED; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.sameInstance; public class VersionTests extends ESTestCase { public void testVersionComparison() throws Exception { - assertThat(V_2_2_0.before(V_5_0_0_alpha1), is(true)); - assertThat(V_2_2_0.before(V_2_2_0), is(false)); - assertThat(V_5_0_0_alpha1.before(V_2_2_0), is(false)); + assertThat(V_5_3_0_UNRELEASED.before(V_6_0_0_alpha1_UNRELEASED), is(true)); + assertThat(V_5_3_0_UNRELEASED.before(V_5_3_0_UNRELEASED), is(false)); + assertThat(V_6_0_0_alpha1_UNRELEASED.before(V_5_3_0_UNRELEASED), is(false)); - assertThat(V_2_2_0.onOrBefore(V_5_0_0_alpha1), is(true)); 
- assertThat(V_2_2_0.onOrBefore(V_2_2_0), is(true)); - assertThat(V_5_0_0_alpha1.onOrBefore(V_2_2_0), is(false)); + assertThat(V_5_3_0_UNRELEASED.onOrBefore(V_6_0_0_alpha1_UNRELEASED), is(true)); + assertThat(V_5_3_0_UNRELEASED.onOrBefore(V_5_3_0_UNRELEASED), is(true)); + assertThat(V_6_0_0_alpha1_UNRELEASED.onOrBefore(V_5_3_0_UNRELEASED), is(false)); - assertThat(V_2_2_0.after(V_5_0_0_alpha1), is(false)); - assertThat(V_2_2_0.after(V_2_2_0), is(false)); - assertThat(V_5_0_0_alpha1.after(V_2_2_0), is(true)); + assertThat(V_5_3_0_UNRELEASED.after(V_6_0_0_alpha1_UNRELEASED), is(false)); + assertThat(V_5_3_0_UNRELEASED.after(V_5_3_0_UNRELEASED), is(false)); + assertThat(V_6_0_0_alpha1_UNRELEASED.after(V_5_3_0_UNRELEASED), is(true)); - assertThat(V_2_2_0.onOrAfter(V_5_0_0_alpha1), is(false)); - assertThat(V_2_2_0.onOrAfter(V_2_2_0), is(true)); - assertThat(V_5_0_0_alpha1.onOrAfter(V_2_2_0), is(true)); + assertThat(V_5_3_0_UNRELEASED.onOrAfter(V_6_0_0_alpha1_UNRELEASED), is(false)); + assertThat(V_5_3_0_UNRELEASED.onOrAfter(V_5_3_0_UNRELEASED), is(true)); + assertThat(V_6_0_0_alpha1_UNRELEASED.onOrAfter(V_5_3_0_UNRELEASED), is(true)); assertTrue(Version.fromString("5.0.0-alpha2").onOrAfter(Version.fromString("5.0.0-alpha1"))); assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2"))); assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24"))); assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0"))); - assertThat(V_2_2_0, is(lessThan(V_5_0_0_alpha1))); - assertThat(V_2_2_0.compareTo(V_2_2_0), is(0)); - assertThat(V_5_0_0_alpha1, is(greaterThan(V_2_2_0))); + assertThat(V_5_3_0_UNRELEASED, is(lessThan(V_6_0_0_alpha1_UNRELEASED))); + assertThat(V_5_3_0_UNRELEASED.compareTo(V_5_3_0_UNRELEASED), is(0)); + assertThat(V_6_0_0_alpha1_UNRELEASED, is(greaterThan(V_5_3_0_UNRELEASED))); } public void testMin() { @@ -99,9 +98,11 @@ public void testMax() { public void testMinimumIndexCompatibilityVersion() { assertEquals(Version.V_5_0_0, Version.V_6_0_0_alpha1_UNRELEASED.minimumIndexCompatibilityVersion()); - assertEquals(Version.V_2_0_0, Version.V_5_0_0.minimumIndexCompatibilityVersion()); - assertEquals(Version.V_2_0_0, Version.V_5_1_1_UNRELEASED.minimumIndexCompatibilityVersion()); - assertEquals(Version.V_2_0_0, Version.V_5_0_0_alpha1.minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(2000099), Version.V_5_0_0.minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(2000099), + Version.V_5_1_1_UNRELEASED.minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(2000099), + Version.V_5_0_0_alpha1.minimumIndexCompatibilityVersion()); } public void testVersionConstantPresent() { @@ -155,7 +156,8 @@ public void testVersionNoPresentInSettings() { public void testIndexCreatedVersion() { // an actual index has a IndexMetaData.SETTING_INDEX_UUID - final Version version = randomFrom(Version.V_2_0_0, Version.V_2_3_0, Version.V_5_0_0_alpha1); + final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_2, + Version.V_5_2_0_UNRELEASED, Version.V_6_0_0_alpha1_UNRELEASED); assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build())); } @@ -230,7 +232,7 @@ public void testParseVersion() { }); assertSame(Version.CURRENT, Version.fromString(Version.CURRENT.toString())); - assertSame(Version.fromString("2.0.0-SNAPSHOT"), Version.fromString("2.0.0")); + 
assertEquals(Version.fromString("2.0.0-SNAPSHOT"), Version.fromId(2000099)); expectThrows(IllegalArgumentException.class, () -> { Version.fromString("5.0.0-SNAPSHOT"); @@ -325,8 +327,8 @@ public static void assertUnknownVersion(Version version) { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); assertTrue(isCompatible(Version.V_5_0_0, Version.V_6_0_0_alpha1_UNRELEASED)); - assertFalse(isCompatible(Version.V_2_0_0, Version.V_6_0_0_alpha1_UNRELEASED)); - assertFalse(isCompatible(Version.V_2_0_0, Version.V_5_0_0)); + assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha1_UNRELEASED)); + assertFalse(isCompatible(Version.fromId(2000099), Version.V_5_0_0)); } public boolean isCompatible(Version left, Version right) { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java index 24b9601ad4fe4..5786482e79e00 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java @@ -34,10 +34,10 @@ import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.Hit; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.Hit; +import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; @@ -199,7 +199,8 @@ public void testStartNextScrollRetriesOnRejectionAndSucceeds() throws Exception client.scrollsToReject = randomIntBetween(0, testRequest.getMaxRetries() - 1); DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff(); action.setScroll(scrollId()); - action.startNextScroll(timeValueNanos(System.nanoTime()), 0); + TimeValue now = timeValueNanos(System.nanoTime()); + action.startNextScroll(now, now, 0); assertBusy(() -> assertEquals(client.scrollsToReject + 1, client.scrollAttempts.get())); if (listener.isDone()) { Object result = listener.get(); @@ -213,7 +214,8 @@ public void testStartNextScrollRetriesOnRejectionButFailsOnTooManyRejections() t client.scrollsToReject = testRequest.getMaxRetries() + randomIntBetween(1, 100); DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff(); action.setScroll(scrollId()); - action.startNextScroll(timeValueNanos(System.nanoTime()), 0); + TimeValue now = timeValueNanos(System.nanoTime()); + action.startNextScroll(now, now, 0); assertBusy(() -> assertEquals(testRequest.getMaxRetries() + 1, client.scrollAttempts.get())); assertBusy(() -> assertTrue(listener.isDone())); ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get()); @@ -438,7 +440,9 @@ public ScheduledFuture schedule(TimeValue delay, String name, Runnable comman // Set throttle to 1 request per second to make the math simpler testTask.rethrottle(1f); // Make the last batch look nearly instant but have 100 documents - 
action.startNextScroll(timeValueNanos(System.nanoTime()), 100); + TimeValue lastBatchStartTime = timeValueNanos(System.nanoTime()); + TimeValue now = timeValueNanos(lastBatchStartTime.nanos() + 1); + action.startNextScroll(lastBatchStartTime, now, 100); // So the next request is going to have to wait an extra 100 seconds or so (base was 10 seconds, so 110ish) assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L))); @@ -451,14 +455,13 @@ public ScheduledFuture schedule(TimeValue delay, String name, Runnable comman if (randomBoolean()) { client.lastScroll.get().listener.onResponse(searchResponse); - // The delay is still 100ish seconds because there hasn't been much time between when we requested the bulk and when we got it. - assertThat(capturedDelay.get().seconds(), either(equalTo(100L)).or(equalTo(99L))); + assertEquals(99, capturedDelay.get().seconds()); } else { // Let's rethrottle between the starting the scroll and getting the response testTask.rethrottle(10f); client.lastScroll.get().listener.onResponse(searchResponse); // The delay uses the new throttle - assertThat(capturedDelay.get().seconds(), either(equalTo(10L)).or(equalTo(9L))); + assertEquals(9, capturedDelay.get().seconds()); } // Running the command ought to increment the delay counter on the task. @@ -483,7 +486,7 @@ private void bulkRetryTestCase(boolean failWithRejection) throws Exception { CountDownLatch successLatch = new CountDownLatch(1); DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff() { @Override - void startNextScroll(TimeValue lastBatchStartTime, int lastBatchSize) { + void startNextScroll(TimeValue lastBatchStartTime, TimeValue now, int lastBatchSize) { successLatch.countDown(); } }; @@ -574,7 +577,8 @@ public void testCancelBeforeOnBulkResponse() throws Exception { } public void testCancelBeforeStartNextScroll() throws Exception { - cancelTaskCase((DummyAsyncBulkByScrollAction action) -> action.startNextScroll(timeValueNanos(System.nanoTime()), 0)); + TimeValue now = timeValueNanos(System.nanoTime()); + cancelTaskCase((DummyAsyncBulkByScrollAction action) -> action.startNextScroll(now, now, 0)); } public void testCancelBeforeRefreshAndFinish() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTaskTests.java b/core/src/test/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTaskTests.java index 27d7a24312b28..7356d626c1023 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTaskTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTaskTests.java @@ -168,7 +168,7 @@ public void onFailure(Exception e) { } }); - // Rethrottle on a random number of threads, on of which is this thread. + // Rethrottle on a random number of threads, one of which is this thread. 
Runnable test = () -> { try { int rethrottles = 0; diff --git a/core/src/test/java/org/elasticsearch/action/main/MainResponseTests.java b/core/src/test/java/org/elasticsearch/action/main/MainResponseTests.java index c30e14c3020d2..4a602c11003c6 100644 --- a/core/src/test/java/org/elasticsearch/action/main/MainResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/main/MainResponseTests.java @@ -72,7 +72,7 @@ public void testFromXContent() throws IOException { public void testToXContent() throws IOException { Build build = new Build("buildHash", "2016-11-15".toString(), true); - Version version = Version.V_2_4_5; + Version version = Version.CURRENT; MainResponse response = new MainResponse("nodeName", version, new ClusterName("clusterName"), "clusterUuid", build, true); XContentBuilder builder = XContentFactory.jsonBuilder(); response.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -81,11 +81,11 @@ public void testToXContent() throws IOException { + "\"cluster_name\":\"clusterName\"," + "\"cluster_uuid\":\"clusterUuid\"," + "\"version\":{" - + "\"number\":\"2.4.5\"," + + "\"number\":\"" + version.toString() + "\"," + "\"build_hash\":\"buildHash\"," + "\"build_date\":\"2016-11-15\"," + "\"build_snapshot\":true," - + "\"lucene_version\":\"5.5.2\"}," + + "\"lucene_version\":\"" + version.luceneVersion.toString() + "\"}," + "\"tagline\":\"You Know, for Search\"" + "}", builder.string()); } diff --git a/core/src/test/java/org/elasticsearch/action/search/RemoteClusterConnectionTests.java b/core/src/test/java/org/elasticsearch/action/search/RemoteClusterConnectionTests.java index 15c735cafa683..d73b6709121da 100644 --- a/core/src/test/java/org/elasticsearch/action/search/RemoteClusterConnectionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/RemoteClusterConnectionTests.java @@ -19,8 +19,13 @@ package org.elasticsearch.action.search; import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; @@ -33,25 +38,31 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CancellableThreads; -import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.http.HttpInfo; import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import 
org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TransportConnectionListener; +import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.io.UncheckedIOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; -import java.net.UnknownHostException; -import java.nio.channels.AlreadyConnectedException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -519,4 +530,187 @@ public void run() { } } } + + private static void installNodeStatsHandler(TransportService service, DiscoveryNode...nodes) { + service.registerRequestHandler(NodesInfoAction.NAME, NodesInfoRequest::new, ThreadPool.Names.SAME, false, false, + (request, channel) -> { + List nodeInfos = new ArrayList<>(); + int port = 80; + for (DiscoveryNode node : nodes) { + HttpInfo http = new HttpInfo(new BoundTransportAddress(new TransportAddress[]{node.getAddress()}, + new TransportAddress(node.getAddress().address().getAddress(), port++)), 100); + nodeInfos.add(new NodeInfo(node.getVersion(), Build.CURRENT, node, null, null, null, null, null, null, http, null, + null, null)); + } + channel.sendResponse(new NodesInfoResponse(ClusterName.DEFAULT, nodeInfos, Collections.emptyList())); + }); + + } + + public void testGetConnectionInfo() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService transport1 = startTransport("seed_node", knownNodes, Version.CURRENT); + MockTransportService transport2 = startTransport("seed_node_1", knownNodes, Version.CURRENT); + MockTransportService transport3 = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { + DiscoveryNode node1 = transport1.getLocalDiscoNode(); + DiscoveryNode node2 = transport3.getLocalDiscoNode(); + DiscoveryNode node3 = transport2.getLocalDiscoNode(); + knownNodes.add(transport1.getLocalDiscoNode()); + knownNodes.add(transport3.getLocalDiscoNode()); + knownNodes.add(transport2.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + List seedNodes = Arrays.asList(node3, node1, node2); + Collections.shuffle(seedNodes, random()); + + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + int maxNumConnections = randomIntBetween(1, 5); + try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", + seedNodes, service, maxNumConnections, n -> true)) { + // test no nodes connected + RemoteConnectionInfo remoteConnectionInfo = assertSerialization(getRemoteConnectionInfo(connection)); + assertNotNull(remoteConnectionInfo); + assertEquals(0, remoteConnectionInfo.numNodesConnected); + assertEquals(0, remoteConnectionInfo.seedNodes.size()); + assertEquals(0, remoteConnectionInfo.httpAddresses.size()); + assertEquals(maxNumConnections, remoteConnectionInfo.connectionsPerCluster); + assertEquals("test-cluster", remoteConnectionInfo.clusterAlias); + updateSeedNodes(connection, seedNodes); + expectThrows(RemoteTransportException.class, () -> getRemoteConnectionInfo(connection)); + + for (MockTransportService s : Arrays.asList(transport1, transport2, transport3)) { + installNodeStatsHandler(s, node1, node2, node3); + } + + remoteConnectionInfo = 
getRemoteConnectionInfo(connection); + remoteConnectionInfo = assertSerialization(remoteConnectionInfo); + assertNotNull(remoteConnectionInfo); + assertEquals(connection.getNumNodesConnected(), remoteConnectionInfo.numNodesConnected); + assertEquals(Math.min(3, maxNumConnections), connection.getNumNodesConnected()); + assertEquals(3, remoteConnectionInfo.seedNodes.size()); + assertEquals(remoteConnectionInfo.httpAddresses.size(), Math.min(3, maxNumConnections)); + assertEquals(maxNumConnections, remoteConnectionInfo.connectionsPerCluster); + assertEquals("test-cluster", remoteConnectionInfo.clusterAlias); + for (TransportAddress address : remoteConnectionInfo.httpAddresses) { + assertTrue("port range mismatch: " + address.getPort(), address.getPort() >= 80 && address.getPort() <= 90); + } + } + } + } + } + + public void testRemoteConnectionInfo() throws IOException { + RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), + 4, 3, TimeValue.timeValueMinutes(30)); + assertSerialization(stats); + + RemoteConnectionInfo stats1 = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), + 4, 4, TimeValue.timeValueMinutes(30)); + assertSerialization(stats1); + assertNotEquals(stats, stats1); + + stats1 = new RemoteConnectionInfo("test_cluster_1", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), + 4, 3, TimeValue.timeValueMinutes(30)); + assertSerialization(stats1); + assertNotEquals(stats, stats1); + + stats1 = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 15)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), + 4, 3, TimeValue.timeValueMinutes(30)); + assertSerialization(stats1); + assertNotEquals(stats, stats1); + + stats1 = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 87)), + 4, 3, TimeValue.timeValueMinutes(30)); + assertSerialization(stats1); + assertNotEquals(stats, stats1); + + stats1 = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), + 4, 3, TimeValue.timeValueMinutes(325)); + assertSerialization(stats1); + assertNotEquals(stats, stats1); + + stats1 = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), + 5, 3, TimeValue.timeValueMinutes(30)); + assertSerialization(stats1); + assertNotEquals(stats, stats1); + } + + private RemoteConnectionInfo assertSerialization(RemoteConnectionInfo info) throws IOException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(Version.CURRENT); + info.writeTo(out); + StreamInput in = out.bytes().streamInput(); + in.setVersion(Version.CURRENT); + RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(in); + assertEquals(info, remoteConnectionInfo); + assertEquals(info.hashCode(), remoteConnectionInfo.hashCode()); + return randomBoolean() ? 
info : remoteConnectionInfo; + } + } + + public void testRenderConnectionInfoXContent() throws IOException { + RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,80)), + 4, 3, TimeValue.timeValueMinutes(30)); + stats = assertSerialization(stats); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + stats.toXContent(builder, null); + builder.endObject(); + assertEquals("{\"test_cluster\":{\"seeds\":[\"0.0.0.0:1\"],\"http_addresses\":[\"0.0.0.0:80\"],\"connected\":true," + + "\"num_nodes_connected\":3,\"max_connections_per_cluster\":4,\"initial_connect_timeout\":\"30m\"}}", builder.string()); + + stats = new RemoteConnectionInfo("some_other_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,1), new TransportAddress(TransportAddress.META_ADDRESS,2)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,80), new TransportAddress(TransportAddress.META_ADDRESS,81)), + 2, 0, TimeValue.timeValueSeconds(30)); + stats = assertSerialization(stats); + builder = XContentFactory.jsonBuilder(); + builder.startObject(); + stats.toXContent(builder, null); + builder.endObject(); + assertEquals("{\"some_other_cluster\":{\"seeds\":[\"0.0.0.0:1\",\"0.0.0.0:2\"],\"http_addresses\":[\"0.0.0.0:80\",\"0.0.0.0:81\"]," + + "\"connected\":false,\"num_nodes_connected\":0,\"max_connections_per_cluster\":2,\"initial_connect_timeout\":\"30s\"}}", + builder.string()); + } + + private RemoteConnectionInfo getRemoteConnectionInfo(RemoteClusterConnection connection) throws Exception { + AtomicReference statsRef = new AtomicReference<>(); + AtomicReference exceptionRef = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + connection.getConnectionInfo(new ActionListener() { + @Override + public void onResponse(RemoteConnectionInfo remoteConnectionInfo) { + statsRef.set(remoteConnectionInfo); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + latch.await(); + if (exceptionRef.get() != null) { + throw exceptionRef.get(); + } + return statsRef.get(); + } } diff --git a/core/src/test/java/org/elasticsearch/action/search/RemoteClusterServiceTests.java b/core/src/test/java/org/elasticsearch/action/search/RemoteClusterServiceTests.java index d0f0427e71084..81ee9141e2b59 100644 --- a/core/src/test/java/org/elasticsearch/action/search/RemoteClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/RemoteClusterServiceTests.java @@ -143,14 +143,14 @@ public void testGroupClusterIndices() throws IOException { assertTrue(service.isRemoteClusterRegistered("cluster_2")); assertFalse(service.isRemoteClusterRegistered("foo")); Map> perClusterIndices = service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar", - "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo"}, i -> false); + "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo", "cluster*:baz", "*:boo", "no*match:boo"}, i -> false); String[] localIndices = perClusterIndices.computeIfAbsent(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY, k -> Collections.emptyList()).toArray(new String[0]); assertNotNull(perClusterIndices.remove(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY)); - assertArrayEquals(new String[]{"foo:bar", "foo"}, localIndices); + assertArrayEquals(new String[]{"foo:bar", "foo", "no*match:boo"}, 
localIndices); assertEquals(2, perClusterIndices.size()); - assertEquals(Arrays.asList("bar", "test"), perClusterIndices.get("cluster_1")); - assertEquals(Arrays.asList("foo:bar", "foo*"), perClusterIndices.get("cluster_2")); + assertEquals(Arrays.asList("bar", "test", "baz", "boo"), perClusterIndices.get("cluster_1")); + assertEquals(Arrays.asList("foo:bar", "foo*", "baz", "boo"), perClusterIndices.get("cluster_2")); IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar", diff --git a/core/src/test/java/org/elasticsearch/action/support/GroupedActionListenerTests.java b/core/src/test/java/org/elasticsearch/action/support/GroupedActionListenerTests.java new file mode 100644 index 0000000000000..2af2da7ba0939 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/support/GroupedActionListenerTests.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +public class GroupedActionListenerTests extends ESTestCase { + + public void testNotifications() throws InterruptedException { + AtomicReference> resRef = new AtomicReference<>(); + ActionListener> result = new ActionListener>() { + @Override + public void onResponse(Collection integers) { + resRef.set(integers); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }; + final int groupSize = randomIntBetween(10, 1000); + AtomicInteger count = new AtomicInteger(); + Collection defaults = randomBoolean() ? 
Collections.singletonList(-1) : + Collections.emptyList(); + GroupedActionListener listener = new GroupedActionListener<>(result, groupSize, + defaults); + int numThreads = randomIntBetween(2, 5); + Thread[] threads = new Thread[numThreads]; + CyclicBarrier barrier = new CyclicBarrier(numThreads); + for (int i = 0; i < numThreads; i++) { + threads[i] = new Thread() { + @Override + public void run() { + try { + barrier.await(10, TimeUnit.SECONDS); + } catch (Exception e) { + throw new AssertionError(e); + } + int c = 0; + while((c = count.incrementAndGet()) <= groupSize) { + listener.onResponse(c-1); + } + } + }; + threads[i].start(); + } + for (Thread t : threads) { + t.join(); + } + assertNotNull(resRef.get()); + ArrayList list = new ArrayList<>(resRef.get()); + Collections.sort(list); + int expectedSize = groupSize + defaults.size(); + assertEquals(expectedSize, resRef.get().size()); + int expectedValue = defaults.isEmpty() ? 0 : -1; + for (int i = 0; i < expectedSize; i++) { + assertEquals(Integer.valueOf(expectedValue++), list.get(i)); + } + } + + public void testFailed() { + AtomicReference> resRef = new AtomicReference<>(); + AtomicReference excRef = new AtomicReference<>(); + + ActionListener> result = new ActionListener>() { + @Override + public void onResponse(Collection integers) { + resRef.set(integers); + } + + @Override + public void onFailure(Exception e) { + excRef.set(e); + } + }; + Collection defaults = randomBoolean() ? Collections.singletonList(-1) : + Collections.emptyList(); + int size = randomIntBetween(3, 4); + GroupedActionListener listener = new GroupedActionListener<>(result, size, + defaults); + listener.onResponse(0); + IOException ioException = new IOException(); + RuntimeException rtException = new RuntimeException(); + listener.onFailure(rtException); + listener.onFailure(ioException); + if (size == 4) { + listener.onResponse(2); + } + assertNotNull(excRef.get()); + assertEquals(rtException, excRef.get()); + assertEquals(1, excRef.get().getSuppressed().length); + assertEquals(ioException, excRef.get().getSuppressed()[0]); + assertNull(resRef.get()); + listener.onResponse(1); + assertNull(resRef.get()); + } +} diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 7447e9fb55994..182d2f8645d3f 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -132,33 +132,6 @@ public void testReplication() throws Exception { assertThat(primary.knownLocalCheckpoints, equalTo(replicasProxy.generatedLocalCheckpoints)); } - - public void testReplicationWithShadowIndex() throws Exception { - final String index = "test"; - final ShardId shardId = new ShardId(index, "_na_", 0); - - final ClusterState state = stateWithActivePrimary(index, true, randomInt(5)); - final long primaryTerm = state.getMetaData().index(index).primaryTerm(0); - final IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().shardRoutingTable(shardId); - final ShardRouting primaryShard = indexShardRoutingTable.primaryShard(); - - Request request = new Request(shardId); - PlainActionFuture listener = new PlainActionFuture<>(); - final TestReplicationOperation op = new TestReplicationOperation(request, - new TestPrimary(primaryShard, primaryTerm), listener, false, - new 
TestReplicaProxy(), () -> state, logger, "test"); - op.execute(); - assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); - assertThat(request.processedOnReplicas, equalTo(Collections.emptySet())); - assertTrue("listener is not marked as done", listener.isDone()); - ShardInfo shardInfo = listener.actionGet().getShardInfo(); - assertThat(shardInfo.getFailed(), equalTo(0)); - assertThat(shardInfo.getFailures(), arrayWithSize(0)); - assertThat(shardInfo.getSuccessful(), equalTo(1)); - assertThat(shardInfo.getTotal(), equalTo(indexShardRoutingTable.getSize())); - } - - public void testDemotedPrimary() throws Exception { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); @@ -310,7 +283,7 @@ public void testWaitForActiveShards() throws Exception { final ShardRouting primaryShard = shardRoutingTable.primaryShard(); final TestReplicationOperation op = new TestReplicationOperation(request, new TestPrimary(primaryShard, primaryTerm), - listener, randomBoolean(), new TestReplicaProxy(), () -> state, logger, "test"); + listener, new TestReplicaProxy(), () -> state, logger, "test"); if (passesActiveShardCheck) { assertThat(op.checkActiveShardCount(), nullValue()); @@ -519,13 +492,14 @@ public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, l class TestReplicationOperation extends ReplicationOperation { TestReplicationOperation(Request request, Primary primary, ActionListener listener, Replicas replicas, Supplier clusterStateSupplier) { - this(request, primary, listener, true, replicas, clusterStateSupplier, ReplicationOperationTests.this.logger, "test"); + this(request, primary, listener, replicas, clusterStateSupplier, ReplicationOperationTests.this.logger, "test"); } TestReplicationOperation(Request request, Primary primary, - ActionListener listener, boolean executeOnReplicas, - Replicas replicas, Supplier clusterStateSupplier, Logger logger, String opType) { - super(request, primary, listener, executeOnReplicas, replicas, clusterStateSupplier, logger, opType); + ActionListener listener, + Replicas replicas, Supplier clusterStateSupplier, + Logger logger, String opType) { + super(request, primary, listener, replicas, clusterStateSupplier, logger, opType); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index abe0e9977dd36..bf15974d3e5b9 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -497,8 +497,7 @@ action.new AsyncPrimaryAction(request, primaryShard.allocationId().getId(), crea createReplicatedOperation( Request request, ActionListener> actionListener, - TransportReplicationAction.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + TransportReplicationAction.PrimaryShardReference primaryShardReference) { return new NoopReplicationOperation(request, actionListener) { public void execute() throws Exception { assertPhase(task, "primary"); @@ -550,8 +549,7 @@ action.new AsyncPrimaryAction(request, primaryShard.allocationId().getRelocation createReplicatedOperation( Request request, ActionListener> actionListener, - TransportReplicationAction.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + 
TransportReplicationAction.PrimaryShardReference primaryShardReference) { return new NoopReplicationOperation(request, actionListener) { public void execute() throws Exception { assertPhase(task, "primary"); @@ -650,35 +648,6 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { assertEquals(0, shardFailedRequests.length); } - public void testShadowIndexDisablesReplication() throws Exception { - final String index = "test"; - final ShardId shardId = new ShardId(index, "_na_", 0); - - ClusterState state = stateWithActivePrimary(index, true, randomInt(5)); - MetaData.Builder metaData = MetaData.builder(state.metaData()); - Settings.Builder settings = Settings.builder().put(metaData.get(index).getSettings()); - settings.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true); - metaData.put(IndexMetaData.builder(metaData.get(index)).settings(settings)); - state = ClusterState.builder(state).metaData(metaData).build(); - setState(clusterService, state); - AtomicBoolean executed = new AtomicBoolean(); - ShardRouting primaryShard = state.routingTable().shardRoutingTable(shardId).primaryShard(); - action.new AsyncPrimaryAction(new Request(shardId), primaryShard.allocationId().getId(), - createTransportChannel(new PlainActionFuture<>()), null) { - @Override - protected ReplicationOperation> createReplicatedOperation( - Request request, ActionListener> actionListener, - TransportReplicationAction.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { - assertFalse(executeOnReplicas); - assertFalse(executed.getAndSet(true)); - return new NoopReplicationOperation(request, actionListener); - } - - }.run(); - assertThat(executed.get(), equalTo(true)); - } - public void testSeqNoIsSetOnPrimary() throws Exception { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); @@ -738,8 +707,7 @@ action.new AsyncPrimaryAction(request, primaryShard.allocationId().getId(), crea createReplicatedOperation( Request request, ActionListener> actionListener, - TransportReplicationAction.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + TransportReplicationAction.PrimaryShardReference primaryShardReference) { assertIndexShardCounter(1); if (throwExceptionOnCreation) { throw new ElasticsearchException("simulated exception, during createReplicatedOperation"); @@ -1150,7 +1118,7 @@ private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService class NoopReplicationOperation extends ReplicationOperation> { NoopReplicationOperation(Request request, ActionListener> listener) { - super(request, null, listener, true, null, null, TransportReplicationActionTests.this.logger, "noop"); + super(request, null, listener, null, null, TransportReplicationActionTests.this.logger, "noop"); } @Override diff --git a/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java b/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java index 8a00a430dbc87..07c5a7e157f69 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java @@ -79,22 +79,19 @@ public void testPositionalArgs() throws Exception { false, output -> assertThat(output, containsString("Positional arguments not allowed, found [foo]")), (foreground, pidFile, quiet, esSettings) -> {}, - "foo" - ); + "foo"); runTest( ExitCodes.USAGE, false, output -> assertThat(output, containsString("Positional arguments not allowed, found [foo, 
bar]")), (foreground, pidFile, quiet, esSettings) -> {}, - "foo", "bar" - ); + "foo", "bar"); runTest( ExitCodes.USAGE, false, output -> assertThat(output, containsString("Positional arguments not allowed, found [foo]")), (foreground, pidFile, quiet, esSettings) -> {}, - "-E", "foo=bar", "foo", "-E", "baz=qux" - ); + "-E", "foo=bar", "foo", "-E", "baz=qux"); } public void testThatPidFileCanBeConfigured() throws Exception { @@ -157,18 +154,25 @@ public void testElasticsearchSettings() throws Exception { assertThat(settings, hasEntry("foo", "bar")); assertThat(settings, hasEntry("baz", "qux")); }, - "-Efoo=bar", "-E", "baz=qux" - ); + "-Efoo=bar", "-E", "baz=qux"); } public void testElasticsearchSettingCanNotBeEmpty() throws Exception { runTest( ExitCodes.USAGE, false, - output -> assertThat(output, containsString("Setting [foo] must not be empty")), + output -> assertThat(output, containsString("setting [foo] must not be empty")), (foreground, pidFile, quiet, esSettings) -> {}, - "-E", "foo=" - ); + "-E", "foo="); + } + + public void testElasticsearchSettingCanNotBeDuplicated() throws Exception { + runTest( + ExitCodes.USAGE, + false, + output -> assertThat(output, containsString("setting [foo] already set, saw [bar] and [baz]")), + (foreground, pidFile, quiet, initialEnv) -> {}, + "-E", "foo=bar", "-E", "foo=baz"); } public void testUnknownOption() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/bwcompat/IpFieldBwCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/IpFieldBwCompatIT.java deleted file mode 100644 index e1aa8d1425dff..0000000000000 --- a/core/src/test/java/org/elasticsearch/bwcompat/IpFieldBwCompatIT.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.bwcompat; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; - -import java.util.Arrays; -import java.util.Collection; - -import org.elasticsearch.Version; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.aggregations.AggregationBuilders; -import org.elasticsearch.search.aggregations.bucket.range.Range; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.sort.SortBuilders; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalSettingsPlugin; - -@ESIntegTestCase.SuiteScopeTestCase -public class IpFieldBwCompatIT extends ESIntegTestCase { - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class); // uses index.merge.enabled - } - - @Override - public void setupSuiteScopeCluster() throws Exception { - assertAcked(prepareCreate("old_index") - .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_3.id) - .addMapping("type", "ip_field", "type=ip")); - assertAcked(prepareCreate("new_index") - .addMapping("type", "ip_field", "type=ip")); - - indexRandom(true, - client().prepareIndex("old_index", "type", "1").setSource("ip_field", "127.0.0.1"), - client().prepareIndex("new_index", "type", "1").setSource("ip_field", "127.0.0.1"), - client().prepareIndex("new_index", "type", "2").setSource("ip_field", "::1")); - } - - public void testSort() { - SearchResponse response = client().prepareSearch("old_index", "new_index") - .addSort(SortBuilders.fieldSort("ip_field")).get(); - assertNoFailures(response); - assertEquals(3, response.getHits().getTotalHits()); - assertEquals("::1", response.getHits().getAt(0).getSortValues()[0]); - assertEquals("127.0.0.1", response.getHits().getAt(1).getSortValues()[0]); - assertEquals("127.0.0.1", response.getHits().getAt(2).getSortValues()[0]); - } - - public void testRangeAgg() { - SearchResponse response = client().prepareSearch("old_index", "new_index") - .addAggregation(AggregationBuilders.ipRange("ip_range").field("ip_field") - .addMaskRange("127.0.0.1/16") - .addMaskRange("::1/64")).get(); - assertNoFailures(response); - assertEquals(3, response.getHits().getTotalHits()); - Range range = response.getAggregations().get("ip_range"); - assertEquals(2, range.getBuckets().size()); - assertEquals("::1/64", range.getBuckets().get(0).getKeyAsString()); - assertEquals(3, range.getBuckets().get(0).getDocCount()); - assertEquals("127.0.0.1/16", range.getBuckets().get(1).getKeyAsString()); - assertEquals(2, range.getBuckets().get(1).getDocCount()); - } - - public void testTermsAgg() { - SearchResponse response = client().prepareSearch("old_index", "new_index") - .addAggregation(AggregationBuilders.terms("ip_terms").field("ip_field")).get(); - assertNoFailures(response); - assertEquals(3, response.getHits().getTotalHits()); - Terms terms = response.getAggregations().get("ip_terms"); - assertEquals(2, terms.getBuckets().size()); - assertEquals(2, terms.getBucketByKey("127.0.0.1").getDocCount()); - assertEquals(1, terms.getBucketByKey("::1").getDocCount()); - } -} diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 
553ab15d670d4..1d6a634a877f4 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -316,13 +316,11 @@ void assertBasicSearchWorks(String indexName) { ElasticsearchAssertions.assertNoFailures(searchRsp); assertEquals(numDocs, searchRsp.getHits().getTotalHits()); GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings(indexName).get(); - Version versionCreated = Version.fromId(Integer.parseInt(getSettingsResponse.getSetting(indexName, "index.version.created"))); - if (versionCreated.onOrAfter(Version.V_2_4_0)) { - searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.existsQuery("field.with.dots")); - searchRsp = searchReq.get(); - ElasticsearchAssertions.assertNoFailures(searchRsp); - assertEquals(numDocs, searchRsp.getHits().getTotalHits()); - } + searchReq = client().prepareSearch(indexName) + .setQuery(QueryBuilders.existsQuery("field.with.dots")); + searchRsp = searchReq.get(); + ElasticsearchAssertions.assertNoFailures(searchRsp); + assertEquals(numDocs, searchRsp.getHits().getTotalHits()); } boolean findPayloadBoostInExplanation(Explanation expl) { diff --git a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index af8c758b5ed1b..942d7a222ecfb 100644 --- a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -134,53 +134,6 @@ public void testFillShardLevelInfo() { assertEquals(test1Path.getParent().getParent().getParent().toAbsolutePath().toString(), routingToPath.get(test_1)); } - public void testFillShardsWithShadowIndices() { - final Index index = new Index("non-shadow", "0xcafe0000"); - ShardRouting s0 = ShardRouting.newUnassigned(new ShardId(index, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); - s0 = ShardRoutingHelper.initialize(s0, "node1"); - s0 = ShardRoutingHelper.moveToStarted(s0); - Path i0Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("0"); - CommonStats commonStats0 = new CommonStats(); - commonStats0.store = new StoreStats(100); - final Index index2 = new Index("shadow", "0xcafe0001"); - ShardRouting s1 = ShardRouting.newUnassigned(new ShardId(index2, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); - s1 = ShardRoutingHelper.initialize(s1, "node2"); - s1 = ShardRoutingHelper.moveToStarted(s1); - Path i1Path = createTempDir().resolve("indices").resolve(index2.getUUID()).resolve("0"); - CommonStats commonStats1 = new CommonStats(); - commonStats1.store = new StoreStats(1000); - ShardStats[] stats = new ShardStats[] { - new ShardStats(s0, new ShardPath(false, i0Path, i0Path, s0.shardId()), commonStats0 , null, null), - new ShardStats(s1, new ShardPath(false, i1Path, i1Path, s1.shardId()), commonStats1 , null, null) - }; - ImmutableOpenMap.Builder shardSizes = ImmutableOpenMap.builder(); - ImmutableOpenMap.Builder routingToPath = ImmutableOpenMap.builder(); - ClusterState state = ClusterState.builder(new ClusterName("blarg")) - .version(0) - .metaData(MetaData.builder() - .put(IndexMetaData.builder("non-shadow") - .settings(Settings.builder() - .put(IndexMetaData.SETTING_INDEX_UUID, "0xcafe0000") - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) - .numberOfShards(1) - 
.numberOfReplicas(0)) - .put(IndexMetaData.builder("shadow") - .settings(Settings.builder() - .put(IndexMetaData.SETTING_INDEX_UUID, "0xcafe0001") - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) - .numberOfShards(1) - .numberOfReplicas(0))) - .build(); - logger.info("--> calling buildShardLevelInfo with state: {}", state); - InternalClusterInfoService.buildShardLevelInfo(logger, stats, shardSizes, routingToPath, state); - assertEquals(2, shardSizes.size()); - assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(s0))); - assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(s1))); - assertEquals(100L, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(s0)).longValue()); - assertEquals(0L, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(s1)).longValue()); - } - public void testFillDiskUsage() { ImmutableOpenMap.Builder newLeastAvaiableUsages = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder newMostAvaiableUsages = ImmutableOpenMap.builder(); diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolverTests.java new file mode 100644 index 0000000000000..d6c8707c1d76e --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolverTests.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public class ClusterNameExpressionResolverTests extends ESTestCase { + + private ClusterNameExpressionResolver clusterNameResolver = new ClusterNameExpressionResolver(Settings.EMPTY); + private static final Set remoteClusters = new HashSet<>(); + + static { + remoteClusters.add("cluster1"); + remoteClusters.add("cluster2"); + remoteClusters.add("totallyDifferent"); + } + + public void testExactMatch() { + List clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "totallyDifferent"); + assertEquals(new HashSet<>(Arrays.asList("totallyDifferent")), new HashSet<>(clusters)); + } + + public void testNoWildCardNoMatch() { + List clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "totallyDifferent2"); + assertTrue(clusters.isEmpty()); + } + + public void testWildCardNoMatch() { + List clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "totally*2"); + assertTrue(clusters.isEmpty()); + } + + public void testSimpleWildCard() { + List clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "*"); + assertEquals(new HashSet<>(Arrays.asList("cluster1", "cluster2", "totallyDifferent")), new HashSet<>(clusters)); + } + + public void testSuffixWildCard() { + List clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "cluster*"); + assertEquals(new HashSet<>(Arrays.asList("cluster1", "cluster2")), new HashSet<>(clusters)); + } + + public void testPrefixWildCard() { + List clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "*Different"); + assertEquals(new HashSet<>(Arrays.asList("totallyDifferent")), new HashSet<>(clusters)); + } + + public void testMiddleWildCard() { + List clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "clu*1"); + assertEquals(new HashSet<>(Arrays.asList("cluster1")), new HashSet<>(clusters)); + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java deleted file mode 100644 index 0a39168c853b6..0000000000000 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.cluster.routing; - - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; - -import java.io.BufferedReader; -import java.io.InputStreamReader; -import java.util.Arrays; - -public class RoutingBackwardCompatibilityTests extends ESTestCase { - - public void testBackwardCompatibility() throws Exception { - try (BufferedReader reader = new BufferedReader(new InputStreamReader(RoutingBackwardCompatibilityTests.class - .getResourceAsStream("/org/elasticsearch/cluster/routing/shard_routes.txt"), "UTF-8"))) { - for (String line = reader.readLine(); line != null; line = reader.readLine()) { - if (line.startsWith("#")) { // comment - continue; - } - String[] parts = line.split("\t"); - assertEquals(Arrays.toString(parts), 7, parts.length); - final String index = parts[0]; - final int numberOfShards = Integer.parseInt(parts[1]); - final String type = parts[2]; - final String id = parts[3]; - final String routing = "null".equals(parts[4]) ? null : parts[4]; - final int pre20ExpectedShardId = Integer.parseInt(parts[5]); // not needed anymore - old hashing is gone - final int currentExpectedShard = Integer.parseInt(parts[6]); - - OperationRouting operationRouting = new OperationRouting(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, - ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - for (Version version : VersionUtils.allReleasedVersions()) { - if (version.onOrAfter(Version.V_2_0_0) == false) { - // unsupported version, no need to test - continue; - } - final Settings settings = settings(version).build(); - IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(settings).numberOfShards(numberOfShards) - .numberOfReplicas(randomInt(3)).build(); - MetaData.Builder metaData = MetaData.builder().put(indexMetaData, false); - RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetaData).build(); - ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .metaData(metaData).routingTable(routingTable).build(); - final int shardId = operationRouting.indexShards(clusterState, index, id, routing).shardId().getId(); - assertEquals(currentExpectedShard, shardId); - } - } - } - } -} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java index 74d3dda8e36fd..ee9d69a220593 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java @@ -33,11 +33,14 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.List; import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.not; @@ -84,4 +87,69 @@ public void testStartedShardsMatching() { assertThat(shardRouting.currentNodeId(), equalTo("node2")); assertThat(shardRouting.relocatingNodeId(), nullValue()); } + + public void testRelocatingPrimariesWithInitializingReplicas() { + AllocationService allocation = createAllocationService(); + + logger.info("--> building initial cluster state"); + AllocationId primaryId = AllocationId.newRelocation(AllocationId.newInitializing()); + AllocationId replicaId = AllocationId.newInitializing(); + boolean relocatingReplica = randomBoolean(); + if (relocatingReplica) { + replicaId = AllocationId.newRelocation(replicaId); + } + + final IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(settings(Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(1) + .putInSyncAllocationIds(0, + relocatingReplica ? Sets.newHashSet(primaryId.getId(), replicaId.getId()) : Sets.newHashSet(primaryId.getId())) + .build(); + final Index index = indexMetaData.getIndex(); + ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))) + .metaData(MetaData.builder().put(indexMetaData, false)); + + final ShardRouting relocatingPrimary = TestShardRouting.newShardRouting( + new ShardId(index, 0), "node1", "node2", true, ShardRoutingState.RELOCATING, primaryId); + final ShardRouting replica = TestShardRouting.newShardRouting( + new ShardId(index, 0), "node3", relocatingReplica ? "node4" : null, false, + relocatingReplica ? ShardRoutingState.RELOCATING : ShardRoutingState.INITIALIZING, replicaId); + + stateBuilder.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index) + .addIndexShard(new IndexShardRoutingTable.Builder(relocatingPrimary.shardId()) + .addShard(relocatingPrimary) + .addShard(replica) + .build())) + .build()); + + + ClusterState state = stateBuilder.build(); + + logger.info("--> test starting of relocating primary shard with initializing / relocating replica"); + ClusterState newState = allocation.applyStartedShards(state, Arrays.asList(relocatingPrimary.getTargetRelocatingShard())); + assertNotEquals(newState, state); + assertTrue(newState.routingTable().index("test").allPrimaryShardsActive()); + ShardRouting startedReplica = newState.routingTable().index("test").shard(0).replicaShards().get(0); + if (relocatingReplica) { + assertTrue(startedReplica.relocating()); + assertEquals(replica.currentNodeId(), startedReplica.currentNodeId()); + assertEquals(replica.relocatingNodeId(), startedReplica.relocatingNodeId()); + assertEquals(replica.allocationId().getId(), startedReplica.allocationId().getId()); + assertNotEquals(replica.allocationId().getRelocationId(), startedReplica.allocationId().getRelocationId()); + } else { + assertTrue(startedReplica.initializing()); + assertEquals(replica.currentNodeId(), startedReplica.currentNodeId()); + assertNotEquals(replica.allocationId().getId(), startedReplica.allocationId().getId()); + } + + logger.info("--> test starting of relocating primary shard together with initializing / relocating replica"); + List startedShards = new ArrayList<>(); + startedShards.add(relocatingPrimary.getTargetRelocatingShard()); + startedShards.add(relocatingReplica ? 
replica.getTargetRelocatingShard() : replica); + Collections.shuffle(startedShards, random()); + newState = allocation.applyStartedShards(state, startedShards); + assertNotEquals(newState, state); + assertTrue(newState.routingTable().index("test").shard(0).allShardsStarted()); + } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/AddFileKeyStoreCommandTests.java b/core/src/test/java/org/elasticsearch/common/settings/AddFileKeyStoreCommandTests.java new file mode 100644 index 0000000000000..9044103e43b2d --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/settings/AddFileKeyStoreCommandTests.java @@ -0,0 +1,149 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Map; + +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.env.Environment; + +import static org.hamcrest.Matchers.containsString; + +public class AddFileKeyStoreCommandTests extends KeyStoreCommandTestCase { + @Override + protected Command newCommand() { + return new AddFileKeyStoreCommand() { + @Override + protected Environment createEnv(Terminal terminal, Map settings) { + return env; + } + }; + } + + private Path createRandomFile() throws IOException { + int length = randomIntBetween(10, 20); + byte[] bytes = new byte[length]; + for (int i = 0; i < length; ++i) { + bytes[i] = randomByte(); + } + Path file = env.configFile().resolve("randomfile"); + Files.write(file, bytes); + return file; + } + + private void addFile(KeyStoreWrapper keystore, String setting, Path file) throws Exception { + keystore.setFile(setting, Files.readAllBytes(file)); + keystore.save(env.configFile()); + } + + public void testMissing() throws Exception { + UserException e = expectThrows(UserException.class, this::execute); + assertEquals(ExitCodes.DATA_ERROR, e.exitCode); + assertThat(e.getMessage(), containsString("keystore not found")); + } + + public void testOverwritePromptDefault() throws Exception { + Path file = createRandomFile(); + KeyStoreWrapper keystore = createKeystore(""); + addFile(keystore, "foo", file); + terminal.addTextInput(""); + execute("foo", "path/dne"); + assertSecureFile("foo", file); + } + + public void testOverwritePromptExplicitNo() throws Exception { + Path file = createRandomFile(); + KeyStoreWrapper keystore = createKeystore(""); + addFile(keystore, "foo", file); + terminal.addTextInput("n"); // explicit no + execute("foo", "path/dne"); + assertSecureFile("foo", file); + } + + public void testOverwritePromptExplicitYes() throws Exception { + Path file1 = 
createRandomFile(); + KeyStoreWrapper keystore = createKeystore(""); + addFile(keystore, "foo", file1); + terminal.addTextInput("y"); + Path file2 = createRandomFile(); + execute("foo", file2.toString()); + assertSecureFile("foo", file2); + } + + public void testOverwriteForceShort() throws Exception { + Path file1 = createRandomFile(); + KeyStoreWrapper keystore = createKeystore(""); + addFile(keystore, "foo", file1); + Path file2 = createRandomFile(); + execute("-f", "foo", file2.toString()); + assertSecureFile("foo", file2); + } + + public void testOverwriteForceLong() throws Exception { + Path file1 = createRandomFile(); + KeyStoreWrapper keystore = createKeystore(""); + addFile(keystore, "foo", file1); + Path file2 = createRandomFile(); + execute("--force", "foo", file2.toString()); + assertSecureFile("foo", file2); + } + + public void testForceNonExistent() throws Exception { + createKeystore(""); + Path file = createRandomFile(); + execute("--force", "foo", file.toString()); + assertSecureFile("foo", file); + } + + public void testMissingSettingName() throws Exception { + createKeystore(""); + UserException e = expectThrows(UserException.class, this::execute); + assertEquals(ExitCodes.USAGE, e.exitCode); + assertThat(e.getMessage(), containsString("Missing setting name")); + } + + public void testMissingFileName() throws Exception { + createKeystore(""); + UserException e = expectThrows(UserException.class, () -> execute("foo")); + assertEquals(ExitCodes.USAGE, e.exitCode); + assertThat(e.getMessage(), containsString("Missing file name")); + } + + public void testFileDNE() throws Exception { + createKeystore(""); + UserException e = expectThrows(UserException.class, () -> execute("foo", "path/dne")); + assertEquals(ExitCodes.IO_ERROR, e.exitCode); + assertThat(e.getMessage(), containsString("File [path/dne] does not exist")); + } + + public void testExtraArguments() throws Exception { + createKeystore(""); + Path file = createRandomFile(); + UserException e = expectThrows(UserException.class, () -> execute("foo", file.toString(), "bar")); + assertEquals(e.getMessage(), ExitCodes.USAGE, e.exitCode); + assertThat(e.getMessage(), containsString("Unrecognized extra arguments [bar]")); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java b/core/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java index ef732c1e29cd8..11c3f107fe768 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java @@ -127,7 +127,7 @@ public void testNonAsciiValue() throws Exception { assertEquals("String value must contain only ASCII", e.getMessage()); } - public void testNpe() throws Exception { + public void testMissingSettingName() throws Exception { createKeystore(""); terminal.addTextInput(""); UserException e = expectThrows(UserException.class, this::execute); diff --git a/core/src/test/java/org/elasticsearch/common/settings/CreateKeyStoreCommandTests.java b/core/src/test/java/org/elasticsearch/common/settings/CreateKeyStoreCommandTests.java index 8584d4d155515..5d4741c72912c 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/CreateKeyStoreCommandTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/CreateKeyStoreCommandTests.java @@ -47,7 +47,7 @@ public void testPosix() throws Exception { } public void testNotPosix() throws Exception { - setupEnv(false); + 
env = setupEnv(false, fileSystems); execute(); Path configDir = env.configFile(); assertNotNull(KeyStoreWrapper.load(configDir)); diff --git a/core/src/test/java/org/elasticsearch/common/settings/KeyStoreCommandTestCase.java b/core/src/test/java/org/elasticsearch/common/settings/KeyStoreCommandTestCase.java index 1e4d24a344e61..500b7b627b840 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/KeyStoreCommandTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/settings/KeyStoreCommandTestCase.java @@ -20,7 +20,9 @@ package org.elasticsearch.common.settings; import java.io.IOException; +import java.io.InputStream; import java.nio.file.FileSystem; +import java.nio.file.FileSystems; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; @@ -53,10 +55,10 @@ public void closeMockFileSystems() throws IOException { @Before public void setupEnv() throws IOException { - setupEnv(true); // default to posix, but tests may call setupEnv(false) to overwrite + env = setupEnv(true, fileSystems); // default to posix, but tests may call setupEnv(false) to overwrite } - void setupEnv(boolean posix) throws IOException { + static Environment setupEnv(boolean posix, List fileSystems) throws IOException { final Configuration configuration; if (posix) { configuration = Configuration.unix().toBuilder().setAttributeViews("basic", "owner", "posix", "unix").build(); @@ -68,7 +70,7 @@ void setupEnv(boolean posix) throws IOException { PathUtilsForTesting.installMock(fs); // restored by restoreFileSystem in ESTestCase Path home = fs.getPath("/", "test-home"); Files.createDirectories(home.resolve("config")); - env = new Environment(Settings.builder().put("path.home", home).build()); + return new Environment(Settings.builder().put("path.home", home).build()); } KeyStoreWrapper createKeystore(String password, String... settings) throws Exception { @@ -94,4 +96,28 @@ void assertSecureString(String setting, String value) throws Exception { void assertSecureString(KeyStoreWrapper keystore, String setting, String value) throws Exception { assertEquals(value, keystore.getString(setting).toString()); } + + void assertSecureFile(String setting, Path file) throws Exception { + assertSecureFile(loadKeystore(""), setting, file); + } + + void assertSecureFile(KeyStoreWrapper keystore, String setting, Path file) throws Exception { + byte[] expectedBytes = Files.readAllBytes(file); + try (InputStream input = keystore.getFile(setting)) { + for (int i = 0; i < expectedBytes.length; ++i) { + int got = input.read(); + int expected = Byte.toUnsignedInt(expectedBytes[i]); + if (got < 0) { + fail("Got EOF from keystore stream at position " + i + " but expected 0x" + Integer.toHexString(expected)); + } + assertEquals("Byte " + i, expected, got); + } + int eof = input.read(); + if (eof != -1) { + fail("Found extra bytes in file stream from keystore, expected " + expectedBytes.length + + " bytes but found 0x" + Integer.toHexString(eof)); + } + } + + } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java b/core/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java new file mode 100644 index 0000000000000..0b42eb59f827f --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.FileSystem; +import java.util.ArrayList; +import java.util.List; + +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +public class KeyStoreWrapperTests extends ESTestCase { + + Environment env; + List fileSystems = new ArrayList<>(); + + @After + public void closeMockFileSystems() throws IOException { + IOUtils.close(fileSystems); + } + + @Before + public void setupEnv() throws IOException { + env = KeyStoreCommandTestCase.setupEnv(true, fileSystems); + } + + public void testFileSettingExhaustiveBytes() throws Exception { + KeyStoreWrapper keystore = KeyStoreWrapper.create(new char[0]); + byte[] bytes = new byte[256]; + for (int i = 0; i < 256; ++i) { + bytes[i] = (byte)i; + } + keystore.setFile("foo", bytes); + keystore.save(env.configFile()); + keystore = KeyStoreWrapper.load(env.configFile()); + keystore.decrypt(new char[0]); + try (InputStream stream = keystore.getFile("foo")) { + for (int i = 0; i < 256; ++i) { + int got = stream.read(); + if (got < 0) { + fail("Expected 256 bytes but read " + i); + } + assertEquals(i, got); + } + assertEquals(-1, stream.read()); // nothing left + } + } +} diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java index c1dc07116ecf1..6eec34a90e981 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java @@ -562,4 +562,16 @@ public void testSecureSettingConflict() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> setting.get(settings)); assertTrue(e.getMessage().contains("must be stored inside the Elasticsearch keystore")); } + + public void testGetAsArrayFailsOnDuplicates() { + final Settings settings = + Settings.builder() + .put("foobar.0", "bar") + .put("foobar.1", "baz") + .put("foobar", "foo") + .build(); + final IllegalStateException e = expectThrows(IllegalStateException.class, () -> settings.getAsArray("foobar")); + assertThat(e, hasToString(containsString("settings object contains values for [foobar=foo] and [foobar.0=bar]"))); + } + } diff --git a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java index bf10c117b1336..308ae0eb35079 100644 --- a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java @@ -68,14 +68,13 @@ public class IndexFolderUpgraderTests extends ESTestCase { public void testUpgradeCustomDataPath() throws IOException { Path customPath 
= createTempDir(); final Settings nodeSettings = Settings.builder() - .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()) .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build(); try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID()); Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) @@ -97,14 +96,13 @@ public void testUpgradeCustomDataPath() throws IOException { public void testPartialUpgradeCustomDataPath() throws IOException { Path customPath = createTempDir(); final Settings nodeSettings = Settings.builder() - .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()) .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build(); try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID()); Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) @@ -136,14 +134,13 @@ void upgrade(Index index, Path source, Path target) throws IOException { } public void testUpgrade() throws IOException { - final Settings nodeSettings = Settings.builder() - .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build(); + final Settings nodeSettings = Settings.EMPTY; try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID()); Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build(); @@ -159,8 +156,7 @@ public void testUpgrade() throws IOException { } public void testUpgradeIndices() throws IOException { - final Settings nodeSettings = Settings.builder() - .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build(); + final Settings nodeSettings = Settings.EMPTY; try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { Map> indexSettingsMap = new HashMap<>(); for (int i = 0; i < randomIntBetween(2, 5); i++) { @@ -168,7 +164,7 @@ public void testUpgradeIndices() throws IOException { Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) 
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build(); diff --git a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 9c11ae6b23ff6..f067212caafe9 100644 --- a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -380,11 +380,10 @@ public void testCustomDataPaths() throws Exception { assertThat("index paths uses the regular template", env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID()))); - IndexSettings s3 = new IndexSettings(s2.getIndexMetaData(), - Settings.builder().put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), false).build()); + IndexSettings s3 = new IndexSettings(s2.getIndexMetaData(), Settings.builder().build()); assertThat(env.availableShardPaths(sid), equalTo(env.availableShardPaths(sid))); - assertThat(env.resolveCustomLocation(s3, sid), equalTo(PathUtils.get("/tmp/foo/" + index.getUUID() + "/0"))); + assertThat(env.resolveCustomLocation(s3, sid), equalTo(PathUtils.get("/tmp/foo/0/" + index.getUUID() + "/0"))); assertThat("shard paths with a custom data_path should contain only regular paths", env.availableShardPaths(sid), diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 6f79922075bf9..d47221f9e31d6 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -330,26 +330,10 @@ public void testIndexDeletionWhenNodeRejoins() throws Exception { final int numNodes = 2; final List nodes; - if (randomBoolean()) { - // test with a regular index - logger.info("--> starting a cluster with " + numNodes + " nodes"); - nodes = internalCluster().startNodes(numNodes); - logger.info("--> create an index"); - createIndex(indexName); - } else { - // test with a shadow replica index - final Path dataPath = createTempDir(); - logger.info("--> created temp data path for shadow replicas [{}]", dataPath); - logger.info("--> starting a cluster with " + numNodes + " nodes"); - final Settings nodeSettings = Settings.builder() - .put("node.add_lock_id_to_custom_path", false) - .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath.toString()) - .put("index.store.fs.fs_lock", randomFrom("native", "simple")) - .build(); - nodes = internalCluster().startNodes(numNodes, nodeSettings); - logger.info("--> create a shadow replica index"); - createShadowReplicaIndex(indexName, dataPath, numNodes - 1); - } + logger.info("--> starting a cluster with " + numNodes + " nodes"); + nodes = internalCluster().startNodes(numNodes); + logger.info("--> create an index"); + createIndex(indexName); logger.info("--> waiting for green status"); ensureGreen(); @@ -535,23 +519,4 @@ public void testArchiveBrokenClusterSettings() throws Exception { + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey())); assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); } - - - /** - * Creates a shadow replica index and asserts that the index creation was acknowledged. - * Can only be invoked on a cluster where each node has been configured with shared data - * paths and the other necessary settings for shadow replicas. 
- */ - private void createShadowReplicaIndex(final String name, final Path dataPath, final int numReplicas) { - assert Files.exists(dataPath); - assert numReplicas >= 0; - final Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .build(); - assertAcked(prepareCreate(name).setSettings(idxSettings).get()); - } - } diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index ca330d9d9df13..c09c92a70414e 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -72,16 +72,6 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { private final DiscoveryNode node3 = newNode("node3"); private TestAllocator testAllocator; - - /** - * needed due to random usage of {@link IndexMetaData#INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING}. removed once - * shadow replicas are removed. - */ - @Override - protected boolean enableWarningsCheck() { - return false; - } - @Before public void buildTestAllocator() { this.testAllocator = new TestAllocator(); @@ -401,79 +391,6 @@ private RoutingAllocation getRestoreRoutingAllocation(AllocationDeciders allocat return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false); } - /** - * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation - * deciders say yes, we allocate to that node. - */ - public void testRecoverOnAnyNode() { - RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders(), "allocId"); - testAllocator.addData(node1, "allocId", randomBoolean()); - testAllocator.allocateUnassigned(allocation); - assertThat(allocation.routingNodesChanged(), equalTo(true)); - assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); - assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); - assertClusterHealthStatus(allocation, ClusterHealthStatus.RED); - } - - /** - * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation - * deciders say throttle, we add it to ignored shards. - */ - public void testRecoverOnAnyNodeThrottle() { - RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(throttleAllocationDeciders(), "allocId"); - testAllocator.addData(node1, "allocId", randomBoolean()); - testAllocator.allocateUnassigned(allocation); - assertThat(allocation.routingNodesChanged(), equalTo(true)); - assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false)); - assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); - } - - /** - * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy but allocation - * deciders say no, we still allocate to that node. 
- */ - public void testRecoverOnAnyNodeForcesAllocateIfShardAvailable() { - RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(noAllocationDeciders(), "allocId"); - testAllocator.addData(node1, "allocId", randomBoolean()); - testAllocator.allocateUnassigned(allocation); - assertThat(allocation.routingNodesChanged(), equalTo(true)); - assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); - assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); - assertClusterHealthStatus(allocation, ClusterHealthStatus.RED); - } - - /** - * Tests that when recovering using "recover_on_any_node" and we don't find a node with a shard copy we let - * BalancedShardAllocator assign the shard - */ - public void testRecoverOnAnyNodeDoesNotAssignIfNoShardAvailable() { - RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders(), "allocId"); - testAllocator.addData(node1, null, randomBoolean()); - testAllocator.allocateUnassigned(allocation); - assertThat(allocation.routingNodesChanged(), equalTo(false)); - assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); - assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); - assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); - } - - private RoutingAllocation getRecoverOnAnyNodeRoutingAllocation(AllocationDeciders allocationDeciders, String... allocIds) { - MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(Version.CURRENT) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true)) - .numberOfShards(1).numberOfReplicas(0).putInSyncAllocationIds(0, Sets.newHashSet(allocIds))) - .build(); - - RoutingTable routingTable = RoutingTable.builder() - .addAsRestore(metaData.index(shardId.getIndex()), new SnapshotRecoverySource(new Snapshot("test", new SnapshotId("test", UUIDs.randomBase64UUID())), Version.CURRENT, shardId.getIndexName())) - .build(); - ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .metaData(metaData) - .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); - return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false); - } - private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders, UnassignedInfo.Reason reason, String... 
activeAllocationIds) { MetaData metaData = MetaData.builder() diff --git a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java index a5cf35105c22c..775f7e8f1b590 100644 --- a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java @@ -65,6 +65,8 @@ import static org.hamcrest.Matchers.equalTo; public class ReplicaShardAllocatorTests extends ESAllocationTestCase { + private static final org.apache.lucene.util.Version MIN_SUPPORTED_LUCENE_VERSION = org.elasticsearch.Version.CURRENT + .minimumIndexCompatibilityVersion().luceneVersion; private final ShardId shardId = new ShardId("test", "_na_", 0); private final DiscoveryNode node1 = newNode("node1"); private final DiscoveryNode node2 = newNode("node2"); @@ -119,8 +121,8 @@ public void testAsyncFetchOnAnythingButIndexCreation() { public void testSimpleFullMatchAllocation() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId())); @@ -132,8 +134,8 @@ public void testSimpleFullMatchAllocation() { public void testSyncIdMatch() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM" ,MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId())); @@ -145,8 +147,8 @@ public void testSyncIdMatch() { public void testFileChecksumMatch() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); DiscoveryNode nodeToMatch = randomBoolean() ? 
node2 : node3; - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(nodeToMatch, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(nodeToMatch, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId())); @@ -160,7 +162,7 @@ public void testFileChecksumMatch() { */ public void testNoPrimaryData() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); - testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); @@ -172,7 +174,7 @@ public void testNoPrimaryData() { */ public void testNoDataForReplicaOnAnyNode() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); @@ -184,8 +186,8 @@ public void testNoDataForReplicaOnAnyNode() { */ public void testNoMatchingFilesForReplicaOnAnyNode() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node2, "NO_MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "NO_MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); @@ -197,8 +199,8 @@ public void testNoMatchingFilesForReplicaOnAnyNode() { */ public void testNoOrThrottleDecidersRemainsInUnassigned() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(randomBoolean() ? 
noAllocationDeciders() : throttleAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); @@ -222,8 +224,8 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing return Decision.YES; } }))); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); @@ -231,8 +233,9 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing public void testDelayedAllocation() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), - Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)) + .build(), UnassignedInfo.Reason.NODE_LEFT); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); if (randomBoolean()) { // we sometime return empty list of files, make sure we test this as well testAllocator.addData(node2, null); @@ -244,7 +247,7 @@ public void testDelayedAllocation() { allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT); - testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); @@ -253,9 +256,9 @@ public void testDelayedAllocation() { public void testCancelRecoveryBetterSyncId() { RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node2, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node3, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + 
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node3, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.processExistingRecoveries(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); @@ -264,9 +267,10 @@ public void testCancelRecoveryBetterSyncId() { public void testNotCancellingRecoveryIfSyncedOnExistingRecovery() { RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node3, randomBoolean() ? "MATCH" : "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node3, randomBoolean() ? "MATCH" : "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", + MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.processExistingRecoveries(allocation); assertThat(allocation.routingNodesChanged(), equalTo(false)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); @@ -274,8 +278,8 @@ public void testNotCancellingRecoveryIfSyncedOnExistingRecovery() { public void testNotCancellingRecovery() { RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.processExistingRecoveries(allocation); assertThat(allocation.routingNodesChanged(), equalTo(false)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); diff --git a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java index 80e453d665e3d..385770426f5ae 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -47,36 +47,6 @@ /** Unit test(s) for IndexService */ public class IndexServiceTests extends ESSingleNodeTestCase { - public void testDetermineShadowEngineShouldBeUsed() { - IndexSettings regularSettings = new IndexSettings( - IndexMetaData - .builder("regular") - .settings(Settings.builder() - .put(SETTING_NUMBER_OF_SHARDS, 2) - .put(SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build()) - .build(), - Settings.EMPTY); - - IndexSettings shadowSettings = new IndexSettings( - IndexMetaData - .builder("shadow") - .settings(Settings.builder() - .put(SETTING_NUMBER_OF_SHARDS, 2) - .put(SETTING_NUMBER_OF_REPLICAS, 1) - 
.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build()) - .build(), - Settings.EMPTY); - - assertFalse("no shadow replicas for normal settings", IndexService.useShadowEngine(true, regularSettings)); - assertFalse("no shadow replicas for normal settings", IndexService.useShadowEngine(false, regularSettings)); - assertFalse("no shadow replicas for primary shard with shadow settings", IndexService.useShadowEngine(true, shadowSettings)); - assertTrue("shadow replicas for replica shards with shadow settings",IndexService.useShadowEngine(false, shadowSettings)); - } - public static CompressedXContent filter(QueryBuilder filterBuilder) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java deleted file mode 100644 index 2bf9f0efbfd9d..0000000000000 --- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java +++ /dev/null @@ -1,905 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingNodes; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShadowIndexShard; -import org.elasticsearch.index.store.FsDirectoryService; -import org.elasticsearch.index.translog.TranslogStats; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.snapshots.SnapshotState; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; - -/** - * Tests for indices that use shadow replicas and a shared filesystem - */ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) -public 
class IndexWithShadowReplicasIT extends ESIntegTestCase { - - private Settings nodeSettings(Path dataPath) { - return nodeSettings(dataPath.toString()); - } - - private Settings nodeSettings(String dataPath) { - return Settings.builder() - .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), false) - .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath) - .put(FsDirectoryService.INDEX_LOCK_FACTOR_SETTING.getKey(), randomFrom("native", "simple")) - .build(); - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); - } - - public void testCannotCreateWithBadPath() throws Exception { - Settings nodeSettings = nodeSettings("/badpath"); - internalCluster().startNodes(1, nodeSettings); - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_DATA_PATH, "/etc/foo") - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build(); - try { - assertAcked(prepareCreate("foo").setSettings(idxSettings)); - fail("should have failed"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage(), - e.getMessage().contains("custom path [/etc/foo] is not a sub-path of path.shared_data")); - } - } - - /** - * Tests the case where we create an index without shadow replicas, snapshot it and then restore into - * an index with shadow replicas enabled. - */ - public void testRestoreToShadow() throws ExecutionException, InterruptedException { - final Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - internalCluster().startNodes(3, nodeSettings); - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build(); - assertAcked(prepareCreate("foo").setSettings(idxSettings)); - ensureGreen(); - final int numDocs = randomIntBetween(10, 100); - for (int i = 0; i < numDocs; i++) { - client().prepareIndex("foo", "doc", ""+i).setSource("foo", "bar").get(); - } - assertNoFailures(client().admin().indices().prepareFlush().setForce(true).execute().actionGet()); - - assertAcked(client().admin().cluster().preparePutRepository("test-repo") - .setType("fs").setSettings(Settings.builder() - .put("location", randomRepoPath()))); - CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("foo").get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); - - Settings shadowSettings = Settings.builder() - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2).build(); - - logger.info("--> restore the index into shadow replica index"); - RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") - .setIndexSettings(shadowSettings).setWaitForCompletion(true) - .setRenamePattern("(.+)").setRenameReplacement("$1-copy") - .execute().actionGet(); - 
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - ensureGreen(); - refresh(); - Index index = resolveIndex("foo-copy"); - for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) { - - if (service.hasIndex(index)) { - IndexShard shard = service.indexServiceSafe(index).getShardOrNull(0); - if (shard.routingEntry().primary()) { - assertFalse(shard instanceof ShadowIndexShard); - } else { - assertTrue(shard instanceof ShadowIndexShard); - } - } - } - logger.info("--> performing query"); - SearchResponse resp = client().prepareSearch("foo-copy").setQuery(matchAllQuery()).get(); - assertHitCount(resp, numDocs); - - } - - @TestLogging("org.elasticsearch.gateway:TRACE") - public void testIndexWithFewDocuments() throws Exception { - final Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - internalCluster().startNodes(3, nodeSettings); - final String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - ensureGreen(IDX); - - // So basically, the primary should fail and the replica will need to - // replay the translog, this is what this tests - client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); - - IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(IDX).clear().setTranslog(true).get(); - assertEquals(2, indicesStatsResponse.getIndex(IDX).getPrimaries().getTranslog().estimatedNumberOfOperations()); - assertEquals(2, indicesStatsResponse.getIndex(IDX).getTotal().getTranslog().estimatedNumberOfOperations()); - Index index = resolveIndex(IDX); - for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { - IndexService indexService = service.indexService(index); - if (indexService != null) { - IndexShard shard = indexService.getShard(0); - TranslogStats translogStats = shard.translogStats(); - assertTrue(translogStats != null || shard instanceof ShadowIndexShard); - if (translogStats != null) { - assertEquals(2, translogStats.estimatedNumberOfOperations()); - } - } - } - - // Check that we can get doc 1 and 2, because we are doing realtime - // gets and getting from the primary - GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get(); - GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - flushAndRefresh(IDX); - client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get(); - refresh(); - - // Check that we can get doc 1 and 2 without realtime - gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(false).get(); - gResp2 = client().prepareGet(IDX, "doc", "2").setRealtime(false).get(); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - 
logger.info("--> restarting all nodes"); - if (randomBoolean()) { - logger.info("--> rolling restart"); - internalCluster().rollingRestart(); - } else { - logger.info("--> full restart"); - internalCluster().fullRestart(); - } - - client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); - ensureGreen(IDX); - flushAndRefresh(IDX); - - logger.info("--> performing query"); - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, 4); - - logger.info("--> deleting index"); - assertAcked(client().admin().indices().prepareDelete(IDX)); - } - - public void testReplicaToPrimaryPromotion() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - String node1 = internalCluster().startNode(nodeSettings); - String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); - - GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get(); - GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertTrue(gResp1.isExists()); - assertTrue(gResp2.isExists()); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - // Node1 has the primary, now node2 has the replica - internalCluster().startNode(nodeSettings); - ensureGreen(IDX); - client().admin().cluster().prepareHealth().setWaitForNodes("2").get(); - flushAndRefresh(IDX); - - logger.info("--> stopping node1 [{}]", node1); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1)); - ensureClusterSizeConsistency(); // wait for the new node to be elected and process the node leave - ensureYellow(IDX); - - logger.info("--> performing query"); - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, 2); - - gResp1 = client().prepareGet(IDX, "doc", "1").get(); - gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertTrue(gResp1.isExists()); - assertTrue(gResp2.toString(), gResp2.isExists()); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - client().prepareIndex(IDX, "doc", "1").setSource("foo", "foobar").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "foobar").get(); - gResp1 = client().prepareGet(IDX, "doc", "1").get(); - gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertTrue(gResp1.isExists()); - assertTrue(gResp2.toString(), gResp2.isExists()); - assertThat(gResp1.getSource().get("foo"), equalTo("foobar")); - assertThat(gResp2.getSource().get("foo"), equalTo("foobar")); - } - - public void testPrimaryRelocation() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - String node1 = internalCluster().startNode(nodeSettings); - String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - 
.put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); - - GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get(); - GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertTrue(gResp1.isExists()); - assertTrue(gResp2.isExists()); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - // Node1 has the primary, now node2 has the replica - String node2 = internalCluster().startNode(nodeSettings); - ensureGreen(IDX); - client().admin().cluster().prepareHealth().setWaitForNodes("2").get(); - flushAndRefresh(IDX); - - // now prevent primary from being allocated on node 1 move to node_3 - String node3 = internalCluster().startNode(nodeSettings); - Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build(); - client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet(); - - ensureGreen(IDX); - // check if primary has relocated to node3 - assertEquals(internalCluster().clusterService(node3).localNode().getId(), - client().admin().cluster().prepareState().get().getState().routingTable().index(IDX).shard(0).primaryShard().currentNodeId()); - logger.info("--> performing query"); - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, 2); - - gResp1 = client().prepareGet(IDX, "doc", "1").get(); - gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertTrue(gResp1.isExists()); - assertTrue(gResp2.toString(), gResp2.isExists()); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get(); - gResp1 = client().prepareGet(IDX, "doc", "3").setPreference("_primary").get(); - gResp2 = client().prepareGet(IDX, "doc", "4").setPreference("_primary").get(); - assertTrue(gResp1.isExists()); - assertTrue(gResp2.isExists()); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - } - - public void testPrimaryRelocationWithConcurrentIndexing() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - String node1 = internalCluster().startNode(nodeSettings); - final String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - // Node1 has the primary, now node2 has the replica - String node2 = internalCluster().startNode(nodeSettings); - ensureGreen(IDX); - flushAndRefresh(IDX); - String node3 = internalCluster().startNode(nodeSettings); - final AtomicInteger counter = new AtomicInteger(0); - final CountDownLatch 
started = new CountDownLatch(1); - - final int numPhase1Docs = scaledRandomIntBetween(25, 200); - final int numPhase2Docs = scaledRandomIntBetween(25, 200); - final CountDownLatch phase1finished = new CountDownLatch(1); - final CountDownLatch phase2finished = new CountDownLatch(1); - final CopyOnWriteArrayList exceptions = new CopyOnWriteArrayList<>(); - Thread thread = new Thread() { - @Override - public void run() { - started.countDown(); - while (counter.get() < (numPhase1Docs + numPhase2Docs)) { - try { - final IndexResponse indexResponse = client().prepareIndex(IDX, "doc", - Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get(); - assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - } catch (Exception e) { - exceptions.add(e); - } - final int docCount = counter.get(); - if (docCount == numPhase1Docs) { - phase1finished.countDown(); - } - } - logger.info("--> stopping indexing thread"); - phase2finished.countDown(); - } - }; - thread.start(); - started.await(); - phase1finished.await(); // wait for a certain number of documents to be indexed - logger.info("--> excluding {} from allocation", node1); - // now prevent primary from being allocated on node 1 move to node_3 - Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build(); - client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet(); - // wait for more documents to be indexed post-recovery, also waits for - // indexing thread to stop - phase2finished.await(); - ExceptionsHelper.rethrowAndSuppress(exceptions); - ensureGreen(IDX); - thread.join(); - logger.info("--> performing query"); - flushAndRefresh(); - - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, counter.get()); - assertHitCount(resp, numPhase1Docs + numPhase2Docs); - } - - public void testPrimaryRelocationWhereRecoveryFails() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = Settings.builder() - .put("node.add_lock_id_to_custom_path", false) - .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath) - .build(); - - String node1 = internalCluster().startNode(nodeSettings); - final String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - // Node1 has the primary, now node2 has the replica - String node2 = internalCluster().startNode(nodeSettings); - ensureGreen(IDX); - flushAndRefresh(IDX); - String node3 = internalCluster().startNode(nodeSettings); - final AtomicInteger counter = new AtomicInteger(0); - final CountDownLatch started = new CountDownLatch(1); - - final int numPhase1Docs = scaledRandomIntBetween(25, 200); - final int numPhase2Docs = scaledRandomIntBetween(25, 200); - final int numPhase3Docs = scaledRandomIntBetween(25, 200); - final CountDownLatch phase1finished = new CountDownLatch(1); - final CountDownLatch phase2finished = new CountDownLatch(1); - final CountDownLatch phase3finished = new CountDownLatch(1); - - final AtomicBoolean keepFailing = new AtomicBoolean(true); - - MockTransportService mockTransportService = ((MockTransportService) 
internalCluster().getInstance(TransportService.class, node1)); - mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, node3), - new MockTransportService.DelegateTransport(mockTransportService.original()) { - - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { - if (keepFailing.get() && action.equals(PeerRecoveryTargetService.Actions.TRANSLOG_OPS)) { - logger.info("--> failing translog ops"); - throw new ElasticsearchException("failing on purpose"); - } - super.sendRequest(connection, requestId, action, request, options); - } - }); - - Thread thread = new Thread() { - @Override - public void run() { - started.countDown(); - while (counter.get() < (numPhase1Docs + numPhase2Docs + numPhase3Docs)) { - final IndexResponse indexResponse = client().prepareIndex(IDX, "doc", - Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get(); - assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - final int docCount = counter.get(); - if (docCount == numPhase1Docs) { - phase1finished.countDown(); - } else if (docCount == (numPhase1Docs + numPhase2Docs)) { - phase2finished.countDown(); - } - } - logger.info("--> stopping indexing thread"); - phase3finished.countDown(); - } - }; - thread.start(); - started.await(); - phase1finished.await(); // wait for a certain number of documents to be indexed - logger.info("--> excluding {} from allocation", node1); - // now prevent primary from being allocated on node 1 move to node_3 - Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build(); - client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet(); - // wait for more documents to be indexed post-recovery, also waits for - // indexing thread to stop - phase2finished.await(); - // stop failing - keepFailing.set(false); - // wait for more docs to be indexed - phase3finished.await(); - ensureGreen(IDX); - thread.join(); - logger.info("--> performing query"); - flushAndRefresh(); - - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, counter.get()); - } - - public void testIndexWithShadowReplicasCleansUp() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - final int nodeCount = randomIntBetween(2, 5); - logger.info("--> starting {} nodes", nodeCount); - final List nodes = internalCluster().startNodes(nodeCount, nodeSettings); - final String IDX = "test"; - final Tuple numPrimariesAndReplicas = randomPrimariesAndReplicas(nodeCount); - final int numPrimaries = numPrimariesAndReplicas.v1(); - final int numReplicas = numPrimariesAndReplicas.v2(); - logger.info("--> creating index {} with {} primary shards and {} replicas", IDX, numPrimaries, numReplicas); - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numPrimaries) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - ensureGreen(IDX); - - client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", 
"2").setSource("foo", "bar").get(); - flushAndRefresh(IDX); - - GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get(); - GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - logger.info("--> performing query"); - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, 2); - - logger.info("--> deleting index " + IDX); - assertAcked(client().admin().indices().prepareDelete(IDX)); - assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class)); - assertPathHasBeenCleared(dataPath); - //TODO: uncomment the test below when https://github.com/elastic/elasticsearch/issues/17695 is resolved. - //assertIndicesDirsDeleted(nodes); - } - - /** - * Tests that shadow replicas can be "naturally" rebalanced and relocated - * around the cluster. By "naturally" I mean without using the reroute API - */ - // This test failed on CI when trying to assert that all the shard data has been deleted - // from the index path. It has not been reproduced locally. Despite the IndicesService - // deleting the index and hence, deleting all the shard data for the index, the test - // failure still showed some Lucene files in the data directory for that index. Not sure - // why that is, so turning on more logging here. - @TestLogging("org.elasticsearch.indices:TRACE,org.elasticsearch.env:TRACE,_root:DEBUG") - public void testShadowReplicaNaturalRelocation() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - final List nodes = internalCluster().startNodes(2, nodeSettings); - String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - ensureGreen(IDX); - - int docCount = randomIntBetween(10, 100); - List builders = new ArrayList<>(); - for (int i = 0; i < docCount; i++) { - builders.add(client().prepareIndex(IDX, "doc", i + "").setSource("foo", "bar")); - } - indexRandom(true, true, true, builders); - flushAndRefresh(IDX); - - // start a third node, with 5 shards each on the other nodes, they - // should relocate some to the third node - final String node3 = internalCluster().startNode(nodeSettings); - nodes.add(node3); - - assertBusy(new Runnable() { - @Override - public void run() { - client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); - ClusterStateResponse resp = client().admin().cluster().prepareState().get(); - RoutingNodes nodes = resp.getState().getRoutingNodes(); - for (RoutingNode node : nodes) { - logger.info("--> node has {} shards (needs at least 2)", node.numberOfOwningShards()); - assertThat("at least 2 shards on node", node.numberOfOwningShards(), greaterThanOrEqualTo(2)); - } - } - }); - ensureYellow(IDX); - - logger.info("--> performing query"); - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, docCount); - - assertAcked(client().admin().indices().prepareDelete(IDX)); - 
assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class)); - assertPathHasBeenCleared(dataPath); - //TODO: uncomment the test below when https://github.com/elastic/elasticsearch/issues/17695 is resolved. - //assertIndicesDirsDeleted(nodes); - } - - public void testShadowReplicasUsingFieldData() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - internalCluster().startNodes(3, nodeSettings); - String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=keyword").get(); - ensureGreen(IDX); - - client().prepareIndex(IDX, "doc", "1").setSource("foo", "foo").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "3").setSource("foo", "baz").get(); - client().prepareIndex(IDX, "doc", "4").setSource("foo", "eggplant").get(); - flushAndRefresh(IDX); - - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).addDocValueField("foo").addSort("foo", SortOrder.ASC).get(); - assertHitCount(resp, 4); - assertOrderedSearchHits(resp, "2", "3", "4", "1"); - SearchHit[] hits = resp.getHits().getHits(); - assertThat(hits[0].field("foo").getValue().toString(), equalTo("bar")); - assertThat(hits[1].field("foo").getValue().toString(), equalTo("baz")); - assertThat(hits[2].field("foo").getValue().toString(), equalTo("eggplant")); - assertThat(hits[3].field("foo").getValue().toString(), equalTo("foo")); - } - - /** wait until none of the nodes have shards allocated on them */ - private void assertNoShardsOn(final List nodeList) throws Exception { - assertBusy(new Runnable() { - @Override - public void run() { - ClusterStateResponse resp = client().admin().cluster().prepareState().get(); - RoutingNodes nodes = resp.getState().getRoutingNodes(); - for (RoutingNode node : nodes) { - logger.info("--> node {} has {} shards", node.node().getName(), node.numberOfOwningShards()); - if (nodeList.contains(node.node().getName())) { - assertThat("no shards on node", node.numberOfOwningShards(), equalTo(0)); - } - } - } - }, 1, TimeUnit.MINUTES); - } - - /** wait until the node has the specified number of shards allocated on it */ - private void assertShardCountOn(final String nodeName, final int shardCount) throws Exception { - assertBusy(new Runnable() { - @Override - public void run() { - ClusterStateResponse resp = client().admin().cluster().prepareState().get(); - RoutingNodes nodes = resp.getState().getRoutingNodes(); - for (RoutingNode node : nodes) { - logger.info("--> node {} has {} shards", node.node().getName(), node.numberOfOwningShards()); - if (nodeName.equals(node.node().getName())) { - assertThat(node.numberOfOwningShards(), equalTo(shardCount)); - } - } - } - }, 1, TimeUnit.MINUTES); - } - - public void testIndexOnSharedFSRecoversToAnyNode() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - Settings fooSettings = Settings.builder().put(nodeSettings).put("node.attr.affinity", "foo").build(); - Settings barSettings = Settings.builder().put(nodeSettings).put("node.attr.affinity", "bar").build(); - - List 
allNodes = internalCluster().startNodes(fooSettings, fooSettings, barSettings, barSettings); - List fooNodes = allNodes.subList(0, 2); - List barNodes = allNodes.subList(2, 4); - String IDX = "test"; - - Settings includeFoo = Settings.builder() - .put("index.routing.allocation.include.affinity", "foo") - .build(); - Settings includeBar = Settings.builder() - .put("index.routing.allocation.include.affinity", "bar") - .build(); - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true) - .put(includeFoo) // start with requiring the shards on "foo" - .build(); - - // only one node, so all primaries will end up on node1 - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=keyword").get(); - ensureGreen(IDX); - - // Index some documents - client().prepareIndex(IDX, "doc", "1").setSource("foo", "foo").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "3").setSource("foo", "baz").get(); - client().prepareIndex(IDX, "doc", "4").setSource("foo", "eggplant").get(); - flushAndRefresh(IDX); - - // put shards on "bar" - client().admin().indices().prepareUpdateSettings(IDX).setSettings(includeBar).get(); - - // wait for the shards to move from "foo" nodes to "bar" nodes - assertNoShardsOn(fooNodes); - - // put shards back on "foo" - client().admin().indices().prepareUpdateSettings(IDX).setSettings(includeFoo).get(); - - // wait for the shards to move from "bar" nodes to "foo" nodes - assertNoShardsOn(barNodes); - - // Stop a foo node - logger.info("--> stopping first 'foo' node"); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get(0))); - - // Ensure that the other foo node has all the shards now - assertShardCountOn(fooNodes.get(1), 5); - - // Assert no shards on the "bar" nodes - assertNoShardsOn(barNodes); - - // Stop the second "foo" node - logger.info("--> stopping second 'foo' node"); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get(1))); - - // The index should still be able to be allocated (on the "bar" nodes), - // all the "foo" nodes are gone - ensureGreen(IDX); - - // Start another "foo" node and make sure the index moves back - logger.info("--> starting additional 'foo' node"); - String newFooNode = internalCluster().startNode(fooSettings); - - assertShardCountOn(newFooNode, 5); - assertNoShardsOn(barNodes); - } - - public void testDeletingClosedIndexRemovesFiles() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath.getParent()); - - final int numNodes = randomIntBetween(2, 5); - logger.info("--> starting {} nodes", numNodes); - final List nodes = internalCluster().startNodes(numNodes, nodeSettings); - final String IDX = "test"; - final Tuple numPrimariesAndReplicas = randomPrimariesAndReplicas(numNodes); - final int numPrimaries = numPrimariesAndReplicas.v1(); - final int numReplicas = numPrimariesAndReplicas.v2(); - logger.info("--> creating index {} with {} primary shards and {} replicas", IDX, numPrimaries, numReplicas); - - assert numPrimaries > 0; - assert numReplicas >= 0; - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numPrimaries) - 
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - ensureGreen(IDX); - - int docCount = randomIntBetween(10, 100); - List builders = new ArrayList<>(); - for (int i = 0; i < docCount; i++) { - builders.add(client().prepareIndex(IDX, "doc", i + "").setSource("foo", "bar")); - } - indexRandom(true, true, true, builders); - flushAndRefresh(IDX); - - logger.info("--> closing index {}", IDX); - client().admin().indices().prepareClose(IDX).get(); - ensureGreen(IDX); - - logger.info("--> deleting closed index"); - client().admin().indices().prepareDelete(IDX).get(); - assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class)); - assertPathHasBeenCleared(dataPath); - assertIndicesDirsDeleted(nodes); - } - - public void testNodeJoinsWithoutShadowReplicaConfigured() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - internalCluster().startNodes(2, nodeSettings); - String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - - client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); - flushAndRefresh(IDX); - - internalCluster().startNodes(1); - ensureYellow(IDX); - - final ClusterHealthResponse clusterHealth = client().admin().cluster() - .prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .execute() - .actionGet(); - assertThat(clusterHealth.getNumberOfNodes(), equalTo(3)); - // the new node is not configured for a shadow replica index, so no shards should have been assigned to it - assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); - } - - private static void assertIndicesDirsDeleted(final List nodes) throws IOException { - for (String node : nodes) { - final NodeEnvironment nodeEnv = internalCluster().getInstance(NodeEnvironment.class, node); - assertThat(nodeEnv.availableIndexFolders(), equalTo(Collections.emptySet())); - } - } - - private static Tuple randomPrimariesAndReplicas(final int numNodes) { - final int numPrimaries; - final int numReplicas; - if (randomBoolean()) { - // test with some nodes having no shards - numPrimaries = 1; - numReplicas = randomIntBetween(0, numNodes - 2); - } else { - // test with all nodes having at least one shard - numPrimaries = randomIntBetween(1, 5); - numReplicas = numNodes - 1; - } - return Tuple.tuple(numPrimaries, numReplicas); - } - -} diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index 9d9631e1b006c..12071f0eac799 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -38,10 +38,7 @@ import java.io.IOException; 
 import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
 import static java.util.Collections.emptyList;
 import static java.util.Collections.emptyMap;
@@ -105,19 +102,6 @@ public void testOverrideDefaultIndexAnalyzerIsUnsupported() {
         assertTrue(e.getMessage().contains("[index.analysis.analyzer.default_index] is not supported"));
     }
 
-    public void testBackCompatOverrideDefaultIndexAnalyzer() {
-        Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(),
-            VersionUtils.getPreviousVersion(Version.V_5_0_0_alpha1));
-        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
-        IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings),
-            singletonMap("default_index", analyzerProvider("default_index")), emptyMap(), emptyMap(), emptyMap(), emptyMap());
-        assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
-        assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
-        assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class));
-        assertWarnings("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] " +
-            "instead for index [index]");
-    }
-
     public void testOverrideDefaultSearchAnalyzer() {
         Version version = VersionUtils.randomVersion(random());
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
@@ -128,22 +112,6 @@ public void testOverrideDefaultSearchAnalyzer() {
         assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
     }
 
-    public void testBackCompatOverrideDefaultIndexAndSearchAnalyzer() {
-        Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(),
-            VersionUtils.getPreviousVersion(Version.V_5_0_0_alpha1));
-        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
-        Map<String, AnalyzerProvider<?>> analyzers = new HashMap<>();
-        analyzers.put("default_index", analyzerProvider("default_index"));
-        analyzers.put("default_search", analyzerProvider("default_search"));
-        IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings),
-            analyzers, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
-        assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
-        assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
-        assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class));
-        assertWarnings("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] " +
-            "instead for index [index]");
-    }
-
     public void testConfigureCamelCaseTokenFilter() throws IOException {
         Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
         Settings indexSettings = Settings.builder()
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
index 7dc55b4370009..214515d1702ca 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
+++
b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -56,18 +56,18 @@ public void testThatDefaultAndStandardAnalyzerAreTheSameInstance() { public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() { assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT), - is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_2_0_0))); + is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_5_0_0))); } public void testThatInstancesAreCachedAndReused() { assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT), PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT)); // same lucene version should be cached - assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_2_0_0), - PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_2_0_1)); + assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_2_2_UNRELEASED), + PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_2_3_UNRELEASED)); - assertNotSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_2_0_0), - PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_2_2_0)); + assertNotSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_0_0), + PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_0_1)); } public void testThatAnalyzersAreUsedInMapping() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 4b0c833b6648d..26590c54ffe84 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -88,7 +88,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; @@ -149,12 +148,13 @@ import java.util.Arrays; import java.util.Base64; import java.util.Collections; -import java.util.HashMap; +import java.util.Comparator; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Queue; import java.util.Set; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; @@ -1809,8 +1809,19 @@ public void testConcurrentGetAndSetOnPrimary() throws IOException, InterruptedEx Thread[] thread = new Thread[randomIntBetween(3, 5)]; CountDownLatch startGun = new CountDownLatch(thread.length); final int opsPerThread = randomIntBetween(10, 20); - final Set currentValues = ConcurrentCollections.newConcurrentSet(); + class OpAndVersion { + final long version; + final String removed; + final String added; + + OpAndVersion(long version, String removed, String added) { + this.version = version; + this.removed = removed; + this.added = added; + } + } final AtomicInteger idGenerator = new AtomicInteger(); + final Queue history = ConcurrentCollections.newQueue(); ParsedDocument doc = testParsedDocument("1", "test", null, testDocument(), bytesArray(""), null); final Term uidTerm = newUid(doc); engine.index(indexForDoc(doc)); @@ -1838,10 +1849,7 @@ public void testConcurrentGetAndSetOnPrimary() throws IOException, InterruptedEx PRIMARY, System.currentTimeMillis(), -1, false); Engine.IndexResult indexResult = engine.index(index); if (indexResult.hasFailure() == false) { - 
boolean exists = removed == null ? true : currentValues.remove(removed);
-                            assertTrue(removed + " should exist", exists);
-                            exists = currentValues.add(added);
-                            assertTrue(added + " should not exist", exists);
+                            history.add(new OpAndVersion(indexResult.getVersion(), removed, added));
                         }
 
                     } catch (IOException e) {
@@ -1854,6 +1862,20 @@ public void testConcurrentGetAndSetOnPrimary() throws IOException, InterruptedEx
         for (int i = 0; i < thread.length; i++) {
             thread[i].join();
         }
+        List<OpAndVersion> sortedHistory = new ArrayList<>(history);
+        sortedHistory.sort(Comparator.comparing(o -> o.version));
+        Set<String> currentValues = new HashSet<>();
+        for (int i = 0; i < sortedHistory.size(); i++) {
+            OpAndVersion op = sortedHistory.get(i);
+            if (i > 0) {
+                assertThat("duplicate version", op.version, not(equalTo(sortedHistory.get(i - 1).version)));
+            }
+            boolean exists = op.removed == null ? true : currentValues.remove(op.removed);
+            assertTrue(op.removed + " should exist", exists);
+            exists = currentValues.add(op.added);
+            assertTrue(op.added + " should not exist", exists);
+        }
+
         try (Engine.GetResult get = engine.get(new Engine.Get(true, uidTerm))) {
             FieldsVisitor visitor = new FieldsVisitor(true);
             get.docIdAndVersion().context.reader().document(get.docIdAndVersion().docId, visitor);
diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
deleted file mode 100644
index 53708b28dfb0f..0000000000000
--- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
+++ /dev/null
@@ -1,1000 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ - -package org.elasticsearch.index.engine; - -import org.apache.lucene.codecs.Codec; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.LongPoint; -import org.apache.lucene.document.NumericDocValuesField; -import org.apache.lucene.document.TextField; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy; -import org.apache.lucene.index.LiveIndexWriterConfig; -import org.apache.lucene.index.MergePolicy; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.SnapshotDeletionPolicy; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.MockDirectoryWrapper; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.codec.CodecService; -import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.SeqNoFieldMapper; -import org.elasticsearch.index.mapper.SourceFieldMapper; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.UidFieldMapper; -import org.elasticsearch.index.shard.RefreshListeners; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardUtils; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.DirectoryUtils; -import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TranslogConfig; -import org.elasticsearch.test.DummyShardLock; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.hamcrest.MatcherAssert; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class ShadowEngineTests extends ESTestCase { - - protected final ShardId shardId = new ShardId("index", "_na_", 1); - - protected ThreadPool threadPool; - - private Store store; - private Store storeReplica; - - - protected Engine primaryEngine; - protected Engine replicaEngine; - - private IndexSettings 
defaultSettings; - private String codecName; - private Path dirPath; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - CodecService codecService = new CodecService(null, logger); - String name = Codec.getDefault().getName(); - if (Arrays.asList(codecService.availableCodecs()).contains(name)) { - // some codecs are read only so we only take the ones that we have in the service and randomly - // selected by lucene test case. - codecName = name; - } else { - codecName = "default"; - } - defaultSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() - .put(IndexSettings.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us - .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build()); // TODO randomize more settings - - threadPool = new TestThreadPool(getClass().getName()); - dirPath = createTempDir(); - store = createStore(dirPath); - storeReplica = createStore(dirPath); - Lucene.cleanLuceneIndex(store.directory()); - Lucene.cleanLuceneIndex(storeReplica.directory()); - primaryEngine = createInternalEngine(store, createTempDir("translog-primary")); - LiveIndexWriterConfig currentIndexWriterConfig = ((InternalEngine)primaryEngine).getCurrentIndexWriterConfig(); - - assertEquals(primaryEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); - assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); - if (randomBoolean()) { - primaryEngine.config().setEnableGcDeletes(false); - } - - replicaEngine = createShadowEngine(storeReplica); - - assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); - if (randomBoolean()) { - replicaEngine.config().setEnableGcDeletes(false); - } - } - - @Override - @After - public void tearDown() throws Exception { - super.tearDown(); - replicaEngine.close(); - storeReplica.close(); - primaryEngine.close(); - store.close(); - terminate(threadPool); - } - - private ParseContext.Document testDocumentWithTextField() { - ParseContext.Document document = testDocument(); - document.add(new TextField("value", "test", Field.Store.YES)); - return document; - } - - private ParseContext.Document testDocument() { - return new ParseContext.Document(); - } - - - private ParsedDocument testParsedDocument(String id, String type, String routing, ParseContext.Document document, BytesReference source, Mapping mappingsUpdate) { - Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE); - Field versionField = new NumericDocValuesField("_version", 0); - SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID(); - document.add(uidField); - document.add(versionField); - document.add(seqID.seqNo); - document.add(seqID.seqNoDocValue); - document.add(seqID.primaryTerm); - document.add(new LongPoint("point_field", 42)); // so that points report memory/disk usage - return new ParsedDocument(versionField, seqID, id, type, routing, Arrays.asList(document), source, XContentType.JSON, - mappingsUpdate); - } - - protected Store createStore(Path p) throws IOException { - return createStore(newMockFSDirectory(p)); - } - - - protected Store createStore(final Directory directory) throws IOException { - IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), Settings.EMPTY); - final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { - 
@Override - public Directory newDirectory() throws IOException { - return directory; - } - }; - return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); - } - - protected SnapshotDeletionPolicy createSnapshotDeletionPolicy() { - return new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); - } - - protected ShadowEngine createShadowEngine(Store store) { - return createShadowEngine(defaultSettings, store); - } - - protected InternalEngine createInternalEngine(Store store, Path translogPath) { - return createInternalEngine(defaultSettings, store, translogPath); - } - - protected ShadowEngine createShadowEngine(IndexSettings indexSettings, Store store) { - return new ShadowEngine(config(indexSettings, store, null, null, null)); - } - - protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store store, Path translogPath) { - return createInternalEngine(indexSettings, store, translogPath, newMergePolicy()); - } - - protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) { - EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, null); - return new InternalEngine(config); - } - - public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, - RefreshListeners refreshListeners) { - IndexWriterConfig iwc = newIndexWriterConfig(); - final EngineConfig.OpenMode openMode; - try { - if (Lucene.indexExists(store.directory()) == false) { - openMode = EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG; - } else { - openMode = EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG; - } - } catch (IOException e) { - throw new ElasticsearchException("can't find index?", e); - } - Engine.EventListener eventListener = new Engine.EventListener() { - @Override - public void onFailedEngine(String reason, @Nullable Exception e) { - // we don't need to notify anybody in this test - } - }; - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); - EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, createSnapshotDeletionPolicy(), - mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, null, - IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - TimeValue.timeValueMinutes(5), refreshListeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); - - return config; - } - -// protected Term newUid(String id) { -// return new Term("_uid", id); -// } - - protected Term newUid(ParsedDocument doc) { - return new Term("_uid", doc.uid()); - } - - private Engine.Index indexForDoc(ParsedDocument doc) { - return new Engine.Index(newUid(doc), doc); - } - - protected static final BytesReference B_1 = new BytesArray(new byte[]{1}); - protected static final BytesReference B_2 = new BytesArray(new byte[]{2}); - protected static final BytesReference B_3 = new BytesArray(new byte[]{3}); - - public void testCommitStats() throws IOException { - // create a doc and refresh - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - primaryEngine.index(indexForDoc(doc)); - - CommitStats stats1 = replicaEngine.commitStats(); - assertThat(stats1.getGeneration(), greaterThan(0L)); - assertThat(stats1.getId(), notNullValue()); - assertThat(stats1.getUserData(), 
hasKey(Translog.TRANSLOG_GENERATION_KEY)); - - // flush the primary engine - primaryEngine.flush(); - // flush on replica to make flush visible - replicaEngine.flush(); - - CommitStats stats2 = replicaEngine.commitStats(); - assertThat(stats2.getGeneration(), greaterThan(stats1.getGeneration())); - assertThat(stats2.getId(), notNullValue()); - assertThat(stats2.getId(), not(equalTo(stats1.getId()))); - assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_GENERATION_KEY)); - assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_UUID_KEY)); - assertThat(stats2.getUserData().get(Translog.TRANSLOG_GENERATION_KEY), not(equalTo(stats1.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); - assertThat(stats2.getUserData().get(Translog.TRANSLOG_UUID_KEY), equalTo(stats1.getUserData().get(Translog.TRANSLOG_UUID_KEY))); - } - - public void testSegments() throws Exception { - primaryEngine.close(); // recreate without merging - primaryEngine = createInternalEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE); - List segments = primaryEngine.segments(false); - assertThat(segments.isEmpty(), equalTo(true)); - assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(0L)); - assertThat(primaryEngine.segmentsStats(false).getMemoryInBytes(), equalTo(0L)); - - // create a doc and refresh - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - primaryEngine.index(indexForDoc(doc)); - - ParsedDocument doc2 = testParsedDocument("2", "test", null, testDocumentWithTextField(), B_2, null); - primaryEngine.index(indexForDoc(doc2)); - primaryEngine.refresh("test"); - - segments = primaryEngine.segments(false); - assertThat(segments.size(), equalTo(1)); - SegmentsStats stats = primaryEngine.segmentsStats(false); - assertThat(stats.getCount(), equalTo(1L)); - assertThat(stats.getTermsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getTermVectorsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getNormsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getPointsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0L)); - assertThat(segments.get(0).isCommitted(), equalTo(false)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertTrue(segments.get(0).isCompound()); - assertThat(segments.get(0).ramTree, nullValue()); - - // Check that the replica sees nothing - segments = replicaEngine.segments(false); - assertThat(segments.size(), equalTo(0)); - stats = replicaEngine.segmentsStats(false); - assertThat(stats.getCount(), equalTo(0L)); - assertThat(stats.getTermsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getStoredFieldsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getTermVectorsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getNormsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getPointsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getDocValuesMemoryInBytes(), equalTo(0L)); - assertThat(segments.size(), equalTo(0)); - - // flush the primary engine - primaryEngine.flush(); - // refresh the replica - replicaEngine.refresh("tests"); - - // Check that the primary AND replica sees segments now - segments = primaryEngine.segments(false); - assertThat(segments.size(), equalTo(1)); - assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(1L)); - 
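The commit stats assertions above hinge on user data written with each Lucene commit; the translog generation and translog UUID the test looks for live in that map. As a rough, self-contained sketch (plain Lucene rather than the Engine API, with a made-up class name), the same map can be read back from the latest commit of any index directory:

```java
import java.nio.file.Paths;
import java.util.Map;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

// Hypothetical helper: prints the user data stored with the most recent Lucene commit,
// which is where values such as the translog generation and translog UUID end up.
public class CommitUserDataSketch {
    public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Paths.get(args[0]))) {
            Map<String, String> userData = SegmentInfos.readLatestCommit(dir).getUserData();
            userData.forEach((key, value) -> System.out.println(key + " = " + value));
        }
    }
}
```

The generation value changes with every flush while the UUID stays stable, which is exactly the relationship the assertions above verify between the two commit stats.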
assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - - segments = replicaEngine.segments(false); - assertThat(segments.size(), equalTo(1)); - assertThat(replicaEngine.segmentsStats(false).getCount(), equalTo(1L)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - - - ParsedDocument doc3 = testParsedDocument("3", "test", null, testDocumentWithTextField(), B_3, null); - primaryEngine.index(indexForDoc(doc3)); - primaryEngine.refresh("test"); - - segments = primaryEngine.segments(false); - assertThat(segments.size(), equalTo(2)); - assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(2L)); - assertThat(primaryEngine.segmentsStats(false).getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes())); - assertThat(primaryEngine.segmentsStats(false).getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes())); - assertThat(primaryEngine.segmentsStats(false).getTermVectorsMemoryInBytes(), equalTo(0L)); - assertThat(primaryEngine.segmentsStats(false).getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes())); - assertThat(primaryEngine.segmentsStats(false).getPointsMemoryInBytes(), greaterThan(stats.getPointsMemoryInBytes())); - assertThat(primaryEngine.segmentsStats(false).getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes())); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - assertThat(segments.get(1).isCommitted(), equalTo(false)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - // Make visible to shadow replica - primaryEngine.flush(); - replicaEngine.refresh("test"); - - segments = replicaEngine.segments(false); - assertThat(segments.size(), equalTo(2)); - assertThat(replicaEngine.segmentsStats(false).getCount(), equalTo(2L)); - assertThat(replicaEngine.segmentsStats(false).getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes())); - assertThat(replicaEngine.segmentsStats(false).getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes())); - assertThat(replicaEngine.segmentsStats(false).getTermVectorsMemoryInBytes(), equalTo(0L)); - assertThat(replicaEngine.segmentsStats(false).getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes())); - assertThat(replicaEngine.segmentsStats(false).getPointsMemoryInBytes(), greaterThan(stats.getPointsMemoryInBytes())); - assertThat(replicaEngine.segmentsStats(false).getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes())); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - 
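The removed testSegments keeps repeating one pattern: index on the primary, flush the primary, refresh the replica, and only then expect the replica to see the new segments. A minimal stand-alone sketch of that visibility rule over a single shared directory, assuming a Lucene 6.x-era classpath; the writer stands in for the primary engine and a reopened DirectoryReader for the shadow replica:

```java
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class SharedDirectoryVisibilitySketch {
    public static void main(String[] args) throws Exception {
        Directory shared = new RAMDirectory();        // stands in for the shared filesystem
        IndexWriter primary = new IndexWriter(shared, new IndexWriterConfig(new StandardAnalyzer()));
        primary.commit();                             // empty commit so the "replica" can open the index

        DirectoryReader replica = DirectoryReader.open(shared);   // read-only view, like a shadow engine

        Document doc = new Document();
        doc.add(new TextField("value", "test", Field.Store.YES));
        primary.addDocument(doc);                     // indexed on the primary only

        // Refreshing the replica alone changes nothing: the document is not committed yet.
        System.out.println("visible before flush: " + (DirectoryReader.openIfChanged(replica) != null));

        primary.commit();                                                      // ~ primaryEngine.flush()
        DirectoryReader refreshed = DirectoryReader.openIfChanged(replica);    // ~ replicaEngine.refresh("test")
        IndexSearcher searcher = new IndexSearcher(refreshed);
        System.out.println("hits after flush and refresh: "
                + searcher.count(new TermQuery(new Term("value", "test"))));   // 1

        refreshed.close();
        replica.close();
        primary.close();
        shared.close();
    }
}
```

Until the writer commits there is nothing new on disk for the read-only side to pick up, which is why refreshing the shadow replica alone never made uncommitted primary changes visible.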
assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - assertThat(segments.get(1).isCommitted(), equalTo(true)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - primaryEngine.delete(new Engine.Delete("test", "1", newUid(doc))); - primaryEngine.refresh("test"); - - segments = primaryEngine.segments(false); - assertThat(segments.size(), equalTo(2)); - assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(2L)); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(1)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(1)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - assertThat(segments.get(1).isCommitted(), equalTo(true)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - // Make visible to shadow replica - primaryEngine.flush(); - replicaEngine.refresh("test"); - - ParsedDocument doc4 = testParsedDocument("4", "test", null, testDocumentWithTextField(), B_3, null); - primaryEngine.index(indexForDoc(doc4)); - primaryEngine.refresh("test"); - - segments = primaryEngine.segments(false); - assertThat(segments.size(), equalTo(3)); - assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(3L)); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(1)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(1)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - - assertThat(segments.get(1).isCommitted(), equalTo(true)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - assertThat(segments.get(2).isCommitted(), equalTo(false)); - assertThat(segments.get(2).isSearch(), equalTo(true)); - assertThat(segments.get(2).getNumDocs(), equalTo(1)); - assertThat(segments.get(2).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(2).isCompound(), equalTo(true)); - } - - public void testVerboseSegments() throws Exception { - primaryEngine.close(); // recreate without merging - primaryEngine = createInternalEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE); - List segments = primaryEngine.segments(true); - assertThat(segments.isEmpty(), equalTo(true)); - - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - primaryEngine.index(indexForDoc(doc)); - primaryEngine.refresh("test"); - - segments = primaryEngine.segments(true); - assertThat(segments.size(), equalTo(1)); - 
assertThat(segments.get(0).ramTree, notNullValue()); - - ParsedDocument doc2 = testParsedDocument("2", "test", null, testDocumentWithTextField(), B_2, null); - primaryEngine.index(indexForDoc(doc2)); - primaryEngine.refresh("test"); - ParsedDocument doc3 = testParsedDocument("3", "test", null, testDocumentWithTextField(), B_3, null); - primaryEngine.index(indexForDoc(doc3)); - primaryEngine.refresh("test"); - - segments = primaryEngine.segments(true); - assertThat(segments.size(), equalTo(3)); - assertThat(segments.get(0).ramTree, notNullValue()); - assertThat(segments.get(1).ramTree, notNullValue()); - assertThat(segments.get(2).ramTree, notNullValue()); - - // Now make the changes visible to the replica - primaryEngine.flush(); - replicaEngine.refresh("test"); - - segments = replicaEngine.segments(true); - assertThat(segments.size(), equalTo(3)); - assertThat(segments.get(0).ramTree, notNullValue()); - assertThat(segments.get(1).ramTree, notNullValue()); - assertThat(segments.get(2).ramTree, notNullValue()); - - } - - public void testShadowEngineIgnoresWriteOperations() throws Exception { - // create a document - ParseContext.Document document = testDocumentWithTextField(); - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); - ParsedDocument doc = testParsedDocument("1", "test", null, document, B_1, null); - try { - replicaEngine.index(indexForDoc(doc)); - fail("should have thrown an exception"); - } catch (UnsupportedOperationException e) {} - replicaEngine.refresh("test"); - - // its not there... - Engine.Searcher searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - Engine.GetResult getResult = replicaEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(false)); - getResult.release(); - - // index a document - document = testDocument(); - document.add(new TextField("value", "test1", Field.Store.YES)); - doc = testParsedDocument("1", "test", null, document, B_1, null); - try { - replicaEngine.index(indexForDoc(doc)); - fail("should have thrown an exception"); - } catch (UnsupportedOperationException e) {} - replicaEngine.refresh("test"); - - // its still not there... 
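The removed testShadowEngineIgnoresWriteOperations boils down to one contract: a shadow engine is a read-only view and must refuse index and delete calls. Stripped of the Elasticsearch plumbing, that is just a facade that throws UnsupportedOperationException; the interface and class names below are invented for the sketch:

```java
// Invented names for the sketch; not the Elasticsearch Engine API.
interface MiniEngine {
    void index(String id, String source);
    void delete(String id);
}

final class ReadOnlyMiniEngine implements MiniEngine {
    @Override
    public void index(String id, String source) {
        throw new UnsupportedOperationException("shadow engines do not accept writes");
    }

    @Override
    public void delete(String id) {
        throw new UnsupportedOperationException("shadow engines do not accept deletes");
    }
}

public class ReadOnlyEngineSketch {
    public static void main(String[] args) {
        MiniEngine replica = new ReadOnlyMiniEngine();
        try {
            replica.index("1", "{\"value\":\"test\"}");
        } catch (UnsupportedOperationException expected) {
            // the removed test wraps exactly this in try/fail/catch blocks
            System.out.println("rejected as expected: " + expected.getMessage());
        }
    }
}
```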
- searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - getResult = replicaEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(false)); - getResult.release(); - - // Now, add a document to the primary so we can test shadow engine deletes - document = testDocumentWithTextField(); - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); - doc = testParsedDocument("1", "test", null, document, B_1, null); - primaryEngine.index(indexForDoc(doc)); - primaryEngine.flush(); - replicaEngine.refresh("test"); - - // Now the replica can see it - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - - // And the replica can retrieve it - getResult = replicaEngine.get(new Engine.Get(false, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // try to delete it on the replica - try { - replicaEngine.delete(new Engine.Delete("test", "1", newUid(doc))); - fail("should have thrown an exception"); - } catch (UnsupportedOperationException e) {} - replicaEngine.flush(); - replicaEngine.refresh("test"); - primaryEngine.refresh("test"); - - // it's still there! - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - getResult = replicaEngine.get(new Engine.Get(false, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // it's still there on the primary also! - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - getResult = primaryEngine.get(new Engine.Get(false, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - } - - public void testSimpleOperations() throws Exception { - Engine.Searcher searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - searchResult.close(); - - // create a document - ParseContext.Document document = testDocumentWithTextField(); - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); - ParsedDocument doc = testParsedDocument("1", "test", null, document, B_1, null); - primaryEngine.index(indexForDoc(doc)); - - // its not there... 
- searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - - // not on the replica either... - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - - // but, we can still get it (in realtime) - Engine.GetResult getResult = primaryEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // can't get it from the replica, because it's not in the translog for a shadow replica - getResult = replicaEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(false)); - getResult.release(); - - // but, not there non realtime - getResult = primaryEngine.get(new Engine.Get(false, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - getResult.release(); - - // now its there... - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - - // also in non realtime - getResult = primaryEngine.get(new Engine.Get(false, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // still not in the replica because no flush - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - - // now do an update - document = testDocument(); - document.add(new TextField("value", "test1", Field.Store.YES)); - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_2), SourceFieldMapper.Defaults.FIELD_TYPE)); - doc = testParsedDocument("1", "test", null, document, B_2, null); - primaryEngine.index(indexForDoc(doc)); - - // its not updated yet... 
- searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); - searchResult.close(); - - // but, we can still get it (in realtime) - getResult = primaryEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // refresh and it should be updated - primaryEngine.refresh("test"); - - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); - searchResult.close(); - - // flush, now shadow replica should have the files - primaryEngine.flush(); - - // still not in the replica because the replica hasn't refreshed - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - - replicaEngine.refresh("test"); - - // the replica finally sees it because primary has flushed and replica refreshed - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); - searchResult.close(); - - // now delete - primaryEngine.delete(new Engine.Delete("test", "1", newUid(doc))); - - // its not deleted yet - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); - searchResult.close(); - - // but, get should not see it (in realtime) - getResult = primaryEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(false)); - getResult.release(); - - // refresh and it should be deleted - primaryEngine.refresh("test"); - - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, 
EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); - searchResult.close(); - - // add it back - document = testDocumentWithTextField(); - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); - doc = testParsedDocument("1", "test", null, document, B_1, null); - primaryEngine.index(indexForDoc(doc)); - - // its not there... - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); - searchResult.close(); - - // refresh and it should be there - primaryEngine.refresh("test"); - - // now its there... - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); - searchResult.close(); - - // now flush - primaryEngine.flush(); - - // and, verify get (in real time) - getResult = primaryEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // the replica should see it if we refresh too! - replicaEngine.refresh("test"); - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); - searchResult.close(); - getResult = replicaEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // make sure we can still work with the engine - // now do an update - document = testDocument(); - document.add(new TextField("value", "test1", Field.Store.YES)); - doc = testParsedDocument("1", "test", null, document, B_1, null); - primaryEngine.index(indexForDoc(doc)); - - // its not updated yet... 
- searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); - searchResult.close(); - - // refresh and it should be updated - primaryEngine.refresh("test"); - - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); - searchResult.close(); - - // Make visible to shadow replica - primaryEngine.flush(); - replicaEngine.refresh("test"); - - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); - searchResult.close(); - } - - public void testSearchResultRelease() throws Exception { - Engine.Searcher searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - searchResult.close(); - - // create a document - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - primaryEngine.index(indexForDoc(doc)); - - // its not there... - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - - // flush & refresh and it should everywhere - primaryEngine.flush(); - primaryEngine.refresh("test"); - replicaEngine.refresh("test"); - - // now its there... - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - // don't release the replica search result yet... 
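The searcher held open in testSearchResultRelease works because Lucene readers are point-in-time snapshots: a searcher acquired before a delete keeps returning the document until it is closed and a fresh reader is opened. A small stand-alone illustration of that behaviour, again in plain Lucene with made-up class names rather than the Engine API:

```java
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class PointInTimeSearcherSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
        Document doc = new Document();
        doc.add(new StringField("value", "test", Field.Store.YES));
        writer.addDocument(doc);
        writer.commit();

        DirectoryReader snapshot = DirectoryReader.open(dir);   // the searcher that is not released yet
        IndexSearcher held = new IndexSearcher(snapshot);

        writer.deleteDocuments(new Term("value", "test"));
        writer.commit();

        TermQuery query = new TermQuery(new Term("value", "test"));
        System.out.println("held snapshot still sees it: " + held.count(query));                     // 1

        DirectoryReader fresh = DirectoryReader.openIfChanged(snapshot);
        System.out.println("freshly opened reader sees: " + new IndexSearcher(fresh).count(query));  // 0

        fresh.close();
        snapshot.close();
        writer.close();
        dir.close();
    }
}
```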
- - // delete, refresh and do a new search, it should not be there - primaryEngine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc))); - primaryEngine.flush(); - primaryEngine.refresh("test"); - replicaEngine.refresh("test"); - Engine.Searcher updateSearchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(updateSearchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - updateSearchResult.close(); - - // the non released replica search result should not see the deleted yet... - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - } - - public void testFailEngineOnCorruption() throws IOException { - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - primaryEngine.index(indexForDoc(doc)); - primaryEngine.flush(); - MockDirectoryWrapper leaf = DirectoryUtils.getLeaf(replicaEngine.config().getStore().directory(), MockDirectoryWrapper.class); - leaf.setRandomIOExceptionRate(1.0); - leaf.setRandomIOExceptionRateOnOpen(1.0); - try { - replicaEngine.refresh("foo"); - fail("exception expected"); - } catch (Exception ex) { - - } - try { - Engine.Searcher searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - fail("exception expected"); - } catch (AlreadyClosedException ex) { - // all is well - } - } - - public void testExtractShardId() { - try (Engine.Searcher test = replicaEngine.acquireSearcher("test")) { - ShardId shardId = ShardUtils.extractShardId(test.getDirectoryReader()); - assertNotNull(shardId); - assertEquals(shardId, replicaEngine.config().getShardId()); - } - } - - /** - * Random test that throws random exception and ensures all references are - * counted down / released and resources are closed. 
- */ - public void testFailStart() throws IOException { - // Need a commit point for this - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - primaryEngine.index(indexForDoc(doc)); - primaryEngine.flush(); - - // this test fails if any reader, searcher or directory is not closed - MDW FTW - final int iters = scaledRandomIntBetween(10, 100); - for (int i = 0; i < iters; i++) { - MockDirectoryWrapper wrapper = newMockFSDirectory(dirPath); - wrapper.setFailOnOpenInput(randomBoolean()); - wrapper.setAllowRandomFileNotFoundException(randomBoolean()); - wrapper.setRandomIOExceptionRate(randomDouble()); - wrapper.setRandomIOExceptionRateOnOpen(randomDouble()); - try (Store store = createStore(wrapper)) { - int refCount = store.refCount(); - assertTrue("refCount: "+ store.refCount(), store.refCount() > 0); - ShadowEngine holder; - try { - holder = createShadowEngine(store); - } catch (EngineCreationFailureException ex) { - assertEquals(store.refCount(), refCount); - continue; - } - assertEquals(store.refCount(), refCount+1); - final int numStarts = scaledRandomIntBetween(1, 5); - for (int j = 0; j < numStarts; j++) { - try { - assertEquals(store.refCount(), refCount + 1); - holder.close(); - holder = createShadowEngine(store); - assertEquals(store.refCount(), refCount + 1); - } catch (EngineCreationFailureException ex) { - // all is fine - assertEquals(store.refCount(), refCount); - break; - } - } - holder.close(); - assertEquals(store.refCount(), refCount); - } - } - } - - public void testSettings() { - CodecService codecService = new CodecService(null, logger); - assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); - } - - public void testShadowEngineCreationRetry() throws Exception { - final Path srDir = createTempDir(); - final Store srStore = createStore(srDir); - Lucene.cleanLuceneIndex(srStore.directory()); - - final AtomicBoolean succeeded = new AtomicBoolean(false); - final CountDownLatch latch = new CountDownLatch(1); - - // Create a shadow Engine, which will freak out because there is no - // index yet - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - latch.await(); - } catch (InterruptedException e) { - // ignore interruptions - } - try (ShadowEngine srEngine = createShadowEngine(srStore)) { - succeeded.set(true); - } catch (Exception e) { - fail("should have been able to create the engine!"); - } - } - }); - t.start(); - - // count down latch - // now shadow engine should try to be created - latch.countDown(); - - // Create an InternalEngine, which creates the index so the shadow - // replica will handle it correctly - Store pStore = createStore(srDir); - InternalEngine pEngine = createInternalEngine(pStore, createTempDir("translog-primary")); - - // create a document - ParseContext.Document document = testDocumentWithTextField(); - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); - ParsedDocument doc = testParsedDocument("1", "test", null, document, B_1, null); - pEngine.index(indexForDoc(doc)); - pEngine.flush(true, true); - - t.join(); - assertTrue("ShadowEngine should have been able to be created", succeeded.get()); - // (shadow engine is already shut down in the try-with-resources) - IOUtils.close(srStore, pEngine, pStore); - } - - public void testNoTranslog() { - try { - replicaEngine.getTranslog(); - fail("shadow engine has no translog"); - } catch (UnsupportedOperationException 
ex) { - // all good - } - } - - public void testRefreshListenersFails() throws IOException { - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), - new RefreshListeners(null, null, null, logger)); - Exception e = expectThrows(IllegalArgumentException.class, () -> new ShadowEngine(config)); - assertEquals("ShadowEngine doesn't support RefreshListeners", e.getMessage()); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index 95f04e799b6e8..cef6806631f17 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -31,16 +31,14 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.RAMDirectory; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.mapper.BinaryFieldMapper; import org.elasticsearch.index.mapper.ContentPath; -import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.GeoPointFieldMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.MapperService; @@ -53,7 +51,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.VersionUtils; import org.junit.After; import org.junit.Before; @@ -61,7 +58,6 @@ import java.util.Collection; import java.util.List; -import static org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.sameInstance; @@ -135,9 +131,7 @@ public > IFD getForField(String type, String field @Before public void setup() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_0); // we need 2.x so that fielddata is allowed on string fields - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); - indexService = createIndex("test", settings); + indexService = createIndex("test"); mapperService = indexService.mapperService(); indicesFieldDataCache = getInstanceFromNode(IndicesService.class).getIndicesFieldDataCache(); ifdService = indexService.fieldData(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java b/core/src/test/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java index 2fc2d8e382cdc..57273d213b305 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java @@ -18,23 +18,18 @@ */ package org.elasticsearch.index.mapper; -import 
org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.VersionUtils; import org.junit.Before; import java.io.IOException; import java.util.Collection; import java.util.Set; -import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.hamcrest.Matchers.containsString; public abstract class AbstractNumericFieldMapperTestCase extends ESSingleNodeTestCase { @@ -117,20 +112,6 @@ public void testEmptyName() throws IOException { ); assertThat(e.getMessage(), containsString("name cannot be empty string")); } - - // before 5.x - Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5); - Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build(); - indexService = createIndex("test_old", oldIndexSettings); - parser = indexService.mapperService().documentMapperParser(); - for (String type : TYPES) { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("").field("type", type).endObject().endObject() - .endObject().endObject().string(); - - DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); - assertEquals(mapping, defaultMapper.mappingSource().string()); - } } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java index 8088c8576fe5a..1651091240f63 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java @@ -54,7 +54,8 @@ protected Collection> getPlugins() { } public void testExternalValues() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); MapperRegistry mapperRegistry = new MapperRegistry( @@ -102,7 +103,8 @@ public void testExternalValues() throws Exception { } public void testExternalValuesWithMultifield() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); Map mapperParsers = new HashMap<>(); @@ -167,7 +169,8 @@ public void testExternalValuesWithMultifield() throws Exception { } public void testExternalValuesWithMultifieldTwoLevels() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = 
Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); Map mapperParsers = new HashMap<>(); diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java index 596746938a213..26637734494e5 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java @@ -148,32 +148,16 @@ public void testValidation() { GeoBoundingBoxQueryBuilder builder = createTestQueryBuilder(); tester.invalidateCoordinate(builder.setValidationMethod(GeoValidationMethod.COERCE), false); - except = builder.checkLatLon(true); - assertNull("Inner post 2.0 validation w/ coerce should ignore invalid " + except = builder.checkLatLon(); + assertNull("validation w/ coerce should ignore invalid " + tester.getClass().getName() + " coordinate: " + tester.invalidCoordinate + " ", except); - tester.invalidateCoordinate(builder.setValidationMethod(GeoValidationMethod.COERCE), false); - except = builder.checkLatLon(false); - assertNull("Inner pre 2.0 validation w/ coerce should ignore invalid coordinate: " - + tester.getClass().getName() - + " coordinate: " - + tester.invalidCoordinate + " ", - except); - - tester.invalidateCoordinate(builder.setValidationMethod(GeoValidationMethod.STRICT), false); - except = builder.checkLatLon(true); - assertNull("Inner pre 2.0 validation w/o coerce should ignore invalid coordinate for old indexes: " - + tester.getClass().getName() - + " coordinate: " - + tester.invalidCoordinate, - except); - tester.invalidateCoordinate(builder.setValidationMethod(GeoValidationMethod.STRICT), false); - except = builder.checkLatLon(false); - assertNotNull("Inner post 2.0 validation w/o coerce should detect invalid coordinate: " + except = builder.checkLatLon(); + assertNotNull("validation w/o coerce should detect invalid coordinate: " + tester.getClass().getName() + " coordinate: " + tester.invalidCoordinate, diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 40ebb0e4a15a6..18da1d37b7228 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -80,13 +80,8 @@ protected MatchQueryBuilder doCreateTestQueryBuilder() { MatchQueryBuilder matchQuery = new MatchQueryBuilder(fieldName, value); matchQuery.operator(randomFrom(Operator.values())); - if (randomBoolean()) { - if (fieldName.equals(DATE_FIELD_NAME)) { - // tokenized dates would trigger parse errors - matchQuery.analyzer(randomFrom("keyword", "whitespace")); - } else { - matchQuery.analyzer(randomFrom("simple", "keyword", "whitespace")); - } + if (randomBoolean() && fieldName.equals(STRING_FIELD_NAME)) { + matchQuery.analyzer(randomFrom("simple", "keyword", "whitespace")); } if (fieldName.equals(STRING_FIELD_NAME) && randomBoolean()) { @@ -424,6 +419,15 @@ public void testParseFailsWithTermsArray() throws Exception { expectThrows(IllegalStateException.class, () -> parseQuery(json2)); } + public void testExceptionUsingAnalyzerOnNumericField() { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + QueryShardContext shardContext = createShardContext(); + 
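The test added below documents why forcing an analyzer onto a numeric field fails: the "simple" analyzer (which corresponds to Lucene's SimpleAnalyzer, a letter tokenizer plus lowercasing) keeps only runs of letters, so a double in scientific notation collapses to the single token "e", and parsing that back as a number throws. A stand-alone sketch of just that step, with an invented class name:

```java
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class SimpleAnalyzerOnNumberSketch {
    public static void main(String[] args) throws Exception {
        Analyzer analyzer = new SimpleAnalyzer();
        // Only letter runs survive, lowercased: "6.075210893508043E-4" becomes the token "e".
        try (TokenStream ts = analyzer.tokenStream("field", "6.075210893508043E-4")) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                System.out.println("token: " + term.toString());
            }
            ts.end();
        }
        // Feeding that token back into numeric parsing is what the query builder ends up doing.
        try {
            Double.parseDouble("e");
        } catch (NumberFormatException e) {
            System.out.println(e.getMessage());   // For input string: "e"
        }
    }
}
```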
MatchQueryBuilder matchQueryBuilder = new MatchQueryBuilder(DOUBLE_FIELD_NAME, 6.075210893508043E-4); + matchQueryBuilder.analyzer("simple"); + NumberFormatException e = expectThrows(NumberFormatException.class, () -> matchQueryBuilder.toQuery(shardContext)); + assertEquals("For input string: \"e\"", e.getMessage()); + } + @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { mapperService.merge("t_boost", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("t_boost", diff --git a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index 63fa1eea01734..990137184fe68 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -85,13 +85,8 @@ protected MultiMatchQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean()) { query.operator(randomFrom(Operator.values())); } - if (randomBoolean()) { - if (fieldName.equals(DATE_FIELD_NAME)) { - // tokenized dates would trigger parse errors - query.analyzer("keyword"); - } else { - query.analyzer(randomAnalyzer()); - } + if (randomBoolean() && fieldName.equals(STRING_FIELD_NAME)) { + query.analyzer(randomAnalyzer()); } if (randomBoolean()) { query.slop(randomIntBetween(0, 5)); @@ -276,7 +271,7 @@ public void testFuzzinessNotAllowedTypes() throws IOException { } } - public void testQueryParameterArrayException() throws IOException { + public void testQueryParameterArrayException() { String json = "{\n" + " \"multi_match\" : {\n" + @@ -289,6 +284,16 @@ public void testQueryParameterArrayException() throws IOException { assertEquals("[multi_match] unknown token [START_ARRAY] after [query]", e.getMessage()); } + public void testExceptionUsingAnalyzerOnNumericField() { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + QueryShardContext shardContext = createShardContext(); + MultiMatchQueryBuilder multiMatchQueryBuilder = new MultiMatchQueryBuilder(6.075210893508043E-4); + multiMatchQueryBuilder.field(DOUBLE_FIELD_NAME); + multiMatchQueryBuilder.analyzer("simple"); + NumberFormatException e = expectThrows(NumberFormatException.class, () -> multiMatchQueryBuilder.toQuery(shardContext)); + assertEquals("For input string: \"e\"", e.getMessage()); + } + public void testFuzzinessOnNonStringField() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); MultiMatchQueryBuilder query = new MultiMatchQueryBuilder(42).field(INT_FIELD_NAME).field(BOOLEAN_FIELD_NAME); diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 1431391d79864..1d1af2b2fc591 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -410,7 +410,8 @@ public void onResponse(PrimaryResult result) { public void onFailure(Exception e) { listener.onFailure(e); } - }, true, new ReplicasRef(), () -> null, logger, opType) { + }, new ReplicasRef(), () -> null, logger, opType) { + @Override protected List getShards(ShardId shardId, ClusterState state) { return replicationGroup.shardRoutings(); diff --git 
a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 97e224f04a4e3..12f749e681918 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -243,11 +243,6 @@ public long addDocument(Iterable doc) throws IOExcepti }, null, config); } - - @Override - public Engine newReadOnlyEngine(EngineConfig config) { - throw new UnsupportedOperationException(); - } }; } else { return null; diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index b106a308098c9..b328e86e58ded 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -284,7 +284,7 @@ public void testOperationLocksOnPrimaryShards() throws InterruptedException, Exe true, ShardRoutingState.INITIALIZING, AllocationId.newRelocation(AllocationId.newInitializing()))); } else if (randomBoolean()) { // simulate promotion - indexShard = newShard(shardId, false); + indexShard = newStartedShard(false); ShardRouting replicaRouting = indexShard.routingEntry(); indexShard.updatePrimaryTerm(indexShard.getPrimaryTerm() + 1); ShardRouting primaryRouting = TestShardRouting.newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), null, diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java index 85bd4b6166c8a..fda2f8ef7d039 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java @@ -111,15 +111,10 @@ public void testGetRootPaths() throws IOException { final Path customPath; if (useCustomDataPath) { final Path path = createTempDir(); - final boolean includeNodeId = randomBoolean(); indexSettings = indexSettingsBuilder.put(IndexMetaData.SETTING_DATA_PATH, "custom").build(); nodeSettings = Settings.builder().put(Environment.PATH_SHARED_DATA_SETTING.getKey(), path.toAbsolutePath().toAbsolutePath()) - .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), includeNodeId).build(); - if (includeNodeId) { - customPath = path.resolve("custom").resolve("0"); - } else { - customPath = path.resolve("custom"); - } + .build(); + customPath = path.resolve("custom").resolve("0"); } else { customPath = null; indexSettings = indexSettingsBuilder.build(); diff --git a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java index 57d025128d8a0..0d730eff9f5ea 100644 --- a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java @@ -20,8 +20,6 @@ import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.ClassicSimilarity; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; @@ -51,20 +49,9 @@ public void testOverrideBuiltInSimilarity() { 
} } - // Pre v3 indices could override built-in similarities - public void testOverrideBuiltInSimilarityPreV3() { - Settings settings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) - .put("index.similarity.BM25.type", "classic") - .build(); - IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); - SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap()); - assertTrue(service.getSimilarity("BM25") instanceof ClassicSimilarityProvider); - } - - // Tests #16594 public void testOverrideDefaultSimilarity() { - Settings settings = Settings.builder().put("index.similarity.default.type", "classic").build(); + Settings settings = Settings.builder().put("index.similarity.default.type", "classic") + .build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap()); assertTrue(service.getDefaultSimilarity() instanceof ClassicSimilarity); diff --git a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java index e9183876aecb6..3e7f4650c3e6d 100644 --- a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java +++ b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java @@ -19,12 +19,12 @@ package org.elasticsearch.index.similarity; -import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; -import org.apache.lucene.search.similarities.DFISimilarity; import org.apache.lucene.search.similarities.AfterEffectL; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BasicModelG; +import org.apache.lucene.search.similarities.BooleanSimilarity; +import org.apache.lucene.search.similarities.ClassicSimilarity; +import org.apache.lucene.search.similarities.DFISimilarity; import org.apache.lucene.search.similarities.DFRSimilarity; import org.apache.lucene.search.similarities.DistributionSPL; import org.apache.lucene.search.similarities.IBSimilarity; @@ -34,19 +34,15 @@ import org.apache.lucene.search.similarities.LambdaTTF; import org.apache.lucene.search.similarities.NormalizationH2; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.Collection; @@ -248,32 +244,4 @@ public void testResolveSimilaritiesFromMapping_Unknown() throws IOException { assertThat(e.getMessage(), equalTo("Unknown Similarity type [unknown_similarity] for field [field1]")); } } - - public void testSimilarityDefaultBackCompat() throws IOException { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties") - 
.startObject("field1") - .field("similarity", "default") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject().string(); - Settings settings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_2_0)) - .build(); - - DocumentMapperParser parser = createIndex("test_v2.x", settings).mapperService().documentMapperParser(); - DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(ClassicSimilarityProvider.class)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().name(), equalTo("classic")); - - parser = createIndex("test_v3.x").mapperService().documentMapperParser(); - try { - parser.parse("type", new CompressedXContent(mapping)); - fail("Expected MappingParsingException"); - } catch (MapperParsingException e) { - assertThat(e.getMessage(), equalTo("Unknown Similarity type [default] for field [field1]")); - } - } } diff --git a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java index 5f1578488cf1e..8a63c237e90d5 100644 --- a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java +++ b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java @@ -40,6 +40,9 @@ import static org.hamcrest.Matchers.is; public class FileInfoTests extends ESTestCase { + private static final org.apache.lucene.util.Version MIN_SUPPORTED_LUCENE_VERSION = org.elasticsearch.Version.CURRENT + .minimumIndexCompatibilityVersion().luceneVersion; + public void testToFromXContent() throws IOException { final int iters = scaledRandomIntBetween(1, 10); for (int iter = 0; iter < iters; iter++) { @@ -48,7 +51,8 @@ public void testToFromXContent() throws IOException { for (int i = 0; i < hash.length; i++) { hash.bytes[i] = randomByte(); } - StoreFileMetaData meta = new StoreFileMetaData("foobar", Math.abs(randomLong()), randomAlphaOfLengthBetween(1, 10), Version.LATEST, hash); + StoreFileMetaData meta = new StoreFileMetaData("foobar", Math.abs(randomLong()), randomAlphaOfLengthBetween(1, 10), + Version.LATEST, hash); ByteSizeValue size = new ByteSizeValue(Math.abs(randomLong())); BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("_foobar", meta, size); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint(); @@ -140,14 +144,16 @@ public void testInvalidFieldsInFromXContent() throws IOException { } public void testGetPartSize() { - BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 36, "666"), new ByteSizeValue(6)); + BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 36, "666", + MIN_SUPPORTED_LUCENE_VERSION), new ByteSizeValue(6)); int numBytes = 0; for (int i = 0; i < info.numberOfParts(); i++) { numBytes += info.partBytes(i); } assertEquals(numBytes, 36); - info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 35, "666"), new ByteSizeValue(6)); + info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 35, "666", + MIN_SUPPORTED_LUCENE_VERSION), new ByteSizeValue(6)); numBytes = 0; for (int i = 0; i < info.numberOfParts(); i++) { 
numBytes += info.partBytes(i); @@ -155,7 +161,8 @@ public void testGetPartSize() { assertEquals(numBytes, 35); final int numIters = randomIntBetween(10, 100); for (int j = 0; j < numIters; j++) { - StoreFileMetaData metaData = new StoreFileMetaData("foo", randomIntBetween(0, 1000), "666"); + StoreFileMetaData metaData = new StoreFileMetaData("foo", randomIntBetween(0, 1000), "666", + MIN_SUPPORTED_LUCENE_VERSION); info = new BlobStoreIndexShardSnapshot.FileInfo("foo", metaData, new ByteSizeValue(randomIntBetween(1, 1000))); numBytes = 0; for (int i = 0; i < info.numberOfParts(); i++) { diff --git a/core/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java b/core/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java index 3a56763cd59fd..0a72037b7d8c0 100644 --- a/core/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java @@ -39,33 +39,6 @@ public class FsDirectoryServiceTests extends ESTestCase { - public void testHasSleepWrapperOnSharedFS() throws IOException { - Settings build = randomBoolean() ? - Settings.builder().put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true).build() : - Settings.builder().put(IndexMetaData.SETTING_SHADOW_REPLICAS, true).build();; - IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build); - IndexStore store = new IndexStore(settings); - Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0"); - Files.createDirectories(tempDir); - ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0)); - FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path); - Directory directory = fsDirectoryService.newDirectory(); - assertTrue(directory.getClass().toString(), directory instanceof SleepingLockWrapper); - } - - public void testHasNoSleepWrapperOnNormalFS() throws IOException { - Settings build = Settings.builder().put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "simplefs").build(); - IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build); - IndexStore store = new IndexStore(settings); - Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0"); - Files.createDirectories(tempDir); - ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0)); - FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path); - Directory directory = fsDirectoryService.newDirectory(); - assertFalse(directory instanceof SleepingLockWrapper); - assertTrue(directory instanceof SimpleFSDirectory); - } - public void testPreload() throws IOException { doTestPreload(); doTestPreload("nvd", "dvd", "tim"); diff --git a/core/src/test/java/org/elasticsearch/index/store/StoreTests.java b/core/src/test/java/org/elasticsearch/index/store/StoreTests.java index 156c2132358f5..dfc24d73c9786 100644 --- a/core/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/core/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -98,7 +98,10 @@ public class StoreTests extends ESTestCase { - private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings("index", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT).build()); + private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings("index", + Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, 
org.elasticsearch.Version.CURRENT).build()); + private static final Version MIN_SUPPORTED_LUCENE_VERSION = org.elasticsearch.Version.CURRENT + .minimumIndexCompatibilityVersion().luceneVersion; public void testRefCount() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); @@ -169,7 +172,8 @@ public void testVerifyingIndexOutput() throws IOException { indexInput.seek(0); BytesRef ref = new BytesRef(scaledRandomIntBetween(1, 1024)); long length = indexInput.length(); - IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, checksum), dir.createOutput("foo1.bar", IOContext.DEFAULT)); + IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, checksum, + MIN_SUPPORTED_LUCENE_VERSION), dir.createOutput("foo1.bar", IOContext.DEFAULT)); while (length > 0) { if (random().nextInt(10) == 0) { verifyingOutput.writeByte(indexInput.readByte()); @@ -200,7 +204,8 @@ public void testVerifyingIndexOutput() throws IOException { public void testVerifyingIndexOutputOnEmptyFile() throws IOException { Directory dir = newDirectory(); - IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo.bar", 0, Store.digestToString(0)), + IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo.bar", 0, Store.digestToString(0), + MIN_SUPPORTED_LUCENE_VERSION), dir.createOutput("foo1.bar", IOContext.DEFAULT)); try { Store.verify(verifyingOutput); @@ -229,7 +234,8 @@ public void testChecksumCorrupted() throws IOException { indexInput.seek(0); BytesRef ref = new BytesRef(scaledRandomIntBetween(1, 1024)); long length = indexInput.length(); - IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, checksum), dir.createOutput("foo1.bar", IOContext.DEFAULT)); + IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, checksum, + MIN_SUPPORTED_LUCENE_VERSION), dir.createOutput("foo1.bar", IOContext.DEFAULT)); length -= 8; // we write the checksum in the try / catch block below while (length > 0) { if (random().nextInt(10) == 0) { @@ -283,7 +289,8 @@ private void appendRandomData(IndexOutput output) throws IOException { public void testVerifyingIndexOutputWithBogusInput() throws IOException { Directory dir = newDirectory(); int length = scaledRandomIntBetween(10, 1024); - IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, ""), dir.createOutput("foo1.bar", IOContext.DEFAULT)); + IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, "", + MIN_SUPPORTED_LUCENE_VERSION), dir.createOutput("foo1.bar", IOContext.DEFAULT)); try { while (length > 0) { verifyingOutput.writeByte((byte) random().nextInt()); @@ -831,8 +838,8 @@ public void testMetadataSnapshotStreaming() throws Exception { } protected Store.MetadataSnapshot createMetaDataSnapshot() { - StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1, "666"); - StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1, "666"); + StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION); + StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION); Map storeFileMetaDataMap = new HashMap<>(); 
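
For reference, the pattern the store and snapshot test changes above rely on is to derive the oldest Lucene version whose indices the current Elasticsearch release can still read, and to pass it explicitly to every StoreFileMetaData constructor. A minimal sketch of that pattern, using only the calls that already appear in these hunks:

    // Oldest Lucene version the current Elasticsearch version can still read.
    private static final org.apache.lucene.util.Version MIN_SUPPORTED_LUCENE_VERSION =
            org.elasticsearch.Version.CURRENT.minimumIndexCompatibilityVersion().luceneVersion;

    // name, length, checksum, and the Lucene version that wrote the file
    StoreFileMetaData meta = new StoreFileMetaData("segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION);
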
storeFileMetaDataMap.put(storeFileMetaData1.name(), storeFileMetaData1); storeFileMetaDataMap.put(storeFileMetaData2.name(), storeFileMetaData2); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index b94e864fdd6fc..e87dc24c8f85b 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -106,23 +106,6 @@ protected boolean resetNodeAfterTest() { return true; } - public void testCanDeleteIndexContent() throws IOException { - final IndicesService indicesService = getIndicesService(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_DATA_PATH, "/foo/bar") - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 4)) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 3)) - .build()); - assertFalse("shard on shared filesystem", indicesService.canDeleteIndexContents(idxSettings.getIndex(), idxSettings)); - - final IndexMetaData.Builder newIndexMetaData = IndexMetaData.builder(idxSettings.getIndexMetaData()); - newIndexMetaData.state(IndexMetaData.State.CLOSE); - idxSettings = IndexSettingsModule.newIndexSettings(newIndexMetaData.build()); - assertTrue("shard on shared filesystem, but closed, so it should be deletable", - indicesService.canDeleteIndexContents(idxSettings.getIndex(), idxSettings)); - } - public void testCanDeleteShardContent() { IndicesService indicesService = getIndicesService(); IndexMetaData meta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas( diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index 61d325a64e8f7..960d135371c07 100644 --- a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -23,10 +23,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ar.ArabicNormalizationFilter; -import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.apache.lucene.analysis.de.GermanAnalyzer; -import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.fa.PersianNormalizationFilter; import org.apache.lucene.analysis.hunspell.Dictionary; import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter; @@ -123,83 +120,6 @@ public void testDefaultFactoryTokenFilters() throws IOException { assertTokenFilter("arabic_normalization", ArabicNormalizationFilter.class); } - public void testAnalyzerAlias() throws IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.foobar.alias","default") - .put("index.analysis.analyzer.foobar.type", "keyword") - .put("index.analysis.analyzer.foobar_search.alias","default_search") - .put("index.analysis.analyzer.foobar_search.type","english") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - // analyzer aliases are only allowed in 2.x indices - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5)) - .build(); - 
AnalysisRegistry newRegistry = getNewRegistry(settings); - IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings); - assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(KeywordAnalyzer.class))); - assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(EnglishAnalyzer.class))); - assertWarnings("setting [index.analysis.analyzer.foobar.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices.", - "setting [index.analysis.analyzer.foobar_search.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices."); - } - - public void testAnalyzerAliasReferencesAlias() throws IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.foobar.alias","default") - .put("index.analysis.analyzer.foobar.type", "german") - .put("index.analysis.analyzer.foobar_search.alias","default_search") - .put("index.analysis.analyzer.foobar_search.type", "default") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - // analyzer aliases are only allowed in 2.x indices - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5)) - .build(); - AnalysisRegistry newRegistry = getNewRegistry(settings); - IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings); - - assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(GermanAnalyzer.class))); - // analyzer types are bound early before we resolve aliases - assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(StandardAnalyzer.class))); - assertWarnings("setting [index.analysis.analyzer.foobar.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices.", - "setting [index.analysis.analyzer.foobar_search.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices."); - } - - public void testAnalyzerAliasDefault() throws IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.foobar.alias","default") - .put("index.analysis.analyzer.foobar.type", "keyword") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - // analyzer aliases are only allowed in 2.x indices - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5)) - .build(); - AnalysisRegistry newRegistry = getNewRegistry(settings); - IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings); - assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(KeywordAnalyzer.class))); - assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(KeywordAnalyzer.class))); - assertWarnings("setting [index.analysis.analyzer.foobar.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices."); - } - - public void testAnalyzerAliasMoreThanOnce() throws IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.foobar.alias","default") - .put("index.analysis.analyzer.foobar.type", "keyword") - .put("index.analysis.analyzer.foobar1.alias","default") - .put("index.analysis.analyzer.foobar1.type", "english") - // analyzer aliases are only allowed in 2.x 
indices - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5)) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(); - AnalysisRegistry newRegistry = getNewRegistry(settings); - IllegalStateException ise = expectThrows(IllegalStateException.class, () -> getIndexAnalyzers(newRegistry, settings)); - assertEquals("alias [default] is already used by [foobar]", ise.getMessage()); - assertWarnings("setting [index.analysis.analyzer.foobar.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices.", - "setting [index.analysis.analyzer.foobar1.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices."); - } - public void testAnalyzerAliasNotAllowedPost5x() throws IOException { Settings settings = Settings.builder() .put("index.analysis.analyzer.foobar.type", "standard") @@ -218,7 +138,7 @@ public void testVersionedAnalyzers() throws Exception { Settings settings2 = Settings.builder() .loadFromStream(yaml, getClass().getResourceAsStream(yaml)) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) .build(); AnalysisRegistry newRegistry = getNewRegistry(settings2); IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings2); @@ -231,8 +151,10 @@ public void testVersionedAnalyzers() throws Exception { // analysis service has the expected version assertThat(indexAnalyzers.get("standard").analyzer(), is(instanceOf(StandardAnalyzer.class))); - assertEquals(Version.V_2_0_0.luceneVersion, indexAnalyzers.get("standard").analyzer().getVersion()); - assertEquals(Version.V_2_0_0.luceneVersion, indexAnalyzers.get("thai").analyzer().getVersion()); + assertEquals(Version.V_5_0_0.luceneVersion, + indexAnalyzers.get("standard").analyzer().getVersion()); + assertEquals(Version.V_5_0_0.luceneVersion, + indexAnalyzers.get("thai").analyzer().getVersion()); assertThat(indexAnalyzers.get("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class))); assertEquals(org.apache.lucene.util.Version.fromBits(3,6,0), indexAnalyzers.get("custom7").analyzer().getVersion()); @@ -355,40 +277,6 @@ public void testUnderscoreInAnalyzerName() throws IOException { } } - public void testUnderscoreInAnalyzerNameAlias() throws IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.valid_name.tokenizer", "keyword") - .put("index.analysis.analyzer.valid_name.alias", "_invalid_name") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - // analyzer aliases are only allowed for 2.x indices - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5)) - .build(); - try { - getIndexAnalyzers(settings); - fail("This should fail with IllegalArgumentException because the analyzers alias starts with _"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("analyzer name must not start with '_'. 
got \"_invalid_name\"")); - } - assertWarnings("setting [index.analysis.analyzer.valid_name.alias] is only allowed on index [test] because it was " + - "created before 5.x; analyzer aliases can no longer be created on new indices."); - } - - public void testDeprecatedPositionOffsetGap() throws IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.custom.tokenizer", "standard") - .put("index.analysis.analyzer.custom.position_offset_gap", "128") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build(); - try { - getIndexAnalyzers(settings); - fail("Analyzer should fail if it has position_offset_gap"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("Option [position_offset_gap] in Custom Analyzer [custom] " + - "has been renamed, please use [position_increment_gap] instead.")); - } - } - public void testRegisterHunspellDictionary() throws Exception { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index d3158c620d1a7..cf22c95ac6997 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -41,9 +41,12 @@ import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.action.support.master.TransportMasterNodeActionUtils; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.EmptyClusterInfoService; import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction.ShardEntry; import org.elasticsearch.cluster.metadata.AliasValidator; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -217,23 +220,29 @@ public ClusterState deassociateDeadNodes(ClusterState clusterState, boolean rero } public ClusterState applyFailedShards(ClusterState clusterState, List failedShards) { - List entries = failedShards.stream().map(failedShard -> - new ShardStateAction.ShardEntry(failedShard.getRoutingEntry().shardId(), failedShard.getRoutingEntry().allocationId().getId(), + List entries = failedShards.stream().map(failedShard -> + new ShardEntry(failedShard.getRoutingEntry().shardId(), failedShard.getRoutingEntry().allocationId().getId(), 0L, failedShard.getMessage(), failedShard.getFailure())) .collect(Collectors.toList()); - try { - return shardFailedClusterStateTaskExecutor.execute(clusterState, entries).resultingState; - } catch (Exception e) { - throw ExceptionsHelper.convertToRuntime(e); - } + return runTasks(shardFailedClusterStateTaskExecutor, clusterState, entries); } public ClusterState applyStartedShards(ClusterState clusterState, List startedShards) { - List entries = startedShards.stream().map(startedShard -> - new ShardStateAction.ShardEntry(startedShard.shardId(), startedShard.allocationId().getId(), 0L, "shard started", null)) + List entries = startedShards.stream().map(startedShard -> + new 
ShardEntry(startedShard.shardId(), startedShard.allocationId().getId(), 0L, "shard started", null)) .collect(Collectors.toList()); + return runTasks(shardStartedClusterStateTaskExecutor, clusterState, entries); + } + + private ClusterState runTasks(ClusterStateTaskExecutor executor, ClusterState clusterState, List entries) { try { - return shardStartedClusterStateTaskExecutor.execute(clusterState, entries).resultingState; + ClusterTasksResult result = executor.execute(clusterState, entries); + for (ClusterStateTaskExecutor.TaskResult taskResult : result.executionResults.values()) { + if (taskResult.isSuccess() == false) { + throw taskResult.getFailure(); + } + } + return result.resultingState; } catch (Exception e) { throw ExceptionsHelper.convertToRuntime(e); } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 064e9d78b511c..55e47dabdbfb7 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -91,15 +91,6 @@ public void tearDown() throws Exception { terminate(threadPool); } - /** - * needed due to random usage of {@link IndexMetaData#INDEX_SHADOW_REPLICAS_SETTING}. removed once - * shadow replicas are removed. - */ - @Override - protected boolean enableWarningsCheck() { - return false; - } - public void testRandomClusterStateUpdates() { // we have an IndicesClusterStateService per node in the cluster final Map clusterStateServiceMap = new HashMap<>(); @@ -249,10 +240,6 @@ public ClusterState randomlyUpdateClusterState(ClusterState state, Settings.Builder settingsBuilder = Settings.builder() .put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3)) .put(SETTING_NUMBER_OF_REPLICAS, randomInt(2)); - if (randomBoolean()) { - settingsBuilder.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true); - } CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build()).waitForActiveShards(ActiveShardCount.NONE); state = cluster.createIndex(state, request); assertTrue(state.metaData().hasIndex(name)); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java index 2c00c59c34384..0d1a5928fb8cd 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java @@ -36,7 +36,8 @@ import static java.util.Collections.emptySet; public class RecoveryStatusTests extends ESSingleNodeTestCase { - + private static final org.apache.lucene.util.Version MIN_SUPPORTED_LUCENE_VERSION = org.elasticsearch.Version.CURRENT + .minimumIndexCompatibilityVersion().luceneVersion; public void testRenameTempFiles() throws IOException { IndexService service = createIndex("foo"); @@ -51,7 +52,8 @@ public void onRecoveryDone(RecoveryState state) { public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { } }, version -> {}); - try (IndexOutput indexOutput = status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw"), status.store())) { + try (IndexOutput indexOutput = 
status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength() + , "9z51nw", MIN_SUPPORTED_LUCENE_VERSION), status.store())) { indexOutput.writeInt(1); IndexOutput openIndexOutput = status.getOpenIndexOutput("foo.bar"); assertSame(openIndexOutput, indexOutput); @@ -60,7 +62,8 @@ public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, bo } try { - status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw"), status.store()); + status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw", + MIN_SUPPORTED_LUCENE_VERSION), status.store()); fail("file foo.bar is already opened and registered"); } catch (IllegalStateException ex) { assertEquals("output for file [foo.bar] has already been created", ex.getMessage()); diff --git a/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java b/core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java similarity index 93% rename from core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java rename to core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java index daaeab8014369..44c4b880fb68f 100644 --- a/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java +++ b/core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.node.internal; +package org.elasticsearch.node; import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cluster.ClusterName; @@ -196,4 +196,15 @@ public void testDefaultPropertiesOverride() throws Exception { Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, null, props); assertEquals("bar", env.settings().get("setting")); } + + public void testDefaultWithArray() { + final Settings.Builder output = Settings.builder().put("foobar.0", "bar").put("foobar.1", "baz"); + final Map esSettings = Collections.singletonMap("default.foobar", "foo"); + InternalSettingsPreparer.initializeSettings(output, Settings.EMPTY, esSettings); + final Settings settings = output.build(); + assertThat(settings.get("foobar.0"), equalTo("bar")); + assertThat(settings.get("foobar.1"), equalTo("baz")); + assertNull(settings.get("foobar")); + } + } diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java index 04afdd5839181..d8d1fa6ccabaa 100644 --- a/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java @@ -182,12 +182,12 @@ public void testReadFromPropertiesOldElasticsearchVersion() throws Exception { "description", "fake desc", "name", "my_plugin", "version", "1.0", - "elasticsearch.version", Version.V_2_0_0.toString()); + "elasticsearch.version", Version.V_5_0_0.toString()); try { PluginInfo.readFromProperties(pluginDir); fail("expected old elasticsearch version exception"); } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("was designed for version [2.0.0]")); + assertTrue(e.getMessage().contains("was designed for version [5.0.0]")); } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 653e8d0a20f60..d8aab691d2aa1 100644 
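
The new InternalSettingsPreparerTests case above exercises the "default." prefix handling: a value passed as default.foobar is only applied when nothing else provides foobar, and it must not shadow an explicitly configured array (foobar.0, foobar.1). A minimal sketch of that behaviour, assuming only the initializeSettings call and assertions shown in the test:

    Settings.Builder output = Settings.builder().put("foobar.0", "bar").put("foobar.1", "baz");
    Map<String, String> esSettings = Collections.singletonMap("default.foobar", "foo");
    InternalSettingsPreparer.initializeSettings(output, Settings.EMPTY, esSettings);
    Settings settings = output.build();
    // the explicit array values win and the flat "default." value is not applied
    assertThat(settings.get("foobar.0"), equalTo("bar"));
    assertThat(settings.get("foobar.1"), equalTo("baz"));
    assertNull(settings.get("foobar"));
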
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -65,7 +65,8 @@ protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created } - private Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + private Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); private IndexRequestBuilder indexCity(String idx, String name, String... latLons) throws Exception { XContentBuilder source = jsonBuilder().startObject().field("city", name); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index 7f158e0732f97..fc080dd0f04c4 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.ObjectIntMap; import com.carrotsearch.hppc.cursors.ObjectIntCursor; + import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -64,7 +65,8 @@ protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created } - private Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + private Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); static ObjectIntMap expectedDocCountsForGeoHash = null; static ObjectIntMap multiValuedExpectedDocCountsForGeoHash = null; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index 7be372f328df4..cdd2251fb6e61 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -21,126 +21,323 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.index.IndexService; -import 
org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.mapper.UidFieldMapper; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.BucketCollector; -import org.elasticsearch.search.aggregations.SearchContextAggregations; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.metrics.max.InternalMax; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.sum.InternalSum; +import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; +import java.util.stream.DoubleStream; -import static org.hamcrest.Matchers.equalTo; +public class NestedAggregatorTests extends AggregatorTestCase { + + private static final String VALUE_FIELD_NAME = "number"; + private static final String NESTED_OBJECT = "nested_object"; + private static final String NESTED_OBJECT2 = "nested_object2"; + private static final String NESTED_AGG = "nestedAgg"; + private static final String MAX_AGG_NAME = "maxAgg"; + private static final String SUM_AGG_NAME = "sumAgg"; + + public void testNoDocs() throws IOException { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { + // intentionally not writing any docs + } + try (IndexReader indexReader = DirectoryReader.open(directory)) { + NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG, + NESTED_OBJECT); + MaxAggregationBuilder maxAgg = new MaxAggregationBuilder(MAX_AGG_NAME) + .field(VALUE_FIELD_NAME); + nestedBuilder.subAggregation(maxAgg); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType( + NumberFieldMapper.NumberType.LONG); + fieldType.setName(VALUE_FIELD_NAME); + + Nested nested = search(newSearcher(indexReader, true, true), + new MatchAllDocsQuery(), nestedBuilder, fieldType); + + assertEquals(NESTED_AGG, nested.getName()); + assertEquals(0, nested.getDocCount()); + + InternalMax max = (InternalMax) + ((InternalAggregation)nested).getProperty(MAX_AGG_NAME); + assertEquals(MAX_AGG_NAME, max.getName()); + assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), Double.MIN_VALUE); + } + } + } + + public void testSingleNestingMax() throws IOException { + int numRootDocs = randomIntBetween(1, 20); + int expectedNestedDocs = 0; + double expectedMaxValue = Double.NEGATIVE_INFINITY; + try (Directory directory = newDirectory()) { + try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < numRootDocs; i++) { + List documents = new ArrayList<>(); + int numNestedDocs = randomIntBetween(0, 20); + expectedMaxValue = Math.max(expectedMaxValue, + generateMaxDocs(documents, numNestedDocs, i, NESTED_OBJECT, VALUE_FIELD_NAME)); + expectedNestedDocs += numNestedDocs; + + Document document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#" + i, + 
UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "test", + TypeFieldMapper.Defaults.FIELD_TYPE)); + documents.add(document); + iw.addDocuments(documents); + } + iw.commit(); + } + try (IndexReader indexReader = DirectoryReader.open(directory)) { + NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG, + NESTED_OBJECT); + MaxAggregationBuilder maxAgg = new MaxAggregationBuilder(MAX_AGG_NAME) + .field(VALUE_FIELD_NAME); + nestedBuilder.subAggregation(maxAgg); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType( + NumberFieldMapper.NumberType.LONG); + fieldType.setName(VALUE_FIELD_NAME); + + Nested nested = search(newSearcher(indexReader, true, true), + new MatchAllDocsQuery(), nestedBuilder, fieldType); + assertEquals(expectedNestedDocs, nested.getDocCount()); + + assertEquals(NESTED_AGG, nested.getName()); + assertEquals(expectedNestedDocs, nested.getDocCount()); + + InternalMax max = (InternalMax) + ((InternalAggregation)nested).getProperty(MAX_AGG_NAME); + assertEquals(MAX_AGG_NAME, max.getName()); + assertEquals(expectedMaxValue, max.getValue(), Double.MIN_VALUE); + } + } + } + + public void testDoubleNestingMax() throws IOException { + int numRootDocs = randomIntBetween(1, 20); + int expectedNestedDocs = 0; + double expectedMaxValue = Double.NEGATIVE_INFINITY; + try (Directory directory = newDirectory()) { + try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < numRootDocs; i++) { + List documents = new ArrayList<>(); + int numNestedDocs = randomIntBetween(0, 20); + expectedMaxValue = Math.max(expectedMaxValue, + generateMaxDocs(documents, numNestedDocs, i, NESTED_OBJECT + "." + NESTED_OBJECT2, VALUE_FIELD_NAME)); + expectedNestedDocs += numNestedDocs; + + Document document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#" + i, + UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "test", + TypeFieldMapper.Defaults.FIELD_TYPE)); + documents.add(document); + iw.addDocuments(documents); + } + iw.commit(); + } + try (IndexReader indexReader = DirectoryReader.open(directory)) { + NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG, + NESTED_OBJECT + "." 
+ NESTED_OBJECT2); + MaxAggregationBuilder maxAgg = new MaxAggregationBuilder(MAX_AGG_NAME) + .field(VALUE_FIELD_NAME); + nestedBuilder.subAggregation(maxAgg); + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType( + NumberFieldMapper.NumberType.LONG); + fieldType.setName(VALUE_FIELD_NAME); + + Nested nested = search(newSearcher(indexReader, true, true), + new MatchAllDocsQuery(), nestedBuilder, fieldType); + assertEquals(expectedNestedDocs, nested.getDocCount()); + + assertEquals(NESTED_AGG, nested.getName()); + assertEquals(expectedNestedDocs, nested.getDocCount()); + + InternalMax max = (InternalMax) + ((InternalAggregation)nested).getProperty(MAX_AGG_NAME); + assertEquals(MAX_AGG_NAME, max.getName()); + assertEquals(expectedMaxValue, max.getValue(), Double.MIN_VALUE); + } + } + } + + public void testOrphanedDocs() throws IOException { + int numRootDocs = randomIntBetween(1, 20); + int expectedNestedDocs = 0; + double expectedSum = 0; + try (Directory directory = newDirectory()) { + try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < numRootDocs; i++) { + List documents = new ArrayList<>(); + int numNestedDocs = randomIntBetween(0, 20); + expectedSum += generateSumDocs(documents, numNestedDocs, i, NESTED_OBJECT, VALUE_FIELD_NAME); + expectedNestedDocs += numNestedDocs; + + Document document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#" + i, + UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "test", + TypeFieldMapper.Defaults.FIELD_TYPE)); + documents.add(document); + iw.addDocuments(documents); + } + //add some random nested docs that don't belong + List documents = new ArrayList<>(); + int numOrphanedDocs = randomIntBetween(0, 20); + generateSumDocs(documents, numOrphanedDocs, 1234, "foo", VALUE_FIELD_NAME); + iw.addDocuments(documents); + iw.commit(); + } + try (IndexReader indexReader = DirectoryReader.open(directory)) { + NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG, + NESTED_OBJECT); + SumAggregationBuilder sumAgg = new SumAggregationBuilder(SUM_AGG_NAME) + .field(VALUE_FIELD_NAME); + nestedBuilder.subAggregation(sumAgg); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType( + NumberFieldMapper.NumberType.LONG); + fieldType.setName(VALUE_FIELD_NAME); + + Nested nested = search(newSearcher(indexReader, true, true), + new MatchAllDocsQuery(), nestedBuilder, fieldType); + assertEquals(expectedNestedDocs, nested.getDocCount()); + + assertEquals(NESTED_AGG, nested.getName()); + assertEquals(expectedNestedDocs, nested.getDocCount()); + + InternalSum sum = (InternalSum) + ((InternalAggregation)nested).getProperty(SUM_AGG_NAME); + assertEquals(SUM_AGG_NAME, sum.getName()); + assertEquals(expectedSum, sum.getValue(), Double.MIN_VALUE); + } + } + } -public class NestedAggregatorTests extends ESSingleNodeTestCase { public void testResetRootDocId() throws Exception { - Directory directory = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); iwc.setMergePolicy(NoMergePolicy.INSTANCE); - RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, iwc); - - List documents = new ArrayList<>(); - - // 1 segment with, 1 root document, with 3 nested sub docs - Document document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); - 
documents.add(document); - document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); - documents.add(document); - document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); - documents.add(document); - document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE)); - documents.add(document); - indexWriter.addDocuments(documents); - indexWriter.commit(); - - documents.clear(); - // 1 segment with: - // 1 document, with 1 nested subdoc - document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); - documents.add(document); - document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE)); - documents.add(document); - indexWriter.addDocuments(documents); - documents.clear(); - // and 1 document, with 1 nested subdoc - document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); - documents.add(document); - document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.FIELD_TYPE)); - document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE)); - documents.add(document); - indexWriter.addDocuments(documents); - - indexWriter.commit(); - indexWriter.close(); - - IndexService indexService = createIndex("test"); - DirectoryReader directoryReader = DirectoryReader.open(directory); - directoryReader = ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexService.index(), 0)); - IndexSearcher searcher = new IndexSearcher(directoryReader); - - indexService.mapperService().merge("test", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("test", "nested_field", "type=nested").string()), MapperService.MergeReason.MAPPING_UPDATE, false); - SearchContext context = createSearchContext(indexService); - - AggregatorFactories.Builder builder = AggregatorFactories.builder(); - NestedAggregationBuilder factory = new NestedAggregationBuilder("test", "nested_field"); - builder.addAggregator(factory); - AggregatorFactories factories = builder.build(context, null); - context.aggregations(new SearchContextAggregations(factories)); - Aggregator[] aggs = factories.createTopLevelAggregators(); - BucketCollector collector = BucketCollector.wrap(Arrays.asList(aggs)); - collector.preCollection(); - // A regular search always exclude nested docs, so we use NonNestedDocsFilter.INSTANCE here (otherwise MatchAllDocsQuery would be sufficient) - // We exclude root doc with uid type#2, this will trigger the bug if we don't reset the root doc when we process a new segment, because - // root doc type#3 and root doc type#1 have the same segment docid - 
BooleanQuery.Builder bq = new BooleanQuery.Builder(); - bq.add(Queries.newNonNestedFilter(), Occur.MUST); - bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), Occur.MUST_NOT); - searcher.search(new ConstantScoreQuery(bq.build()), collector); - collector.postCollection(); - - Nested nested = (Nested) aggs[0].buildAggregation(0); - // The bug manifests if 6 docs are returned, because currentRootDoc isn't reset the previous child docs from the first segment are emitted as hits. - assertThat(nested.getDocCount(), equalTo(4L)); - - directoryReader.close(); - directory.close(); + try (Directory directory = newDirectory()) { + try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, iwc)) { + List documents = new ArrayList<>(); + + // 1 segment with, 1 root document, with 3 nested sub docs + Document document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); + documents.add(document); + document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); + documents.add(document); + document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); + documents.add(document); + document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE)); + documents.add(document); + iw.addDocuments(documents); + iw.commit(); + + documents.clear(); + // 1 segment with: + // 1 document, with 1 nested subdoc + document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); + documents.add(document); + document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE)); + documents.add(document); + iw.addDocuments(documents); + documents.clear(); + // and 1 document, with 1 nested subdoc + document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); + documents.add(document); + document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE)); + documents.add(document); + iw.addDocuments(documents); + + iw.commit(); + iw.close(); + } + try (IndexReader indexReader = DirectoryReader.open(directory)) { + + NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG, + "nested_field"); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType( + NumberFieldMapper.NumberType.LONG); + fieldType.setName(VALUE_FIELD_NAME); + + BooleanQuery.Builder bq = new BooleanQuery.Builder(); + 
bq.add(Queries.newNonNestedFilter(), BooleanClause.Occur.MUST); + bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), BooleanClause.Occur.MUST_NOT); + + Nested nested = search(newSearcher(indexReader, true, true), + new ConstantScoreQuery(bq.build()), nestedBuilder, fieldType); + + assertEquals(NESTED_AGG, nested.getName()); + // The bug manifests if 6 docs are returned, because currentRootDoc isn't reset the previous child docs from the first segment are emitted as hits. + assertEquals(4L, nested.getDocCount()); + } + } + } + + private double generateMaxDocs(List documents, int numNestedDocs, int id, String path, String fieldName) { + return DoubleStream.of(generateDocuments(documents, numNestedDocs, id, path, fieldName)) + .max().orElse(Double.NEGATIVE_INFINITY); + } + + private double generateSumDocs(List documents, int numNestedDocs, int id, String path, String fieldName) { + return DoubleStream.of(generateDocuments(documents, numNestedDocs, id, path, fieldName)).sum(); + } + + private double[] generateDocuments(List documents, int numNestedDocs, int id, String path, String fieldName) { + + double[] values = new double[numNestedDocs]; + for (int nested = 0; nested < numNestedDocs; nested++) { + Document document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#" + id, + UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "__" + path, + TypeFieldMapper.Defaults.FIELD_TYPE)); + long value = randomNonNegativeLong() % 10000; + document.add(new SortedNumericDocValuesField(fieldName, value)); + documents.add(document); + values[nested] = value; + } + return values; } } diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/NestedChildrenFilterTests.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/NestedChildrenFilterTests.java deleted file mode 100644 index c4c180ab8580d..0000000000000 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/NestedChildrenFilterTests.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.search.fetch.subphase; - -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.LegacyIntField; -import org.apache.lucene.document.StringField; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.TotalHitCountCollector; -import org.apache.lucene.search.Weight; -import org.apache.lucene.search.join.BitSetProducer; -import org.apache.lucene.search.join.QueryBitSetProducer; -import org.apache.lucene.store.Directory; -import org.elasticsearch.search.fetch.FetchSubPhase; -import org.elasticsearch.search.fetch.subphase.InnerHitsContext.NestedInnerHits.NestedChildrenQuery; -import org.elasticsearch.test.ESTestCase; - -import java.util.ArrayList; -import java.util.List; - -import static org.hamcrest.Matchers.equalTo; - -public class NestedChildrenFilterTests extends ESTestCase { - public void testNestedChildrenFilter() throws Exception { - int numParentDocs = scaledRandomIntBetween(0, 32); - int maxChildDocsPerParent = scaledRandomIntBetween(8, 16); - - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random(), dir); - for (int i = 0; i < numParentDocs; i++) { - int numChildDocs = scaledRandomIntBetween(0, maxChildDocsPerParent); - List docs = new ArrayList<>(numChildDocs + 1); - for (int j = 0; j < numChildDocs; j++) { - Document childDoc = new Document(); - childDoc.add(new StringField("type", "child", Field.Store.NO)); - docs.add(childDoc); - } - - Document parenDoc = new Document(); - parenDoc.add(new StringField("type", "parent", Field.Store.NO)); - parenDoc.add(new LegacyIntField("num_child_docs", numChildDocs, Field.Store.YES)); - docs.add(parenDoc); - writer.addDocuments(docs); - } - - IndexReader reader = writer.getReader(); - writer.close(); - - IndexSearcher searcher = new IndexSearcher(reader); - FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); - BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("type", "parent"))); - Query childFilter = new TermQuery(new Term("type", "child")); - int checkedParents = 0; - final Weight parentsWeight = searcher.createNormalizedWeight(new TermQuery(new Term("type", "parent")), false); - for (LeafReaderContext leaf : reader.leaves()) { - DocIdSetIterator parents = parentsWeight.scorer(leaf).iterator(); - for (int parentDoc = parents.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS ; parentDoc = parents.nextDoc()) { - int expectedChildDocs = leaf.reader().document(parentDoc).getField("num_child_docs").numericValue().intValue(); - hitContext.reset(null, leaf, parentDoc, searcher); - NestedChildrenQuery nestedChildrenFilter = new NestedChildrenQuery(parentFilter, childFilter, hitContext); - TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); - searcher.search(new ConstantScoreQuery(nestedChildrenFilter), totalHitCountCollector); - assertThat(totalHitCountCollector.getTotalHits(), equalTo(expectedChildDocs)); - checkedParents++; - } - } - assertThat(checkedParents, equalTo(numParentDocs)); - reader.close(); - dir.close(); - } - -} diff --git 
a/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java index 027781a9b2b54..12a64d80a1489 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java @@ -51,7 +51,8 @@ protected Collection> nodePlugins() { } public void testSimpleBoundingBoxTest() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("location").field("type", "geo_point"); @@ -122,7 +123,8 @@ public void testSimpleBoundingBoxTest() throws Exception { } public void testLimit2BoundingBox() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("location").field("type", "geo_point"); @@ -174,7 +176,8 @@ public void testLimit2BoundingBox() throws Exception { } public void testCompleteLonRange() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("location").field("type", "geo_point"); diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java index 3594f51c722bc..fdaf31264335d 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java @@ -101,7 +101,8 @@ static Double distanceScript(Map vars, Function> nodePlugins() { @Override protected void setupSuiteScopeCluster() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("location").field("type", "geo_point"); diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java index a1cc9b4d4dced..6eff821c5c35b 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java +++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java @@ -58,7 +58,8 @@ protected Collection> nodePlugins() { } public void 
testDistanceSortingMVFields() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("locations").field("type", "geo_point"); @@ -187,7 +188,8 @@ public void testDistanceSortingMVFields() throws Exception { // Regression bug: // https://github.com/elastic/elasticsearch/issues/2851 public void testDistanceSortingWithMissingGeoPoint() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("locations").field("type", "geo_point"); @@ -231,7 +233,8 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception { } public void testDistanceSortingNestedFields() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("company").startObject("properties") .startObject("name").field("type", "text").endObject().startObject("branches").field("type", "nested") @@ -379,7 +382,8 @@ public void testDistanceSortingNestedFields() throws Exception { * Issue 3073 */ public void testGeoDistanceFilter() throws IOException { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); double lat = 40.720611; double lon = -73.998776; diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index 6529b990255c3..200043a6668ab 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -69,7 +69,8 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce * |___________________________ * 1 2 3 4 5 6 7 */ - Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = randomBoolean() ? 
Version.CURRENT + : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -134,7 +135,8 @@ public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedExc * d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 * d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 */ - Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = randomBoolean() ? Version.CURRENT + : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -194,7 +196,8 @@ public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept * |______________________ * 1 2 3 4 5 6 */ - Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = randomBoolean() ? Version.CURRENT + : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java index 15c05a5622625..a9639c7b465ac 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.suggest.document.ContextSuggestField; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -31,6 +32,7 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; @@ -121,6 +123,106 @@ public void testIndexingWithSimpleContexts() throws Exception { assertContextSuggestFields(fields, 3); } + public void testIndexingWithSimpleNumberContexts() throws Exception { + String mapping = jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + 
.endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion"); + MappedFieldType completionFieldType = fieldMapper.fieldType(); + ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", jsonBuilder() + .startObject() + .startArray("completion") + .startObject() + .array("input", "suggestion5", "suggestion6", "suggestion7") + .startObject("contexts") + .field("ctx", 100) + .endObject() + .field("weight", 5) + .endObject() + .endArray() + .endObject() + .bytes(), + XContentType.JSON)); + IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); + assertContextSuggestFields(fields, 3); + } + + public void testIndexingWithSimpleBooleanContexts() throws Exception { + String mapping = jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + .endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion"); + MappedFieldType completionFieldType = fieldMapper.fieldType(); + ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", jsonBuilder() + .startObject() + .startArray("completion") + .startObject() + .array("input", "suggestion5", "suggestion6", "suggestion7") + .startObject("contexts") + .field("ctx", true) + .endObject() + .field("weight", 5) + .endObject() + .endArray() + .endObject() + .bytes(), + XContentType.JSON)); + IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); + assertContextSuggestFields(fields, 3); + } + + public void testIndexingWithSimpleNULLContexts() throws Exception { + String mapping = jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + .endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + XContentBuilder builder = jsonBuilder() + .startObject() + .startArray("completion") + .startObject() + .array("input", "suggestion5", "suggestion6", "suggestion7") + .startObject("contexts") + .nullField("ctx") + .endObject() + .field("weight", 5) + .endObject() + .endArray() + .endObject(); + + Exception e = expectThrows(MapperParsingException.class, + () -> defaultMapper.parse(SourceToParse.source("test", "type1", "1", builder.bytes(), XContentType.JSON))); + assertEquals("contexts must be a string, number or boolean or a list of string, number or boolean, but was [VALUE_NULL]", e.getCause().getMessage()); + } + public void testIndexingWithContextList() throws Exception { String mapping = jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("completion") @@ -153,6 +255,68 @@ public void testIndexingWithContextList() 
throws Exception { assertContextSuggestFields(fields, 3); } + public void testIndexingWithMixedTypeContextList() throws Exception { + String mapping = jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + .endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion"); + MappedFieldType completionFieldType = fieldMapper.fieldType(); + ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", jsonBuilder() + .startObject() + .startObject("completion") + .array("input", "suggestion5", "suggestion6", "suggestion7") + .startObject("contexts") + .array("ctx", "ctx1", true, 100) + .endObject() + .field("weight", 5) + .endObject() + .endObject() + .bytes(), + XContentType.JSON)); + IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); + assertContextSuggestFields(fields, 3); + } + + public void testIndexingWithMixedTypeContextListHavingNULL() throws Exception { + String mapping = jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + .endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + XContentBuilder builder = jsonBuilder() + .startObject() + .startObject("completion") + .array("input", "suggestion5", "suggestion6", "suggestion7") + .startObject("contexts") + .array("ctx", "ctx1", true, 100, null) + .endObject() + .field("weight", 5) + .endObject() + .endObject(); + + Exception e = expectThrows(MapperParsingException.class, + () -> defaultMapper.parse(SourceToParse.source("test", "type1", "1", builder.bytes(), XContentType.JSON))); + assertEquals("context array must have string, number or boolean values, but was [VALUE_NULL]", e.getCause().getMessage()); + } + public void testIndexingWithMultipleContexts() throws Exception { String mapping = jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("completion") @@ -203,6 +367,37 @@ public void testQueryContextParsingBasic() throws Exception { assertThat(internalQueryContexts.get(0).isPrefix, equalTo(false)); } + public void testBooleanQueryContextParsingBasic() throws Exception { + XContentBuilder builder = jsonBuilder().value(true); + XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes()); + CategoryContextMapping mapping = ContextBuilder.category("cat").build(); + List internalQueryContexts = mapping.parseQueryContext(createParseContext(parser)); + assertThat(internalQueryContexts.size(), equalTo(1)); + assertThat(internalQueryContexts.get(0).context, equalTo("true")); + assertThat(internalQueryContexts.get(0).boost, equalTo(1)); + assertThat(internalQueryContexts.get(0).isPrefix, equalTo(false)); + } + + public void testNumberQueryContextParsingBasic() throws Exception { + XContentBuilder builder = jsonBuilder().value(10); + XContentParser 
parser = createParser(JsonXContent.jsonXContent, builder.bytes()); + CategoryContextMapping mapping = ContextBuilder.category("cat").build(); + List internalQueryContexts = mapping.parseQueryContext(createParseContext(parser)); + assertThat(internalQueryContexts.size(), equalTo(1)); + assertThat(internalQueryContexts.get(0).context, equalTo("10")); + assertThat(internalQueryContexts.get(0).boost, equalTo(1)); + assertThat(internalQueryContexts.get(0).isPrefix, equalTo(false)); + } + + public void testNULLQueryContextParsingBasic() throws Exception { + XContentBuilder builder = jsonBuilder().nullValue(); + XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes()); + CategoryContextMapping mapping = ContextBuilder.category("cat").build(); + + Exception e = expectThrows(ElasticsearchParseException.class, () -> mapping.parseQueryContext(createParseContext(parser))); + assertEquals("category context must be an object, string, number or boolean", e.getMessage()); + } + public void testQueryContextParsingArray() throws Exception { XContentBuilder builder = jsonBuilder().startArray() .value("context1") @@ -220,6 +415,46 @@ public void testQueryContextParsingArray() throws Exception { assertThat(internalQueryContexts.get(1).isPrefix, equalTo(false)); } + public void testQueryContextParsingMixedTypeValuesArray() throws Exception { + XContentBuilder builder = jsonBuilder().startArray() + .value("context1") + .value("context2") + .value(true) + .value(10) + .endArray(); + XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes()); + CategoryContextMapping mapping = ContextBuilder.category("cat").build(); + List internalQueryContexts = mapping.parseQueryContext(createParseContext(parser)); + assertThat(internalQueryContexts.size(), equalTo(4)); + assertThat(internalQueryContexts.get(0).context, equalTo("context1")); + assertThat(internalQueryContexts.get(0).boost, equalTo(1)); + assertThat(internalQueryContexts.get(0).isPrefix, equalTo(false)); + assertThat(internalQueryContexts.get(1).context, equalTo("context2")); + assertThat(internalQueryContexts.get(1).boost, equalTo(1)); + assertThat(internalQueryContexts.get(1).isPrefix, equalTo(false)); + assertThat(internalQueryContexts.get(2).context, equalTo("true")); + assertThat(internalQueryContexts.get(2).boost, equalTo(1)); + assertThat(internalQueryContexts.get(2).isPrefix, equalTo(false)); + assertThat(internalQueryContexts.get(3).context, equalTo("10")); + assertThat(internalQueryContexts.get(3).boost, equalTo(1)); + assertThat(internalQueryContexts.get(3).isPrefix, equalTo(false)); + } + + public void testQueryContextParsingMixedTypeValuesArrayHavingNULL() throws Exception { + XContentBuilder builder = jsonBuilder().startArray() + .value("context1") + .value("context2") + .value(true) + .value(10) + .nullValue() + .endArray(); + XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes()); + CategoryContextMapping mapping = ContextBuilder.category("cat").build(); + + Exception e = expectThrows(ElasticsearchParseException.class, () -> mapping.parseQueryContext(createParseContext(parser))); + assertEquals("category context must be an object, string, number or boolean", e.getMessage()); + } + public void testQueryContextParsingObject() throws Exception { XContentBuilder builder = jsonBuilder().startObject() .field("context", "context1") @@ -235,6 +470,48 @@ public void testQueryContextParsingObject() throws Exception { assertThat(internalQueryContexts.get(0).isPrefix, equalTo(true)); } + 
public void testQueryContextParsingObjectHavingBoolean() throws Exception { + XContentBuilder builder = jsonBuilder().startObject() + .field("context", false) + .field("boost", 10) + .field("prefix", true) + .endObject(); + XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes()); + CategoryContextMapping mapping = ContextBuilder.category("cat").build(); + List internalQueryContexts = mapping.parseQueryContext(createParseContext(parser)); + assertThat(internalQueryContexts.size(), equalTo(1)); + assertThat(internalQueryContexts.get(0).context, equalTo("false")); + assertThat(internalQueryContexts.get(0).boost, equalTo(10)); + assertThat(internalQueryContexts.get(0).isPrefix, equalTo(true)); + } + + public void testQueryContextParsingObjectHavingNumber() throws Exception { + XContentBuilder builder = jsonBuilder().startObject() + .field("context", 333) + .field("boost", 10) + .field("prefix", true) + .endObject(); + XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes()); + CategoryContextMapping mapping = ContextBuilder.category("cat").build(); + List internalQueryContexts = mapping.parseQueryContext(createParseContext(parser)); + assertThat(internalQueryContexts.size(), equalTo(1)); + assertThat(internalQueryContexts.get(0).context, equalTo("333")); + assertThat(internalQueryContexts.get(0).boost, equalTo(10)); + assertThat(internalQueryContexts.get(0).isPrefix, equalTo(true)); + } + + public void testQueryContextParsingObjectHavingNULL() throws Exception { + XContentBuilder builder = jsonBuilder().startObject() + .nullField("context") + .field("boost", 10) + .field("prefix", true) + .endObject(); + XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes()); + CategoryContextMapping mapping = ContextBuilder.category("cat").build(); + + Exception e = expectThrows(ElasticsearchParseException.class, () -> mapping.parseQueryContext(createParseContext(parser))); + assertEquals("category context must be a string, number or boolean", e.getMessage()); + } public void testQueryContextParsingObjectArray() throws Exception { XContentBuilder builder = jsonBuilder().startArray() @@ -261,6 +538,82 @@ public void testQueryContextParsingObjectArray() throws Exception { assertThat(internalQueryContexts.get(1).isPrefix, equalTo(false)); } + public void testQueryContextParsingMixedTypeObjectArray() throws Exception { + XContentBuilder builder = jsonBuilder().startArray() + .startObject() + .field("context", "context1") + .field("boost", 2) + .field("prefix", true) + .endObject() + .startObject() + .field("context", "context2") + .field("boost", 3) + .field("prefix", false) + .endObject() + .startObject() + .field("context", true) + .field("boost", 3) + .field("prefix", false) + .endObject() + .startObject() + .field("context", 333) + .field("boost", 3) + .field("prefix", false) + .endObject() + .endArray(); + XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes()); + CategoryContextMapping mapping = ContextBuilder.category("cat").build(); + List internalQueryContexts = mapping.parseQueryContext(createParseContext(parser)); + assertThat(internalQueryContexts.size(), equalTo(4)); + assertThat(internalQueryContexts.get(0).context, equalTo("context1")); + assertThat(internalQueryContexts.get(0).boost, equalTo(2)); + assertThat(internalQueryContexts.get(0).isPrefix, equalTo(true)); + assertThat(internalQueryContexts.get(1).context, equalTo("context2")); + assertThat(internalQueryContexts.get(1).boost, equalTo(3)); + 
assertThat(internalQueryContexts.get(1).isPrefix, equalTo(false)); + assertThat(internalQueryContexts.get(2).context, equalTo("true")); + assertThat(internalQueryContexts.get(2).boost, equalTo(3)); + assertThat(internalQueryContexts.get(2).isPrefix, equalTo(false)); + assertThat(internalQueryContexts.get(3).context, equalTo("333")); + assertThat(internalQueryContexts.get(3).boost, equalTo(3)); + assertThat(internalQueryContexts.get(3).isPrefix, equalTo(false)); + } + + public void testQueryContextParsingMixedTypeObjectArrayHavingNULL() throws Exception { + XContentBuilder builder = jsonBuilder().startArray() + .startObject() + .field("context", "context1") + .field("boost", 2) + .field("prefix", true) + .endObject() + .startObject() + .field("context", "context2") + .field("boost", 3) + .field("prefix", false) + .endObject() + .startObject() + .field("context", true) + .field("boost", 3) + .field("prefix", false) + .endObject() + .startObject() + .field("context", 333) + .field("boost", 3) + .field("prefix", false) + .endObject() + .startObject() + .nullField("context") + .field("boost", 3) + .field("prefix", false) + .endObject() + .endArray(); + XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes()); + CategoryContextMapping mapping = ContextBuilder.category("cat").build(); + + Exception e = expectThrows(ElasticsearchParseException.class, () -> mapping.parseQueryContext(createParseContext(parser))); + assertEquals("category context must be a string, number or boolean", e.getMessage()); + } + private static QueryParseContext createParseContext(XContentParser parser) { return new QueryParseContext(parser); } @@ -273,17 +626,52 @@ public void testQueryContextParsingMixed() throws Exception { .field("prefix", true) .endObject() .value("context2") + .value(false) + .startObject() + .field("context", 333) + .field("boost", 2) + .field("prefix", true) + .endObject() .endArray(); XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes()); CategoryContextMapping mapping = ContextBuilder.category("cat").build(); List internalQueryContexts = mapping.parseQueryContext(createParseContext(parser)); - assertThat(internalQueryContexts.size(), equalTo(2)); + assertThat(internalQueryContexts.size(), equalTo(4)); assertThat(internalQueryContexts.get(0).context, equalTo("context1")); assertThat(internalQueryContexts.get(0).boost, equalTo(2)); assertThat(internalQueryContexts.get(0).isPrefix, equalTo(true)); assertThat(internalQueryContexts.get(1).context, equalTo("context2")); assertThat(internalQueryContexts.get(1).boost, equalTo(1)); assertThat(internalQueryContexts.get(1).isPrefix, equalTo(false)); + assertThat(internalQueryContexts.get(2).context, equalTo("false")); + assertThat(internalQueryContexts.get(2).boost, equalTo(1)); + assertThat(internalQueryContexts.get(2).isPrefix, equalTo(false)); + assertThat(internalQueryContexts.get(3).context, equalTo("333")); + assertThat(internalQueryContexts.get(3).boost, equalTo(2)); + assertThat(internalQueryContexts.get(3).isPrefix, equalTo(true)); + } + + public void testQueryContextParsingMixedHavingNULL() throws Exception { + XContentBuilder builder = jsonBuilder().startArray() + .startObject() + .field("context", "context1") + .field("boost", 2) + .field("prefix", true) + .endObject() + .value("context2") + .value(false) + .startObject() + .field("context", 333) + .field("boost", 2) + .field("prefix", true) + .endObject() + .nullValue() + .endArray(); + XContentParser parser = 
createParser(JsonXContent.jsonXContent, builder.bytes()); + CategoryContextMapping mapping = ContextBuilder.category("cat").build(); + + Exception e = expectThrows(ElasticsearchParseException.class, () -> mapping.parseQueryContext(createParseContext(parser))); + assertEquals("category context must be an object, string, number or boolean", e.getMessage()); } public void testParsingContextFromDocument() throws Exception { diff --git a/distribution/bwc-zip/build.gradle b/distribution/bwc-zip/build.gradle index ae7d366eab97a..7bb5cce51b60c 100644 --- a/distribution/bwc-zip/build.gradle +++ b/distribution/bwc-zip/build.gradle @@ -65,6 +65,7 @@ task addUpstream(type: LoggedExec) { } task fetchLatest(type: LoggedExec) { + onlyIf { project.gradle.startParameter.isOffline() == false } dependsOn addUpstream workingDir = checkoutDir commandLine = ['git', 'fetch', 'upstream'] diff --git a/docs/plugins/redirects.asciidoc b/docs/plugins/redirects.asciidoc index 0f9c0b40f2c8c..ccda0d9ff06b9 100644 --- a/docs/plugins/redirects.asciidoc +++ b/docs/plugins/redirects.asciidoc @@ -13,12 +13,13 @@ one of the <>. [role="exclude",id="cloud-aws"] === AWS Cloud Plugin -The `cloud-aws` plugin has been split into two separate plugins: +Looking for a hosted solution for Elasticsearch on AWS? Check out http://www.elastic.co/cloud. + +The Elasticsearch `cloud-aws` plugin has been split into two separate plugins: * <> (`discovery-ec2`) * <> (`repository-s3`) - [role="exclude",id="cloud-azure"] === Azure Cloud Plugin diff --git a/docs/reference/aggregations.asciidoc b/docs/reference/aggregations.asciidoc index 586e582a05f8f..f2fdd9a16de82 100644 --- a/docs/reference/aggregations.asciidoc +++ b/docs/reference/aggregations.asciidoc @@ -11,7 +11,7 @@ the execution defines what this document set is (e.g. a top-level aggregation ex query/filters of the search request). There are many different types of aggregations, each with its own purpose and output. To better understand these types, -it is often easier to break them into three main families: +it is often easier to break them into four main families: <>:: A family of aggregations that build buckets, where each bucket is associated with a _key_ and a document diff --git a/docs/reference/cluster/remote-info.asciidoc b/docs/reference/cluster/remote-info.asciidoc new file mode 100644 index 0000000000000..304e7c8de0b4c --- /dev/null +++ b/docs/reference/cluster/remote-info.asciidoc @@ -0,0 +1,35 @@ +[[cluster-remote-info]] +== Remote Cluster Info + +The cluster remote info API allows to retrieve all of the configured +remote cluster information. + +[source,js] +---------------------------------- +GET /_remote/info +---------------------------------- +// CONSOLE + +This command returns returns connection and endpoint information keyed by +the configured remote cluster alias. + +[float] +[[connection-info]] + +`seeds`:: + The configured initial seed transport addresses of the remote cluster. + +`http_addresses`:: + The published http addresses of all connected remote nodes. + +`connected`:: + True if there is at least one connection to the remote cluster. + +`num_nodes_connected`:: + The number of connected nodes in the remote cluster. + +`max_connection_per_cluster`:: + The maximum number of connections maintained for the remote cluster. + +`initial_connect_timeout`:: + The initial connect timeout for remote cluster connections. 
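For readers trying out the remote cluster info endpoint documented above, here is a minimal Java sketch using the low-level REST client; it is not part of the patch. The `localhost:9200` address and the plain `performRequest` call are assumptions for illustration only; the `GET /_remote/info` path and the response fields come from the documentation itself.

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RemoteClusterInfoExample {
    public static void main(String[] args) throws IOException {
        // Assumes a local node on port 9200; if no remote cluster alias is
        // configured the response is simply an empty JSON object.
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Response response = client.performRequest("GET", "/_remote/info");
            // The body is keyed by the remote cluster alias and carries the
            // fields listed above: seeds, http_addresses, connected,
            // num_nodes_connected, max_connection_per_cluster and
            // initial_connect_timeout.
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
--------------------------------------------------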
diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index afb5088d85e0a..873021c420636 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -40,11 +40,6 @@ index settings, aliases, mappings, and index templates. * <> * <> -[float] -[[shadow-replicas]] -== Replica configurations -* <> - [float] [[monitoring]] == Monitoring: @@ -95,8 +90,6 @@ include::indices/analyze.asciidoc[] include::indices/templates.asciidoc[] -include::indices/shadow-replicas.asciidoc[] - include::indices/stats.asciidoc[] include::indices/segments.asciidoc[] diff --git a/docs/reference/indices/shadow-replicas.asciidoc b/docs/reference/indices/shadow-replicas.asciidoc deleted file mode 100644 index dd255a0e644ed..0000000000000 --- a/docs/reference/indices/shadow-replicas.asciidoc +++ /dev/null @@ -1,124 +0,0 @@ -[[indices-shadow-replicas]] -== Shadow replica indices - -deprecated[5.2.0, Shadow replicas don't see much usage and we are planning to remove them] - -If you would like to use a shared filesystem, you can use the shadow replicas -settings to choose where on disk the data for an index should be kept, as well -as how Elasticsearch should replay operations on all the replica shards of an -index. - -In order to fully utilize the `index.data_path` and `index.shadow_replicas` -settings, you need to allow Elasticsearch to use the same data directory for -multiple instances by setting `node.add_lock_id_to_custom_path` to false in -elasticsearch.yml: - -[source,yaml] --------------------------------------------------- -node.add_lock_id_to_custom_path: false --------------------------------------------------- - -You will also need to indicate to the security manager where the custom indices -will be, so that the correct permissions can be applied. You can do this by -setting the `path.shared_data` setting in elasticsearch.yml: - -[source,yaml] --------------------------------------------------- -path.shared_data: /opt/data --------------------------------------------------- - -This means that Elasticsearch can read and write to files in any subdirectory of -the `path.shared_data` setting. - -You can then create an index with a custom data path, where each node will use -this path for the data: - -[WARNING] -======================== -Because shadow replicas do not index the document on replica shards, it's -possible for the replica's known mapping to be behind the index's known mapping -if the latest cluster state has not yet been processed on the node containing -the replica. Because of this, it is highly recommended to use pre-defined -mappings when using shadow replicas. -======================== - -[source,js] --------------------------------------------------- -PUT /my_index -{ - "index" : { - "number_of_shards" : 1, - "number_of_replicas" : 4, - "data_path": "/opt/data/my_index", - "shadow_replicas": true - } -} --------------------------------------------------- -// CONSOLE -// TEST[skip:no way to configure path.shared_data for /opt/data] - -[WARNING] -======================== -In the above example, the "/opt/data/my_index" path is a shared filesystem that -must be available on every node in the Elasticsearch cluster. You must also -ensure that the Elasticsearch process has the correct permissions to read from -and write to the directory used in the `index.data_path` setting. 
-======================== - -The `data_path` does not have to contain the index name, in this case, -"my_index" was used but it could easily also have been "/opt/data/" - -An index that has been created with the `index.shadow_replicas` setting set to -"true" will not replicate document operations to any of the replica shards, -instead, it will only continually refresh. Once segments are available on the -filesystem where the shadow replica resides (after an Elasticsearch "flush"), a -regular refresh (governed by the `index.refresh_interval`) can be used to make -the new data searchable. - -NOTE: Since documents are only indexed on the primary shard, realtime GET -requests could fail to return a document if executed on the replica shard, -therefore, GET API requests automatically have the `?preference=_primary` flag -set if there is no preference flag already set. - -In order to ensure the data is being synchronized in a fast enough manner, you -may need to tune the flush threshold for the index to a desired number. A flush -is needed to fsync segment files to disk, so they will be visible to all other -replica nodes. Users should test what flush threshold levels they are -comfortable with, as increased flushing can impact indexing performance. - -The Elasticsearch cluster will still detect the loss of a primary shard, and -transform the replica into a primary in this situation. This transformation will -take slightly longer, since no `IndexWriter` is maintained for each shadow -replica. - -Below is the list of settings that can be changed using the update -settings API: - -`index.data_path` (string):: - Path to use for the index's data. Note that by default Elasticsearch will - append the node ordinal by default to the path to ensure multiple instances - of Elasticsearch on the same machine do not share a data directory. - -`index.shadow_replicas`:: - Boolean value indicating this index should use shadow replicas. Defaults to - `false`. - -`index.shared_filesystem`:: - Boolean value indicating this index uses a shared filesystem. Defaults to - the `true` if `index.shadow_replicas` is set to true, `false` otherwise. - -`index.shared_filesystem.recover_on_any_node`:: - Boolean value indicating whether the primary shards for the index should be - allowed to recover on any node in the cluster. If a node holding a copy of - the shard is found, recovery prefers that node. Defaults to `false`. - -=== Node level settings related to shadow replicas - -These are non-dynamic settings that need to be configured in `elasticsearch.yml` - -`node.add_lock_id_to_custom_path`:: - Boolean setting indicating whether Elasticsearch should append the node's - ordinal to the custom data path. For example, if this is enabled and a path - of "/tmp/foo" is used, the first locally-running node will use "/tmp/foo/0", - the second will use "/tmp/foo/1", the third "/tmp/foo/2", etc. Defaults to - `true`. diff --git a/docs/reference/migration/migrate_6_0/indices.asciidoc b/docs/reference/migration/migrate_6_0/indices.asciidoc index 7062ac7cb1e4f..2e198be59cb9e 100644 --- a/docs/reference/migration/migrate_6_0/indices.asciidoc +++ b/docs/reference/migration/migrate_6_0/indices.asciidoc @@ -29,6 +29,11 @@ PUT _template/template_2 // CONSOLE -=== Shadow Replicas are deprecated +=== Shadow Replicas have been removed -<> don't see much usage and we are planning to remove them. +Shadow replicas don't see enough usage, and have been removed. 
This includes the +following settings: + +- `index.shared_filesystem` +- `index.shadow_replicas` +- `node.add_lock_id_to_custom_path` diff --git a/docs/reference/migration/migrate_6_0/settings.asciidoc b/docs/reference/migration/migrate_6_0/settings.asciidoc index ec25ffb601cad..44acb99939445 100644 --- a/docs/reference/migration/migrate_6_0/settings.asciidoc +++ b/docs/reference/migration/migrate_6_0/settings.asciidoc @@ -1,6 +1,32 @@ [[breaking_60_settings_changes]] === Settings changes +==== Duplicate keys in configuration file + +In previous versions of Elasticsearch, the configuration file was allowed to +contain duplicate keys. For example: + +[source,yaml] +-------------------------------------------------- +node: + name: my-node + +node + attr: + rack: my-rack +-------------------------------------------------- + +In Elasticsearch 6.0.0, this is no longer permitted. Instead, this must be +specified in a single key as: + +[source,yaml] +-------------------------------------------------- +node: + name: my-node + attr: + rack: my-rack +-------------------------------------------------- + ==== Coercion of boolean settings Previously, Elasticsearch recognized the strings `true`, `false`, `on`, `off`, `yes`, `no`, `0`, `1` as booleans. Elasticsearch 6.0 diff --git a/docs/reference/modules/scripting/security.asciidoc b/docs/reference/modules/scripting/security.asciidoc index be1806175c1aa..eaeb1c62143f3 100644 --- a/docs/reference/modules/scripting/security.asciidoc +++ b/docs/reference/modules/scripting/security.asciidoc @@ -51,12 +51,11 @@ Bad: [[modules-scripting-security-do-no-weaken]] === Do not weaken script security settings By default Elasticsearch will run inline, stored, and filesystem scripts for -sandboxed languages, namely the scripting language Painless, the template +the builtin languages, namely the scripting language Painless, the template language Mustache, and the expression language Expressions. These *ought* to be safe to expose to trusted users and to your application servers because they -have strong security sandboxes. By default Elasticsearch will only run -filesystem scripts for non-sandboxed languages and enabling them is a poor -choice because: +have strong security sandboxes. The Elasticsearch committers do not support any +non-sandboxed scripting languages and using any would be a poor choice because: 1. This drops a layer of security, leaving only Elasticsearch's builtin <>. 2. Non-sandboxed scripts have unchecked access to Elasticsearch's internals and @@ -130,8 +129,8 @@ in the following form: `${pluginName}_${operation}`. The following example disables scripting for `update` and `plugin` operations, regardless of the script source or language. Scripts can still be executed -from sandboxed languages as part of `aggregations`, `search` and plugins -execution though, as the above defaults still get applied. +as part of `aggregations`, `search` and plugins execution though, as the above +defaults still get applied. [source,yaml] ----------------------------------- diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index eccba57dee1fe..41ba6e5c87ab8 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -60,9 +60,10 @@ GET /_search?q=tag:wow // CONSOLE // TEST[setup:twitter] -By default elasticsearch rejects search requests that would query more than -1000 shards. 
The reason is that such large numbers of shards make the job of -the coordinating node very CPU and memory intensive. It is usually a better -idea to organize data in such a way that there are fewer larger shards. In -case you would like to bypass this limit, which is discouraged, you can update -the `action.search.shard_count.limit` cluster setting to a greater value. +By default elasticsearch doesn't reject any search requests based on the number +of shards the request hits. While elasticsearch will optimize the search execution +on the coordinating node a large number of shards can have a significant impact +CPU and memory wise. It is usually a better idea to organize data in such a way +that there are fewer larger shards. In case you would like to configure a soft +limit, you can update the `action.search.shard_count.limit` cluster setting in order +to reject search requests that hit too many shards. diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/EqualsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/EqualsTests.java index 2722388f028b4..16995f60dffc9 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/EqualsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/EqualsTests.java @@ -19,6 +19,8 @@ * under the License. */ +import org.apache.lucene.util.Constants; + // TODO: Figure out a way to test autobox caching properly from methods such as Integer.valueOf(int); public class EqualsTests extends ScriptTestCase { public void testTypesEquals() { @@ -130,10 +132,11 @@ public void testBranchEquals() { } public void testBranchEqualsDefAndPrimitive() { + assumeFalse("test fails on Windows", Constants.WINDOWS); assertEquals(true, exec("def x = 1000; int y = 1000; return x == y;")); - exec("def x = 1000; int y = 1000; return x === y;"); + assertEquals(false, exec("def x = 1000; int y = 1000; return x === y;")); assertEquals(true, exec("def x = 1000; int y = 1000; return y == x;")); - exec("def x = 1000; int y = 1000; return y === x;"); + assertEquals(false, exec("def x = 1000; int y = 1000; return y === x;")); } public void testBranchNotEquals() { @@ -147,10 +150,11 @@ public void testBranchNotEquals() { } public void testBranchNotEqualsDefAndPrimitive() { + assumeFalse("test fails on Windows", Constants.WINDOWS); assertEquals(false, exec("def x = 1000; int y = 1000; return x != y;")); - exec("def x = 1000; int y = 1000; return x !== y;"); + assertEquals(true, exec("def x = 1000; int y = 1000; return x !== y;")); assertEquals(false, exec("def x = 1000; int y = 1000; return y != x;")); - exec("def x = 1000; int y = 1000; return y !== x;"); + assertEquals(true, exec("def x = 1000; int y = 1000; return y !== x;")); } public void testRightHandNull() { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index 2aa032154bb18..d8f246b74ef86 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -35,8 +35,6 @@ import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesArray; import 
org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -73,7 +71,6 @@ import org.elasticsearch.script.Script; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.VersionUtils; import org.junit.Before; import java.io.IOException; @@ -85,7 +82,6 @@ import java.util.Map; import java.util.function.Function; -import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery; @@ -485,7 +481,6 @@ private void assertQueryBuilder(BytesRef actual, QueryBuilder expected) throws I } public void testEmptyName() throws Exception { - // after 5.x String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("").field("type", "percolator").endObject().endObject() .endObject().endObject().string(); @@ -495,14 +490,6 @@ public void testEmptyName() throws Exception { () -> parser.parse("type1", new CompressedXContent(mapping)) ); assertThat(e.getMessage(), containsString("name cannot be empty string")); - - // before 5.x - Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5); - Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build(); - DocumentMapperParser parser2x = createIndex("test_old", oldIndexSettings).mapperService().documentMapperParser(); - - DocumentMapper defaultMapper = parser2x.parse("type1", new CompressedXContent(mapping)); - assertEquals(mapping, defaultMapper.mappingSource().string()); } public void testImplicitlySetDefaultScriptLang() throws Exception { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java index 26786bbd74faf..88329f5cb17b4 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; @@ -54,14 +55,14 @@ public TransportRethrottleAction(Settings settings, ThreadPool threadPool, Clust @Override protected void taskOperation(RethrottleRequest request, BulkByScrollTask task, ActionListener listener) { - rethrottle(clusterService.localNode().getId(), client, task, request.getRequestsPerSecond(), listener); + rethrottle(logger, clusterService.localNode().getId(), client, task, request.getRequestsPerSecond(), listener); } - static void rethrottle(String localNodeId, Client client, BulkByScrollTask task, float newRequestsPerSecond, + static void rethrottle(Logger logger, String localNodeId, Client client, BulkByScrollTask task, float newRequestsPerSecond, ActionListener listener) { int runningSubTasks = task.runningSliceSubTasks(); if (runningSubTasks == 0) { - // Nothing to do, all sub tasks are done + logger.debug("rethrottling local task [{}] to [{}] requests per second", task.getId(), newRequestsPerSecond); 
task.rethrottle(newRequestsPerSecond); listener.onResponse(task.taskInfo(localNodeId, true)); return; @@ -69,6 +70,7 @@ static void rethrottle(String localNodeId, Client client, BulkByScrollTask task, RethrottleRequest subRequest = new RethrottleRequest(); subRequest.setRequestsPerSecond(newRequestsPerSecond / runningSubTasks); subRequest.setParentTaskId(new TaskId(localNodeId, task.getId())); + logger.debug("rethrottling children of task [{}] to [{}] requests per second", task.getId(), subRequest.getRequestsPerSecond()); client.execute(RethrottleAction.INSTANCE, subRequest, ActionListener.wrap(r -> { r.rethrowFailures("Rethrottle"); listener.onResponse(task.getInfoGivenSliceInfo(localNodeId, r.getTasks())); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java index 036de6f0e22cc..1cd1df230a4e2 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -69,7 +69,7 @@ static Map initialSearchParams(SearchRequest searchRequest, Vers if (searchRequest.source().sorts() != null) { boolean useScan = false; // Detect if we should use search_type=scan rather than a sort - if (remoteVersion.before(Version.V_2_1_0)) { + if (remoteVersion.before(Version.fromId(2010099))) { for (SortBuilder sort : searchRequest.source().sorts()) { if (sort instanceof FieldSortBuilder) { FieldSortBuilder f = (FieldSortBuilder) sort; @@ -90,7 +90,7 @@ static Map initialSearchParams(SearchRequest searchRequest, Vers params.put("sort", sorts.toString()); } } - if (remoteVersion.before(Version.V_2_0_0)) { + if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 need prompting to return interesting fields. Note that timestamp isn't available at all.... 
searchRequest.source().storedField("_parent").storedField("_routing").storedField("_ttl"); } @@ -172,7 +172,7 @@ static Map scrollParams(TimeValue keepAlive) { } static HttpEntity scrollEntity(String scroll, Version remoteVersion) { - if (remoteVersion.before(Version.V_2_0_0)) { + if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 extract the plain scroll_id from the body return new StringEntity(scroll, ContentType.TEXT_PLAIN); } @@ -186,7 +186,7 @@ static HttpEntity scrollEntity(String scroll, Version remoteVersion) { } static HttpEntity clearScrollEntity(String scroll, Version remoteVersion) { - if (remoteVersion.before(Version.V_2_0_0)) { + if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 extract the plain scroll_id from the body return new StringEntity(scroll, ContentType.TEXT_PLAIN); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java index 974fd9438d2b6..6b7b6ca3aa070 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -128,7 +128,8 @@ public void onFailure(Exception e) { private void logFailure(Exception e) { if (e instanceof ResponseException) { ResponseException re = (ResponseException) e; - if (remoteVersion.before(Version.V_2_0_0) && re.getResponse().getStatusLine().getStatusCode() == 404) { + if (remoteVersion.before(Version.fromId(2000099)) + && re.getResponse().getStatusLine().getStatusCode() == 404) { logger.debug((Supplier) () -> new ParameterizedMessage( "Failed to clear scroll [{}] from pre-2.0 Elasticsearch. This is normal if the request terminated " + "normally as the scroll has already been cleared automatically.", scrollId), e); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java index 2785d53507899..436ebec439b51 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.ingest.IngestTestPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.hamcrest.Matcher; import org.junit.Before; @@ -60,6 +61,7 @@ * different cancellation places - that is the responsibility of AsyncBulkByScrollActionTests which have more precise control to * simulate failures but do not exercise important portion of the stack like transport and task management. 
*/ +@TestLogging("org.elasticsearch.action.bulk.byscroll:DEBUG,org.elasticsearch.index.reindex:DEBUG") public class CancelTests extends ReindexTestCase { protected static final String INDEX = "reindex-cancel-index"; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java index 14ae9b5abb842..b5572c1f34f30 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java @@ -73,7 +73,7 @@ private void rethrottleTestCase(int runningSlices, Consumer listener = mock(ActionListener.class); - TransportRethrottleAction.rethrottle(localNodeId, client, task, newRequestsPerSecond, listener); + TransportRethrottleAction.rethrottle(logger, localNodeId, client, task, newRequestsPerSecond, listener); // Capture the sub request and the listener so we can verify they are sane ArgumentCaptor subRequest = ArgumentCaptor.forClass(RethrottleRequest.class); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index 779c89f7ee889..8c082227f8686 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -96,12 +96,12 @@ public void testInitialSearchParamsSort() { SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); // Test sort:_doc for versions that support it. - Version remoteVersion = Version.fromId(between(Version.V_2_1_0_ID, Version.CURRENT.id)); + Version remoteVersion = Version.fromId(between(2010099, Version.CURRENT.id)); searchRequest.source().sort("_doc"); assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("sort", "_doc:asc")); // Test search_type scan for versions that don't support sort:_doc. - remoteVersion = Version.fromId(between(0, Version.V_2_1_0_ID - 1)); + remoteVersion = Version.fromId(between(0, 2010099 - 1)); assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("search_type", "scan")); // Test sorting by some field. Version doesn't matter. 
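The reindex-from-remote changes in this area replace the removed 2.x `Version` constants with raw ids such as `Version.fromId(2000099)`, `Version.fromId(2010099)` and `Version.fromId(2030399)`. As an illustration (a small self-contained sketch, not code from the patch), the encoding can be read off the ids themselves: 2030399 corresponds to 2.3.3, and the trailing 99 appears to mark a GA release.

[source,java]
--------------------------------------------------
// Illustration only: shows how the numeric ids passed to Version.fromId(...)
// in the reindex remote-version checks can be derived from a version number.
public final class LegacyVersionIds {

    private LegacyVersionIds() {}

    // major.minor.revision encoded as decimal digit groups; the trailing 99
    // appears to denote a GA release of that version.
    static int releaseId(int major, int minor, int revision) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + 99;
    }

    public static void main(String[] args) {
        System.out.println(releaseId(2, 0, 0)); // 2000099, used for the pre-2.0.0 checks above
        System.out.println(releaseId(2, 1, 0)); // 2010099, the sort:_doc cut-over
        System.out.println(releaseId(2, 3, 3)); // 2030399, asserted in RemoteScrollableHitSourceTests below
    }
}
--------------------------------------------------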
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index 2a67306425c29..f63b05e96beb7 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -144,7 +144,7 @@ public void testLookupRemoteVersion() throws Exception { assertTrue(called.get()); called.set(false); sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/2_3_3.json").lookupRemoteVersion(v -> { - assertEquals(Version.V_2_3_3, v); + assertEquals(Version.fromId(2030399), v); called.set(true); }); assertTrue(called.get()); diff --git a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java index e16a8f05203dd..6afefc5cf0361 100644 --- a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java +++ b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java @@ -22,10 +22,7 @@ import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; @@ -39,7 +36,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.VersionUtils; import org.junit.Before; import java.util.Arrays; @@ -47,7 +43,6 @@ import java.util.Collections; import java.util.function.Supplier; -import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.hamcrest.Matchers.containsString; public class Murmur3FieldMapperTests extends ESSingleNodeTestCase { @@ -157,20 +152,5 @@ public void testEmptyName() throws Exception { () -> parser.parse("type", new CompressedXContent(mapping)) ); assertThat(e.getMessage(), containsString("name cannot be empty string")); - - // before 5.x - Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5); - Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build(); - IndexService indexService2x = createIndex("test_old", oldIndexSettings); - - Supplier queryShardContext = () -> { - return indexService2x.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }); - }; - DocumentMapperParser parser = new DocumentMapperParser(indexService2x.getIndexSettings(), indexService2x.mapperService(), - indexService2x.getIndexAnalyzers(), indexService2x.xContentRegistry(), indexService2x.similarityService(), mapperRegistry, - queryShardContext); - - DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); - assertEquals(mapping, defaultMapper.mappingSource().string()); } } diff --git a/plugins/repository-s3/build.gradle 
b/plugins/repository-s3/build.gradle index aab56b97c465d..50f2ac571a424 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -54,6 +54,11 @@ bundlePlugin { } } +integTestCluster { + keystoreSetting 's3.client.default.access_key', 'myaccesskey' + keystoreSetting 's3.client.default.secret_key', 'mysecretkey' +} + thirdPartyAudit.excludes = [ // classes are missing 'javax.servlet.ServletContextEvent', diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java index e91faa2ebf288..872e713c54607 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java @@ -150,5 +150,5 @@ interface CLOUD_S3 { /** * Creates an {@code AmazonS3} client from the given repository metadata and node settings. */ - AmazonS3 client(RepositoryMetaData metadata, Settings repositorySettings); + AmazonS3 client(Settings repositorySettings); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java index a9dbb61c44d2f..eb2f22782f449 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java @@ -38,13 +38,11 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import static org.elasticsearch.repositories.s3.S3Repository.getValue; @@ -53,92 +51,84 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se // pkg private for tests static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); - /** - * (acceskey, endpoint) -> client - */ - private Map, AmazonS3Client> clients = new HashMap<>(); + private final Map clientsSettings; - InternalAwsS3Service(Settings settings) { + private final Map clientsCache = new HashMap<>(); + + InternalAwsS3Service(Settings settings, Map clientsSettings) { super(settings); + this.clientsSettings = clientsSettings; } @Override - public synchronized AmazonS3 client(RepositoryMetaData metadata, Settings repositorySettings) { + public synchronized AmazonS3 client(Settings repositorySettings) { String clientName = CLIENT_NAME.get(repositorySettings); - String foundEndpoint = findEndpoint(logger, repositorySettings, settings, clientName); - - AWSCredentialsProvider credentials = buildCredentials(logger, deprecationLogger, settings, repositorySettings, clientName); - - Tuple clientDescriptor = new Tuple<>(foundEndpoint, credentials.getCredentials().getAWSAccessKeyId()); - AmazonS3Client client = clients.get(clientDescriptor); + AmazonS3Client client = clientsCache.get(clientName); if (client != null) { return client; } - Integer maxRetries = getValue(metadata.settings(), 
settings, + S3ClientSettings clientSettings = clientsSettings.get(clientName); + if (clientSettings == null) { + throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. " + + "Existing client configs: " + + Strings.collectionToDelimitedString(clientsSettings.keySet(), ",")); + } + + String endpoint = findEndpoint(logger, clientSettings, repositorySettings); + Integer maxRetries = getValue(repositorySettings, settings, S3Repository.Repository.MAX_RETRIES_SETTING, S3Repository.Repositories.MAX_RETRIES_SETTING); - boolean useThrottleRetries = getValue(metadata.settings(), settings, + boolean useThrottleRetries = getValue(repositorySettings, settings, S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING, S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING); // If the user defined a path style access setting, we rely on it, // otherwise we use the default value set by the SDK Boolean pathStyleAccess = null; - if (S3Repository.Repository.PATH_STYLE_ACCESS_SETTING.exists(metadata.settings()) || - S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING.exists(settings)) { - pathStyleAccess = getValue(metadata.settings(), settings, + if (S3Repository.Repository.PATH_STYLE_ACCESS_SETTING.exists(repositorySettings) || + S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING.exists(settings)) { + pathStyleAccess = getValue(repositorySettings, settings, S3Repository.Repository.PATH_STYLE_ACCESS_SETTING, S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING); } logger.debug("creating S3 client with client_name [{}], endpoint [{}], max_retries [{}], " + - "use_throttle_retries [{}], path_style_access [{}]", - clientName, foundEndpoint, maxRetries, useThrottleRetries, pathStyleAccess); + "use_throttle_retries [{}], path_style_access [{}]", + clientName, endpoint, maxRetries, useThrottleRetries, pathStyleAccess); + + AWSCredentialsProvider credentials = buildCredentials(logger, deprecationLogger, clientSettings, repositorySettings); + ClientConfiguration configuration = buildConfiguration(logger, clientSettings, repositorySettings, maxRetries, endpoint, useThrottleRetries); - client = new AmazonS3Client( - credentials, - buildConfiguration(logger, repositorySettings, settings, clientName, maxRetries, foundEndpoint, useThrottleRetries)); + client = new AmazonS3Client(credentials, configuration); if (pathStyleAccess != null) { client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(pathStyleAccess)); } - if (!foundEndpoint.isEmpty()) { - client.setEndpoint(foundEndpoint); + if (Strings.hasText(endpoint)) { + client.setEndpoint(endpoint); } - clients.put(clientDescriptor, client); + clientsCache.put(clientName, client); return client; } // pkg private for tests - static ClientConfiguration buildConfiguration(Logger logger, Settings repositorySettings, Settings settings, - String clientName, Integer maxRetries, String endpoint, - boolean useThrottleRetries) { + static ClientConfiguration buildConfiguration(Logger logger, S3ClientSettings clientSettings, Settings repositorySettings, + Integer maxRetries, String endpoint, boolean useThrottleRetries) { ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. 
clientConfiguration.setResponseMetadataCacheSize(0); - Protocol protocol = getConfigValue(repositorySettings, settings, clientName, S3Repository.PROTOCOL_SETTING, - S3Repository.Repository.PROTOCOL_SETTING, S3Repository.Repositories.PROTOCOL_SETTING); + Protocol protocol = getRepoValue(repositorySettings, S3Repository.Repository.PROTOCOL_SETTING, clientSettings.protocol); clientConfiguration.setProtocol(protocol); - String proxyHost = getConfigValue(null, settings, clientName, - S3Repository.PROXY_HOST_SETTING, null, CLOUD_S3.PROXY_HOST_SETTING); - if (Strings.hasText(proxyHost)) { - Integer proxyPort = getConfigValue(null, settings, clientName, - S3Repository.PROXY_PORT_SETTING, null, CLOUD_S3.PROXY_PORT_SETTING); - try (SecureString proxyUsername = getConfigValue(null, settings, clientName, - S3Repository.PROXY_USERNAME_SETTING, null, CLOUD_S3.PROXY_USERNAME_SETTING); - SecureString proxyPassword = getConfigValue(null, settings, clientName, - S3Repository.PROXY_PASSWORD_SETTING, null, CLOUD_S3.PROXY_PASSWORD_SETTING)) { - - clientConfiguration - .withProxyHost(proxyHost) - .withProxyPort(proxyPort) - .withProxyUsername(proxyUsername.toString()) - .withProxyPassword(proxyPassword.toString()); - } + if (Strings.hasText(clientSettings.proxyHost)) { + // TODO: remove this leniency, these settings should exist together and be validated + clientConfiguration.setProxyHost(clientSettings.proxyHost); + clientConfiguration.setProxyPort(clientSettings.proxyPort); + clientConfiguration.setProxyUsername(clientSettings.proxyUsername); + clientConfiguration.setProxyPassword(clientSettings.proxyPassword); } if (maxRetries != null) { @@ -146,64 +136,56 @@ static ClientConfiguration buildConfiguration(Logger logger, Settings repository clientConfiguration.setMaxErrorRetry(maxRetries); } clientConfiguration.setUseThrottleRetries(useThrottleRetries); - - TimeValue readTimeout = getConfigValue(null, settings, clientName, - S3Repository.READ_TIMEOUT_SETTING, null, CLOUD_S3.READ_TIMEOUT); - clientConfiguration.setSocketTimeout((int)readTimeout.millis()); + clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis); return clientConfiguration; } - public static AWSCredentialsProvider buildCredentials(Logger logger, DeprecationLogger deprecationLogger, - Settings settings, Settings repositorySettings, String clientName) { - try (SecureString key = getConfigValue(repositorySettings, settings, clientName, S3Repository.ACCESS_KEY_SETTING, - S3Repository.Repository.KEY_SETTING, S3Repository.Repositories.KEY_SETTING); - SecureString secret = getConfigValue(repositorySettings, settings, clientName, S3Repository.SECRET_KEY_SETTING, - S3Repository.Repository.SECRET_SETTING, S3Repository.Repositories.SECRET_SETTING)) { - - if (key.length() == 0 && secret.length() == 0) { - logger.debug("Using instance profile credentials"); - return new PrivilegedInstanceProfileCredentialsProvider(); - } else { - logger.debug("Using basic key/secret credentials"); - return new StaticCredentialsProvider(new BasicAWSCredentials(key.toString(), secret.toString())); + // pkg private for tests + static AWSCredentialsProvider buildCredentials(Logger logger, DeprecationLogger deprecationLogger, + S3ClientSettings clientSettings, Settings repositorySettings) { + BasicAWSCredentials credentials = clientSettings.credentials; + if (S3Repository.Repository.KEY_SETTING.exists(repositorySettings)) { + if (S3Repository.Repository.SECRET_SETTING.exists(repositorySettings) == false) { + throw new IllegalArgumentException("Repository setting 
[" + S3Repository.Repository.KEY_SETTING + + " must be accompanied by setting [" + S3Repository.Repository.SECRET_SETTING + "]"); + } + // backcompat for reading keys out of repository settings + deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead " + + "store these in named clients and the elasticsearch keystore for secure settings."); + try (SecureString key = S3Repository.Repository.KEY_SETTING.get(repositorySettings); + SecureString secret = S3Repository.Repository.SECRET_SETTING.get(repositorySettings)) { + credentials = new BasicAWSCredentials(key.toString(), secret.toString()); } + } else if (S3Repository.Repository.SECRET_SETTING.exists(repositorySettings)) { + throw new IllegalArgumentException("Repository setting [" + S3Repository.Repository.SECRET_SETTING + + " must be accompanied by setting [" + S3Repository.Repository.KEY_SETTING + "]"); + } + if (credentials == null) { + logger.debug("Using instance profile credentials"); + return new PrivilegedInstanceProfileCredentialsProvider(); + } else { + logger.debug("Using basic key/secret credentials"); + return new StaticCredentialsProvider(credentials); } } // pkg private for tests /** Returns the endpoint the client should use, based on the available endpoint settings found. */ - static String findEndpoint(Logger logger, Settings repositorySettings, Settings settings, String clientName) { - String endpoint = getConfigValue(repositorySettings, settings, clientName, S3Repository.ENDPOINT_SETTING, - S3Repository.Repository.ENDPOINT_SETTING, S3Repository.Repositories.ENDPOINT_SETTING); - if (Strings.isNullOrEmpty(endpoint)) { - // No region has been set so we will use the default endpoint - if (CLOUD_S3.ENDPOINT_SETTING.exists(settings)) { - endpoint = CLOUD_S3.ENDPOINT_SETTING.get(settings); - logger.debug("using explicit s3 endpoint [{}]", endpoint); - } - } else { + static String findEndpoint(Logger logger, S3ClientSettings clientSettings, Settings repositorySettings) { + String endpoint = getRepoValue(repositorySettings, S3Repository.Repository.ENDPOINT_SETTING, clientSettings.endpoint); + if (Strings.hasText(endpoint)) { logger.debug("using repository level endpoint [{}]", endpoint); } - return endpoint; } - /** - * Find the setting value, trying first with named configs, - * then falling back to repository and global repositories settings. - */ - private static T getConfigValue(Settings repositorySettings, Settings globalSettings, String clientName, - Setting.AffixSetting configSetting, Setting repositorySetting, Setting globalSetting) { - Setting concreteSetting = configSetting.getConcreteSettingForNamespace(clientName); - if (concreteSetting.exists(globalSettings)) { - return concreteSetting.get(globalSettings); - } else if (repositorySetting == null) { - // no repository setting, just use global setting - return globalSetting.get(globalSettings); - } else { - return getValue(repositorySettings, globalSettings, repositorySetting, globalSetting); + /** Returns the value for a given setting from the repository, or returns the fallback value. 
*/ + private static T getRepoValue(Settings repositorySettings, Setting repositorySetting, T fallback) { + if (repositorySetting.exists(repositorySettings)) { + return repositorySetting.get(repositorySettings); } + return fallback; } @Override @@ -216,7 +198,7 @@ protected void doStop() throws ElasticsearchException { @Override protected void doClose() throws ElasticsearchException { - for (AmazonS3Client client : clients.values()) { + for (AmazonS3Client client : clientsCache.values()) { client.shutdown(); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java new file mode 100644 index 0000000000000..edaf44289c689 --- /dev/null +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -0,0 +1,183 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.s3; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Set; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.Protocol; +import com.amazonaws.auth.BasicAWSCredentials; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.repositories.s3.AwsS3Service.CLOUD_S3; + +/** + * A container for settings used to create an S3 client. + */ +class S3ClientSettings { + + // prefix for s3 client settings + private static final String PREFIX = "s3.client."; + + /** The access key (ie login id) for connecting to s3. */ + static final Setting.AffixSetting ACCESS_KEY_SETTING = Setting.affixKeySetting(PREFIX, "access_key", + key -> SecureSetting.secureString(key, S3Repository.Repositories.KEY_SETTING)); + + /** The secret key (ie password) for connecting to s3. */ + static final Setting.AffixSetting SECRET_KEY_SETTING = Setting.affixKeySetting(PREFIX, "secret_key", + key -> SecureSetting.secureString(key, S3Repository.Repositories.SECRET_SETTING)); + + /** An override for the s3 endpoint to connect to. */ + static final Setting.AffixSetting ENDPOINT_SETTING = Setting.affixKeySetting(PREFIX, "endpoint", + key -> new Setting<>(key, S3Repository.Repositories.ENDPOINT_SETTING, s -> s.toLowerCase(Locale.ROOT), + Setting.Property.NodeScope)); + + /** The protocol to use to connect to s3. 
*/ + static final Setting.AffixSetting PROTOCOL_SETTING = Setting.affixKeySetting(PREFIX, "protocol", + key -> new Setting<>(key, "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Setting.Property.NodeScope)); + + /** The host name of a proxy to connect to s3 through. */ + static final Setting.AffixSetting PROXY_HOST_SETTING = Setting.affixKeySetting(PREFIX, "proxy.host", + key -> Setting.simpleString(key, Setting.Property.NodeScope)); + + /** The port of a proxy to connect to s3 through. */ + static final Setting.AffixSetting PROXY_PORT_SETTING = Setting.affixKeySetting(PREFIX, "proxy.port", + key -> Setting.intSetting(key, 80, 0, 1<<16, Setting.Property.NodeScope)); + + /** The username of a proxy to connect to s3 through. */ + static final Setting.AffixSetting PROXY_USERNAME_SETTING = Setting.affixKeySetting(PREFIX, "proxy.username", + key -> SecureSetting.secureString(key, AwsS3Service.PROXY_USERNAME_SETTING)); + + /** The password of a proxy to connect to s3 through. */ + static final Setting.AffixSetting PROXY_PASSWORD_SETTING = Setting.affixKeySetting(PREFIX, "proxy.password", + key -> SecureSetting.secureString(key, AwsS3Service.PROXY_PASSWORD_SETTING)); + + /** The socket timeout for connecting to s3. */ + static final Setting.AffixSetting READ_TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "read_timeout", + key -> Setting.timeSetting(key, TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), + Setting.Property.NodeScope)); + + /** Credentials to authenticate with s3. */ + final BasicAWSCredentials credentials; + + /** The s3 endpoint the client should talk to, or empty string to use the default. */ + final String endpoint; + + /** The protocol to use to talk to s3. Defaults to https. */ + final Protocol protocol; + + /** An optional proxy host that requests to s3 should be made through. */ + final String proxyHost; + + /** The port number the proxy host should be connected on. */ + final int proxyPort; + + // these should be "secure" yet the api for the s3 client only takes String, so storing them + // as SecureString here won't really help with anything + /** An optional username for the proxy host, for basic authentication. */ + final String proxyUsername; + + /** An optional password for the proxy host, for basic authentication. */ + final String proxyPassword; + + /** The read timeout for the s3 client. */ + final int readTimeoutMillis; + + private S3ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol, + String proxyHost, int proxyPort, String proxyUsername, + String proxyPassword, int readTimeoutMillis) { + this.credentials = credentials; + this.endpoint = endpoint; + this.protocol = protocol; + this.proxyHost = proxyHost; + this.proxyPort = proxyPort; + this.proxyUsername = proxyUsername; + this.proxyPassword = proxyPassword; + this.readTimeoutMillis = readTimeoutMillis; + } + + /** + * Load all client settings from the given settings. + * + * Note this will always at least return a client named "default". 
+ */ + static Map load(Settings settings) { + Set clientNames = settings.getGroups(PREFIX).keySet(); + Map clients = new HashMap<>(); + for (String clientName : clientNames) { + clients.put(clientName, getClientSettings(settings, clientName)); + } + if (clients.containsKey("default") == false) { + // this won't find any settings under the default client, + // but it will pull all the fallback static settings + clients.put("default", getClientSettings(settings, "default")); + } + return Collections.unmodifiableMap(clients); + } + + // pkg private for tests + /** Parse settings for a single client. */ + static S3ClientSettings getClientSettings(Settings settings, String clientName) { + try (SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING, S3Repository.Repositories.KEY_SETTING); + SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING, S3Repository.Repositories.SECRET_SETTING); + SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING, CLOUD_S3.PROXY_USERNAME_SETTING); + SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING, CLOUD_S3.PROXY_PASSWORD_SETTING)) { + BasicAWSCredentials credentials = null; + if (accessKey.length() != 0) { + if (secretKey.length() != 0) { + credentials = new BasicAWSCredentials(accessKey.toString(), secretKey.toString()); + } else { + throw new IllegalArgumentException("Missing secret key for s3 client [" + clientName + "]"); + } + } else if (secretKey.length() != 0) { + throw new IllegalArgumentException("Missing access key for s3 client [" + clientName + "]"); + } + return new S3ClientSettings( + credentials, + getConfigValue(settings, clientName, ENDPOINT_SETTING, S3Repository.Repositories.ENDPOINT_SETTING), + getConfigValue(settings, clientName, PROTOCOL_SETTING, S3Repository.Repositories.PROTOCOL_SETTING), + getConfigValue(settings, clientName, PROXY_HOST_SETTING, AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING), + getConfigValue(settings, clientName, PROXY_PORT_SETTING, AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING), + proxyUsername.toString(), + proxyPassword.toString(), + (int)getConfigValue(settings, clientName, READ_TIMEOUT_SETTING, AwsS3Service.CLOUD_S3.READ_TIMEOUT).millis() + ); + } + } + + private static T getConfigValue(Settings settings, String clientName, + Setting.AffixSetting clientSetting, + Setting globalSetting) { + Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); + if (concreteSetting.exists(settings)) { + return concreteSetting.get(settings); + } else { + return globalSetting.get(settings); + } + } + +} diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 50e9b998ad6fd..2ce6396465a2a 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -60,45 +60,6 @@ class S3Repository extends BlobStoreRepository { public static final String TYPE = "s3"; - // prefix for s3 client settings - private static final String PREFIX = "s3.client."; - - /** The access key (ie login id) for connecting to s3. */ - public static final AffixSetting ACCESS_KEY_SETTING = Setting.affixKeySetting(PREFIX, "access_key", - key -> SecureSetting.secureString(key, Repositories.KEY_SETTING)); - - /** The secret key (ie password) for connecting to s3. 
*/ - public static final AffixSetting SECRET_KEY_SETTING = Setting.affixKeySetting(PREFIX, "secret_key", - key -> SecureSetting.secureString(key, Repositories.SECRET_SETTING)); - - /** An override for the s3 endpoint to connect to. */ - public static final AffixSetting ENDPOINT_SETTING = Setting.affixKeySetting(PREFIX, "endpoint", - key -> new Setting<>(key, Repositories.ENDPOINT_SETTING, s -> s.toLowerCase(Locale.ROOT), Property.NodeScope)); - - /** The protocol to use to connec to to s3. */ - public static final AffixSetting PROTOCOL_SETTING = Setting.affixKeySetting(PREFIX, "protocol", - key -> new Setting<>(key, "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope)); - - /** The host name of a proxy to connect to s3 through. */ - public static final AffixSetting PROXY_HOST_SETTING = Setting.affixKeySetting(PREFIX, "proxy.host", - key -> Setting.simpleString(key, Property.NodeScope)); - - /** The port of a proxy to connect to s3 through. */ - public static final AffixSetting PROXY_PORT_SETTING = Setting.affixKeySetting(PREFIX, "proxy.port", - key -> Setting.intSetting(key, 80, 0, 1<<16, Property.NodeScope)); - - /** The username of a proxy to connect to s3 through. */ - public static final AffixSetting PROXY_USERNAME_SETTING = Setting.affixKeySetting(PREFIX, "proxy.username", - key -> SecureSetting.secureString(key, AwsS3Service.PROXY_USERNAME_SETTING)); - - /** The password of a proxy to connect to s3 through. */ - public static final AffixSetting PROXY_PASSWORD_SETTING = Setting.affixKeySetting(PREFIX, "proxy.password", - key -> SecureSetting.secureString(key, AwsS3Service.PROXY_PASSWORD_SETTING)); - - /** The socket timeout for connecting to s3. */ - public static final AffixSetting READ_TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "read_timeout", - key -> Setting.timeSetting(key, TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope)); - /** * Global S3 repositories settings. Starting with: repositories.s3 * NOTE: These are legacy settings. Use the named client config settings above. 
@@ -328,7 +289,7 @@ public interface Repository { "buffer_size [{}], cannedACL [{}], storageClass [{}]", bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass); - AmazonS3 client = s3Service.client(metadata, metadata.settings()); + AmazonS3 client = s3Service.client(metadata.settings()); blobStore = new S3BlobStore(settings, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); String basePath = getValue(metadata.settings(), settings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index d27c3481357fd..04814b99e889e 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -57,9 +57,17 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { }); } + private final Map clientsSettings; + + public S3RepositoryPlugin(Settings settings) { + // eagerly load client settings so that secure settings are read + clientsSettings = S3ClientSettings.load(settings); + assert clientsSettings.isEmpty() == false : "always at least have 'default'"; + } + // overridable for tests protected AwsS3Service createStorageService(Settings settings) { - return new InternalAwsS3Service(settings); + return new InternalAwsS3Service(settings, clientsSettings); } @Override @@ -80,15 +88,15 @@ public List> getSettings() { return Arrays.asList( // named s3 client configuration settings - S3Repository.ACCESS_KEY_SETTING, - S3Repository.SECRET_KEY_SETTING, - S3Repository.ENDPOINT_SETTING, - S3Repository.PROTOCOL_SETTING, - S3Repository.PROXY_HOST_SETTING, - S3Repository.PROXY_PORT_SETTING, - S3Repository.PROXY_USERNAME_SETTING, - S3Repository.PROXY_PASSWORD_SETTING, - S3Repository.READ_TIMEOUT_SETTING, + S3ClientSettings.ACCESS_KEY_SETTING, + S3ClientSettings.SECRET_KEY_SETTING, + S3ClientSettings.ENDPOINT_SETTING, + S3ClientSettings.PROTOCOL_SETTING, + S3ClientSettings.PROXY_HOST_SETTING, + S3ClientSettings.PROXY_PORT_SETTING, + S3ClientSettings.PROXY_USERNAME_SETTING, + S3ClientSettings.PROXY_PASSWORD_SETTING, + S3ClientSettings.READ_TIMEOUT_SETTING, // Register global cloud aws settings: cloud.aws (might have been registered in ec2 plugin) AwsS3Service.KEY_SETTING, diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AWSSignersTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AWSSignersTests.java index 51e0f5623a910..cc33fcc243eed 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AWSSignersTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AWSSignersTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.ClientConfiguration; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.s3.AwsSigner; import org.elasticsearch.repositories.s3.S3RepositoryPlugin; import org.elasticsearch.test.ESTestCase; @@ -35,7 +36,7 @@ public class AWSSignersTests extends ESTestCase { */ @BeforeClass public static void instantiatePlugin() { - new S3RepositoryPlugin(); + new S3RepositoryPlugin(Settings.EMPTY); } public void testSigners() { diff --git 
a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java index ea2274e7ccbdd..9b94744883a0b 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java @@ -197,7 +197,7 @@ public void testEncryption() { Settings settings = internalCluster().getInstance(Settings.class); Settings bucket = settings.getByPrefix("repositories.s3."); RepositoryMetaData metadata = new RepositoryMetaData("test-repo", "fs", Settings.EMPTY); - AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(metadata, repositorySettings); + AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(repositorySettings); String bucketName = bucket.get("bucket"); logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath); @@ -464,9 +464,8 @@ public void cleanRepositoryFiles(String basePath) { // We check that settings has been set in elasticsearch.yml integration test file // as described in README - assertThat("Your settings in elasticsearch.yml are incorrects. Check README file.", bucketName, notNullValue()); - RepositoryMetaData metadata = new RepositoryMetaData("test-repo", "fs", Settings.EMPTY); - AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(metadata, + assertThat("Your settings in elasticsearch.yml are incorrect. Check README file.", bucketName, notNullValue()); + AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client( Settings.builder().put(S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING.getKey(), randomBoolean()).build()); try { ObjectListing prevListing = null; diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 7d30ffcc1f579..77dbfd1dc5cdb 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -34,8 +34,8 @@ public class AwsS3ServiceImplTests extends ESTestCase { public void testAWSCredentialsWithSystemProviders() { - AWSCredentialsProvider credentialsProvider = - InternalAwsS3Service.buildCredentials(logger, deprecationLogger, Settings.EMPTY, Settings.EMPTY, "default"); + S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, "default"); + AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, clientSettings, Settings.EMPTY); assertThat(credentialsProvider, instanceOf(InternalAwsS3Service.PrivilegedInstanceProfileCredentialsProvider.class)); } @@ -142,8 +142,12 @@ public void testAWSCredentialsWithElasticsearchRepositoriesSettingsAndRepository .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret") .build(); launchAWSCredentialsWithElasticsearchSettingsTest(repositorySettings, settings, "repository_key", "repository_secret"); - assertSettingDeprecationsAndWarnings( - new Setting[]{S3Repository.Repository.KEY_SETTING, S3Repository.Repository.SECRET_SETTING}); + assertSettingDeprecationsAndWarnings(new Setting[]{ + S3Repository.Repositories.KEY_SETTING, + 
S3Repository.Repositories.SECRET_SETTING, + S3Repository.Repository.KEY_SETTING, + S3Repository.Repository.SECRET_SETTING}, + "Using s3 access/secret key from repository settings. Instead store these in named clients and the elasticsearch keystore for secure settings."); } public void testAWSCredentialsWithElasticsearchAwsAndRepositoriesSettingsAndRepositorySettingsBackcompat() { @@ -155,8 +159,14 @@ public void testAWSCredentialsWithElasticsearchAwsAndRepositoriesSettingsAndRepo .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret") .build(); launchAWSCredentialsWithElasticsearchSettingsTest(repositorySettings, settings, "repository_key", "repository_secret"); - assertSettingDeprecationsAndWarnings( - new Setting[]{S3Repository.Repository.KEY_SETTING, S3Repository.Repository.SECRET_SETTING}); + assertSettingDeprecationsAndWarnings(new Setting[]{ + AwsS3Service.KEY_SETTING, + AwsS3Service.SECRET_SETTING, + S3Repository.Repositories.KEY_SETTING, + S3Repository.Repositories.SECRET_SETTING, + S3Repository.Repository.KEY_SETTING, + S3Repository.Repository.SECRET_SETTING}, + "Using s3 access/secret key from repository settings. Instead store these in named clients and the elasticsearch keystore for secure settings."); } public void testAWSCredentialsWithElasticsearchAwsAndS3AndRepositoriesSettingsAndRepositorySettingsBackcompat() { @@ -170,15 +180,25 @@ public void testAWSCredentialsWithElasticsearchAwsAndS3AndRepositoriesSettingsAn .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret") .build(); launchAWSCredentialsWithElasticsearchSettingsTest(repositorySettings, settings, "repository_key", "repository_secret"); - assertSettingDeprecationsAndWarnings( - new Setting[]{S3Repository.Repository.KEY_SETTING, S3Repository.Repository.SECRET_SETTING}); + assertSettingDeprecationsAndWarnings(new Setting[]{ + AwsS3Service.KEY_SETTING, + AwsS3Service.SECRET_SETTING, + AwsS3Service.CLOUD_S3.KEY_SETTING, + AwsS3Service.CLOUD_S3.SECRET_SETTING, + S3Repository.Repositories.KEY_SETTING, + S3Repository.Repositories.SECRET_SETTING, + S3Repository.Repository.KEY_SETTING, + S3Repository.Repository.SECRET_SETTING}, + "Using s3 access/secret key from repository settings. 
Instead store these in named clients and the elasticsearch keystore for secure settings."); } protected void launchAWSCredentialsWithElasticsearchSettingsTest(Settings singleRepositorySettings, Settings settings, String expectedKey, String expectedSecret) { String configName = InternalAwsS3Service.CLIENT_NAME.get(singleRepositorySettings); - AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, settings, - singleRepositorySettings, configName).getCredentials(); + S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName); + AWSCredentials credentials = InternalAwsS3Service + .buildCredentials(logger, deprecationLogger, clientSettings, singleRepositorySettings) + .getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is(expectedKey)); assertThat(credentials.getAWSSecretKey(), is(expectedSecret)); } @@ -287,8 +307,9 @@ protected void launchAWSConfigurationTest(Settings settings, Boolean useThrottleRetries = S3Repository.getValue(singleRepositorySettings, settings, S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING, S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING); - ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(logger, singleRepositorySettings, settings, - "default", maxRetries, null, useThrottleRetries); + S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default"); + ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(logger, clientSettings, + singleRepositorySettings, maxRetries, null, useThrottleRetries); assertThat(configuration.getResponseMetadataCacheSize(), is(0)); assertThat(configuration.getProtocol(), is(expectedProtocol)); @@ -344,7 +365,8 @@ public void testEndpointSettingBackcompat() { private void assertEndpoint(Settings repositorySettings, Settings settings, String expectedEndpoint) { String configName = InternalAwsS3Service.CLIENT_NAME.get(repositorySettings); - String foundEndpoint = InternalAwsS3Service.findEndpoint(logger, repositorySettings, settings, configName); + S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName); + String foundEndpoint = InternalAwsS3Service.findEndpoint(logger, clientSettings, repositorySettings); assertThat(foundEndpoint, is(expectedEndpoint)); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 247d3c68b27cf..f1b3ceb28f149 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -60,7 +60,7 @@ protected void doStop() {} @Override protected void doClose() {} @Override - public AmazonS3 client(RepositoryMetaData metadata, Settings settings) { + public AmazonS3 client(Settings settings) { return new DummyS3Client(); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java index 17a1b989c99be..522ca06614c00 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java @@ -28,6 +28,9 @@ public class TestAwsS3Service extends InternalAwsS3Service { public static 
class TestPlugin extends S3RepositoryPlugin { + public TestPlugin(Settings settings) { + super(settings); + } @Override protected AwsS3Service createStorageService(Settings settings) { return new TestAwsS3Service(settings); @@ -37,13 +40,12 @@ protected AwsS3Service createStorageService(Settings settings) { IdentityHashMap clients = new IdentityHashMap<>(); public TestAwsS3Service(Settings settings) { - super(settings); + super(settings, S3ClientSettings.load(settings)); } - @Override - public synchronized AmazonS3 client(RepositoryMetaData metadata, Settings repositorySettings) { - return cachedWrapper(super.client(metadata, repositorySettings)); + public synchronized AmazonS3 client(Settings repositorySettings) { + return cachedWrapper(super.client(repositorySettings)); } private AmazonS3 cachedWrapper(AmazonS3 client) { diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yaml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yaml index 6b0286ac81b9b..74cab3edcb705 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yaml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yaml @@ -1,12 +1,7 @@ # Integration tests for Repository S3 component # "S3 repository can be registered": - - skip: - features: warnings - do: - warnings: - - "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version." - - "[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version." snapshot.create_repository: repository: test_repo_s3_1 verify: false @@ -14,8 +9,6 @@ type: s3 settings: bucket: "my_bucket_name" - access_key: "AKVAIQBF2RECL7FJWGJQ" - secret_key: "vExyMThREXeRMm/b/LRzEB8jWwvzQeXgjqMX+6br" canned_acl: "public-read" storage_class: "standard" diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java index eb679df9f6adb..3f0709f1a3014 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java @@ -33,7 +33,6 @@ public class EvilJNANativesTests extends ESTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/23640") public void testSetMaximumNumberOfThreads() throws IOException { if (Constants.LINUX) { final List lines = Files.readAllLines(PathUtils.get("/proc/self/limits")); @@ -56,7 +55,6 @@ public void testSetMaximumNumberOfThreads() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/23640") public void testSetMaxSizeVirtualMemory() throws IOException { if (Constants.LINUX) { final List lines = Files.readAllLines(PathUtils.get("/proc/self/limits")); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java index 6270111a0971c..8dd1fb061369e 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java @@ -44,6 +44,12 @@ public class 
EvilLoggerConfigurationTests extends ESTestCase { + @Override + public void setUp() throws Exception { + super.setUp(); + LogConfigurator.registerErrorListener(); + } + @Override public void tearDown() throws Exception { LoggerContext context = (LoggerContext) LogManager.getContext(false); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index 6246f3e1dd4a4..cbf160b2ab3de 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -51,6 +51,12 @@ public class EvilLoggerTests extends ESTestCase { + @Override + public void setUp() throws Exception { + super.setUp(); + LogConfigurator.registerErrorListener(); + } + @Override public void tearDown() throws Exception { LoggerContext context = (LoggerContext) LogManager.getContext(false); diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yaml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yaml index 28ff1e52b876e..bca0703d457bf 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yaml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yaml @@ -118,6 +118,30 @@ - match: { hits.total: 6 } - match: { hits.hits.0._index: "test_remote_cluster:test_index" } +--- +"Test wildcard search": + - do: + cluster.get_settings: + include_defaults: true + + - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip } + + - do: + cluster.put_settings: + flat_settings: true + body: + transient: + search.remote.test_remote_cluster.seeds: $remote_ip + + - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}} + + - do: + search: + index: "*:test_index" + + - match: { _shards.total: 6 } + - match: { hits.total: 12 } + --- "Search an filtered alias on the remote cluster": diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yaml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yaml new file mode 100644 index 0000000000000..7843e30561ac4 --- /dev/null +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yaml @@ -0,0 +1,58 @@ +--- +"Fetch remote cluster info for existing cluster": + + - do: + remote.info: {} + - match: { my_remote_cluster.connected: true } + - match: { my_remote_cluster.num_nodes_connected: 1} + - match: { my_remote_cluster.max_connections_per_cluster: 1} + - match: { my_remote_cluster.initial_connect_timeout: "30s" } + - is_true: my_remote_cluster.http_addresses.0 + +--- +"Add transient remote cluster based on the preset cluster and check remote info": + - do: + cluster.get_settings: + include_defaults: true + + - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip } + + - do: + cluster.put_settings: + flat_settings: true + body: + transient: + search.remote.test_remote_cluster.seeds: $remote_ip + + - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}} + + # we do another search here since this will enforce the connection to be established + # otherwise the cluster might not have been connected yet. 
+ - do: + search: + index: test_remote_cluster:test_index + + - match: { _shards.total: 3 } + - match: { hits.total: 6 } + - match: { hits.hits.0._index: "test_remote_cluster:test_index" } + + - do: + remote.info: {} + - set: { my_remote_cluster.http_addresses.0: remote_http } + - match: { test_remote_cluster.http_addresses.0: $remote_http } + + - match: { test_remote_cluster.connected: true } + - match: { my_remote_cluster.connected: true } + + - match: { test_remote_cluster.seeds.0: $remote_ip } + - match: { my_remote_cluster.seeds.0: $remote_ip } + + - match: { my_remote_cluster.num_nodes_connected: 1} + - match: { test_remote_cluster.num_nodes_connected: 1} + + - match: { my_remote_cluster.max_connections_per_cluster: 1} + - match: { test_remote_cluster.max_connections_per_cluster: 1} + + - match: { my_remote_cluster.initial_connect_timeout: "30s" } + - match: { test_remote_cluster.initial_connect_timeout: "30s" } + diff --git a/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats index 26c8c8082d1af..64f0e977c7d20 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats @@ -124,7 +124,11 @@ setup() { # set DATA_DIR to DATA_DIR=/tmp/aoeu,/tmp/asdf sed -i 's/DATA_DIR=.*/DATA_DIR=\/tmp\/aoeu,\/tmp\/asdf/' /etc/init.d/elasticsearch cat /etc/init.d/elasticsearch | grep "DATA_DIR" - service elasticsearch start + run service elasticsearch start + if [ "$status" -ne 0 ]; then + cat /var/log/elasticsearch/* + fail + fi wait_for_elasticsearch_status assert_file_not_exist /tmp/aoeu,/tmp/asdf assert_file_not_exist /tmp/aoeu, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/remote.info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/remote.info.json new file mode 100644 index 0000000000000..a90d4ff6984fa --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/remote.info.json @@ -0,0 +1,12 @@ +{ + "remote.info": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/remote-info.html", + "methods": ["GET"], + "url": { + "path": "/_remote/info", + "paths": ["/_remote/info"], + "params": {} + }, + "body": null + } +} \ No newline at end of file diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml index 8d72d40b10ba5..dc097765c577a 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml @@ -169,33 +169,6 @@ $body: | /^(index2 \s+ \d \s+ (p|r) \s+ ((STARTED|INITIALIZING|RELOCATING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){5}$/ ---- -"Test cat shards with shadow replicas": - - skip: - version: " - 5.1.99" - reason: deprecation was added in 5.2.0 - features: "warnings" - - - do: - indices.create: - index: index3 - body: - settings: - number_of_shards: "1" - number_of_replicas: "1" - shadow_replicas: true - shared_filesystem: false - warnings: - - "[index.shadow_replicas] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version." - - "[index.shared_filesystem] setting was deprecated in Elasticsearch and will be removed in a future release! 
See the breaking changes documentation for the next major version." - - - do: - cat.shards: - index: index3 - - match: - $body: | - /^(index3 \s+ \d \s+ (p|s) \s+ ((STARTED|INITIALIZING|RELOCATING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){2}$/ - --- "Test cat shards using wildcards": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/remote.info/10_info.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/remote.info/10_info.yaml new file mode 100644 index 0000000000000..34c4605941917 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/remote.info/10_info.yaml @@ -0,0 +1,9 @@ +--- +"Get an empty emote info": + - skip: + version: " - 5.3.99" + reason: this API doesn't exist in 5.3.x yet + - do: + remote.info: {} + - is_true: '' + diff --git a/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java b/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java index 81c3b47bde44b..21cd1961d7c2d 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java +++ b/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java @@ -19,8 +19,11 @@ package org.elasticsearch.common.settings; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -30,6 +33,8 @@ public class MockSecureSettings implements SecureSettings { private Map secureStrings = new HashMap<>(); + private Map files = new HashMap<>(); + private Set settingNames = new HashSet<>(); @Override public boolean isLoaded() { @@ -38,7 +43,7 @@ public boolean isLoaded() { @Override public Set getSettingNames() { - return secureStrings.keySet(); + return settingNames; } @Override @@ -46,8 +51,19 @@ public SecureString getString(String setting) { return secureStrings.get(setting); } + @Override + public InputStream getFile(String setting) { + return new ByteArrayInputStream(files.get(setting)); + } + public void setString(String setting, String value) { secureStrings.put(setting, new SecureString(value.toCharArray())); + settingNames.add(setting); + } + + public void setFile(String setting, byte[] value) { + files.put(setting, value); + settingNames.add(setting); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java index 9a6747d5301c6..2956e44d50799 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java @@ -35,9 +35,4 @@ public MockEngineFactory(Class wrapper) { public Engine newReadWriteEngine(EngineConfig config) { return new MockInternalEngine(config, wrapper); } - - @Override - public Engine newReadOnlyEngine(EngineConfig config) { - return new MockShadowEngine(config, wrapper); - } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java index fbc4352b1e28c..a7fc61e691330 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -47,7 +47,7 @@ import 
java.util.concurrent.atomic.AtomicBoolean; /** - * Support class to build MockEngines like {@link org.elasticsearch.test.engine.MockInternalEngine} or {@link org.elasticsearch.test.engine.MockShadowEngine} + * Support class to build MockEngines like {@link org.elasticsearch.test.engine.MockInternalEngine} * since they need to subclass the actual engine */ public final class MockEngineSupport { diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java index 603907cc03c07..fe8c4daec8dfd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java @@ -29,12 +29,10 @@ final class MockInternalEngine extends InternalEngine { private MockEngineSupport support; - private final boolean randomizeFlushOnClose; private Class wrapperClass; MockInternalEngine(EngineConfig config, Class wrapper) throws EngineException { super(config); - randomizeFlushOnClose = config.getIndexSettings().isOnSharedFilesystem() == false; wrapperClass = wrapper; } @@ -61,17 +59,13 @@ public void close() throws IOException { @Override public void flushAndClose() throws IOException { - if (randomizeFlushOnClose) { - switch (support().flushOrClose(MockEngineSupport.CloseAction.FLUSH_AND_CLOSE)) { - case FLUSH_AND_CLOSE: - flushAndCloseInternal(); - break; - case CLOSE: - super.close(); - break; - } - } else { - flushAndCloseInternal(); + switch (support().flushOrClose(MockEngineSupport.CloseAction.FLUSH_AND_CLOSE)) { + case FLUSH_AND_CLOSE: + flushAndCloseInternal(); + break; + case CLOSE: + super.close(); + break; } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java deleted file mode 100644 index 2116dcc390cde..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.test.engine; - -import org.apache.lucene.index.FilterDirectoryReader; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.SearcherManager; -import org.elasticsearch.index.engine.EngineConfig; -import org.elasticsearch.index.engine.EngineException; -import org.elasticsearch.index.engine.ShadowEngine; - -final class MockShadowEngine extends ShadowEngine { - private final MockEngineSupport support; - - MockShadowEngine(EngineConfig config, Class wrapper) { - super(config); - this.support = new MockEngineSupport(config, wrapper); - } - - @Override - protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException { - final Searcher engineSearcher = super.newSearcher(source, searcher, manager); - return support.wrapSearcher(source, engineSearcher, searcher, manager); - } - -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index a2e03a063cf5a..c2c0f57c942ce 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -56,7 +56,7 @@ public class ClientYamlTestExecutionContext { private final boolean randomizeContentType; - public ClientYamlTestExecutionContext(ClientYamlTestClient clientYamlTestClient, boolean randomizeContentType) { + ClientYamlTestExecutionContext(ClientYamlTestClient clientYamlTestClient, boolean randomizeContentType) { this.clientYamlTestClient = clientYamlTestClient; this.randomizeContentType = randomizeContentType; } @@ -68,7 +68,7 @@ public ClientYamlTestExecutionContext(ClientYamlTestClient clientYamlTestClient, public ClientYamlTestResponse callApi(String apiName, Map params, List> bodies, Map headers) throws IOException { //makes a copy of the parameters before modifying them for this specific request - HashMap requestParams = new HashMap<>(params); + Map requestParams = new HashMap<>(params); requestParams.putIfAbsent("error_trace", "true"); // By default ask for error traces, this my be overridden by params for (Map.Entry entry : requestParams.entrySet()) { if (stash.containsStashedValue(entry.getValue())) { @@ -76,9 +76,17 @@ public ClientYamlTestResponse callApi(String apiName, Map params } } - HttpEntity entity = createEntity(bodies, headers); + //make a copy of the headers before modifying them for this specific request + Map requestHeaders = new HashMap<>(headers); + for (Map.Entry entry : requestHeaders.entrySet()) { + if (stash.containsStashedValue(entry.getValue())) { + entry.setValue(stash.getValue(entry.getValue()).toString()); + } + } + + HttpEntity entity = createEntity(bodies, requestHeaders); try { - response = callApiInternal(apiName, requestParams, entity, headers); + response = callApiInternal(apiName, requestParams, entity, requestHeaders); return response; } catch(ClientYamlTestResponseException e) { response = e.getRestTestResponse(); @@ -143,7 +151,8 @@ private BytesRef bodyAsBytesRef(Map bodyAsMap, XContentType xCon } } - private ClientYamlTestResponse callApiInternal(String apiName, Map params, + // pkg-private for testing + ClientYamlTestResponse callApiInternal(String apiName, Map params, HttpEntity entity, Map headers) throws IOException { return clientYamlTestClient.callApi(apiName, params, entity, headers); } 
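For context on the ClientYamlTestExecutionContext change above: stashed values can now be substituted into request headers, not only into parameters and bodies. A minimal YAML REST test sketch of how this could be used, assuming the headers feature of the test runner is enabled via a skip section; the header name X-Cluster-Id and the stash key cluster_uuid are illustrative only, not part of this change:

  - do:
      cluster.state: {}
  - set: { metadata.cluster_uuid: cluster_uuid }   # stash the cluster UUID under the key "cluster_uuid"
  - do:
      headers:
        X-Cluster-Id: "uuid ${cluster_uuid}"       # the stashed value is substituted into the header before the request is sent
      cluster.health: {}
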
diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 787e219b0edf0..1efd210b110c8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -103,7 +103,7 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha @Override public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) { - if (currentState == IndexShardState.CLOSED && validCheckIndexStates.contains(previousState) && indexShard.indexSettings().isOnSharedFilesystem() == false) { + if (currentState == IndexShardState.CLOSED && validCheckIndexStates.contains(previousState)) { shardSet.put(indexShard, Boolean.TRUE); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java new file mode 100644 index 0000000000000..2150baf59eab0 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.test.rest.yaml;
+
+import org.apache.http.HttpEntity;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class ClientYamlTestExecutionContextTests extends ESTestCase {
+
+    public void testHeadersSupportStashedValueReplacement() throws IOException {
+        final AtomicReference<Map<String, String>> headersRef = new AtomicReference<>();
+        final ClientYamlTestExecutionContext context =
+            new ClientYamlTestExecutionContext(null, randomBoolean()) {
+                @Override
+                ClientYamlTestResponse callApiInternal(String apiName, Map<String, String> params,
+                                                       HttpEntity entity,
+                                                       Map<String, String> headers) {
+                    headersRef.set(headers);
+                    return null;
+                }
+            };
+        final Map<String, String> headers = new HashMap<>();
+        headers.put("foo", "$bar");
+        headers.put("foo1", "baz ${c}");
+
+        context.stash().stashValue("bar", "foo2");
+        context.stash().stashValue("c", "bar1");
+
+        assertNull(headersRef.get());
+        context.callApi("test", Collections.emptyMap(), Collections.emptyList(), headers);
+        assertNotNull(headersRef.get());
+        assertNotEquals(headers, headersRef.get());
+
+        assertEquals("foo2", headersRef.get().get("foo"));
+        assertEquals("baz bar1", headersRef.get().get("foo1"));
+    }
+}
diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java
index 7ad1cc8377fc1..dd6a297934412 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java
@@ -123,7 +123,7 @@ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exceptio
         parser = createParser(YamlXContent.yamlXContent,
             "\"First test section\": \n" +
             "  - skip:\n" +
-            "      version: \"2.0.0 - 2.2.0\"\n" +
+            "      version: \"5.0.0 - 5.2.0\"\n" +
             "      reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" +
             "  - do :\n" +
             "      catch: missing\n" +
@@ -138,8 +138,9 @@ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exceptio
         assertThat(testSection, notNullValue());
         assertThat(testSection.getName(), equalTo("First test section"));
         assertThat(testSection.getSkipSection(), notNullValue());
-        assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_2_0_0));
-        assertThat(testSection.getSkipSection().getUpperVersion(), equalTo(Version.V_2_2_0));
+        assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0));
+        assertThat(testSection.getSkipSection().getUpperVersion(),
+                equalTo(Version.V_5_2_0_UNRELEASED));
         assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
         assertThat(testSection.getExecutableSections().size(), equalTo(2));
         DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java
index 4c96986146b5e..4c97eb453610e 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java
@@ -66,10 +66,10 @@ public void testParseTestSetupTeardownAndSections() throws Exception {
             " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" +
             "\n" +
             "---\n" +
-            "\"Get type mapping - pre 1.0\":\n" +
+            "\"Get type mapping - pre 5.0\":\n" +
             "\n" +
             " - skip:\n" +
-            "     version: \"2.0.0 - \"\n" +
+            "     version: \"5.0.0 - \"\n" +
             "     reason: \"for newer versions the index name is always returned\"\n" +
             "\n" +
             " - do:\n" +
@@ -130,11 +130,13 @@ public void testParseTestSetupTeardownAndSections() throws Exception {
         assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.analyzer"));
         assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace"));
 
-        assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Get type mapping - pre 1.0"));
+        assertThat(restTestSuite.getTestSections().get(1).getName(),
+                equalTo("Get type mapping - pre 5.0"));
         assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false));
         assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(),
                 equalTo("for newer versions the index name is always returned"));
-        assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), equalTo(Version.V_2_0_0));
+        assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(),
+                equalTo(Version.V_5_0_0));
         assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getUpperVersion(), equalTo(Version.CURRENT));
         assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3));
         assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class));
diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java
index f6174cf0be207..7b3022dd937cc 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java
@@ -20,7 +20,6 @@
 
 import org.elasticsearch.Version;
 import org.elasticsearch.common.xcontent.yaml.YamlXContent;
-import org.elasticsearch.test.rest.yaml.section.SetupSection;
 
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.notNullValue;
@@ -54,7 +53,7 @@ public void testParseSetupSection() throws Exception {
     public void testParseSetupAndSkipSectionNoSkip() throws Exception {
         parser = createParser(YamlXContent.yamlXContent,
             " - skip:\n" +
-            "     version: \"2.0.0 - 2.3.0\"\n" +
+            "     version: \"5.0.0 - 5.3.0\"\n" +
             "     reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" +
             " - do:\n" +
             "     index1:\n" +
@@ -75,8 +74,9 @@ public void testParseSetupAndSkipSectionNoSkip() throws Exception {
         assertThat(setupSection, notNullValue());
         assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false));
         assertThat(setupSection.getSkipSection(), notNullValue());
-        assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_2_0_0));
-        assertThat(setupSection.getSkipSection().getUpperVersion(), equalTo(Version.V_2_3_0));
+        assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0));
+        assertThat(setupSection.getSkipSection().getUpperVersion(),
+                equalTo(Version.V_5_3_0_UNRELEASED));
         assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
         assertThat(setupSection.getDoSections().size(), equalTo(2));
         assertThat(setupSection.getDoSections().get(0).getApiCallSection().getApi(), equalTo("index1"));
diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java
index 85304be1711a7..1ba31ed288d30 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java
@@ -34,16 +34,18 @@
 public class SkipSectionTests extends AbstractClientYamlTestFragmentParserTestCase {
 
     public void testSkip() {
-        SkipSection section = new SkipSection("2.0.0 - 2.1.0",
+        SkipSection section = new SkipSection("5.0.0 - 5.1.0",
                 randomBoolean() ? Collections.emptyList() : Collections.singletonList("warnings"), "foobar");
         assertFalse(section.skip(Version.CURRENT));
-        assertTrue(section.skip(Version.V_2_0_0));
-        section = new SkipSection(randomBoolean() ? null : "2.0.0 - 2.1.0", Collections.singletonList("boom"), "foobar");
+        assertTrue(section.skip(Version.V_5_0_0));
+        section = new SkipSection(randomBoolean() ? null : "5.0.0 - 5.1.0",
+                Collections.singletonList("boom"), "foobar");
         assertTrue(section.skip(Version.CURRENT));
     }
 
     public void testMessage() {
-        SkipSection section = new SkipSection("2.0.0 - 2.1.0", Collections.singletonList("warnings"), "foobar");
+        SkipSection section = new SkipSection("5.0.0 - 5.1.0",
+                Collections.singletonList("warnings"), "foobar");
         assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR"));
         section = new SkipSection(null, Collections.singletonList("warnings"), "foobar");
         assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR"));
@@ -53,14 +55,14 @@ public void testMessage() {
 
     public void testParseSkipSectionVersionNoFeature() throws Exception {
         parser = createParser(YamlXContent.yamlXContent,
-            "version: \" - 2.1.0\"\n" +
+            "version: \" - 5.1.1\"\n" +
             "reason: Delete ignores the parent param"
         );
 
         SkipSection skipSection = SkipSection.parse(parser);
         assertThat(skipSection, notNullValue());
         assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion()));
-        assertThat(skipSection.getUpperVersion(), equalTo(Version.V_2_1_0));
+        assertThat(skipSection.getUpperVersion(), equalTo(Version.V_5_1_1_UNRELEASED));
         assertThat(skipSection.getFeatures().size(), equalTo(0));
         assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param"));
     }
diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java
index f057d0d370d63..de8e83692b83d 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java
@@ -56,7 +56,7 @@ public void testParseTeardownSection() throws Exception {
     public void testParseWithSkip() throws Exception {
         parser = createParser(YamlXContent.yamlXContent,
             " - skip:\n" +
-            "     version: \"2.0.0 - 2.3.0\"\n" +
+            "     version: \"5.0.0 - 5.3.0\"\n" +
             "     reason: \"there is a reason\"\n" +
             " - do:\n" +
             "     delete:\n" +
@@ -75,8 +75,8 @@ public void testParseWithSkip() throws Exception {
         TeardownSection section = TeardownSection.parse(parser);
         assertThat(section, notNullValue());
         assertThat(section.getSkipSection().isEmpty(), equalTo(false));
-        assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_2_0_0));
-        assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_2_3_0));
+        assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0));
+        assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_5_3_0_UNRELEASED));
         assertThat(section.getSkipSection().getReason(), equalTo("there is a reason"));
         assertThat(section.getDoSections().size(), equalTo(2));
         assertThat(section.getDoSections().get(0).getApiCallSection().getApi(), equalTo("delete"));
diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java
index c97188e7016aa..eee31dd09b471 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java
@@ -46,21 +46,22 @@ public void testRandomVersionBetween() {
         assertTrue(got.onOrBefore(Version.CURRENT));
 
         // sub range
-        got = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha1);
-        assertTrue(got.onOrAfter(Version.V_2_0_0));
-        assertTrue(got.onOrBefore(Version.V_5_0_0_alpha1));
+        got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
+            Version.V_6_0_0_alpha1_UNRELEASED);
+        assertTrue(got.onOrAfter(Version.V_5_0_0));
+        assertTrue(got.onOrBefore(Version.V_6_0_0_alpha1_UNRELEASED));
 
         // unbounded lower
-        got = VersionUtils.randomVersionBetween(random(), null, Version.V_5_0_0_alpha1);
+        got = VersionUtils.randomVersionBetween(random(), null, Version.V_6_0_0_alpha1_UNRELEASED);
         assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
-        assertTrue(got.onOrBefore(Version.V_5_0_0_alpha1));
+        assertTrue(got.onOrBefore(Version.V_6_0_0_alpha1_UNRELEASED));
         got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.allReleasedVersions().get(0));
         assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
         assertTrue(got.onOrBefore(VersionUtils.allReleasedVersions().get(0)));
 
         // unbounded upper
-        got = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, null);
-        assertTrue(got.onOrAfter(Version.V_2_0_0));
+        got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, null);
+        assertTrue(got.onOrAfter(Version.V_5_0_0));
         assertTrue(got.onOrBefore(Version.CURRENT));
         got = VersionUtils.randomVersionBetween(random(), VersionUtils.getPreviousVersion(), null);
         assertTrue(got.onOrAfter(VersionUtils.getPreviousVersion()));
@@ -71,8 +72,9 @@ public void testRandomVersionBetween() {
         assertEquals(got, VersionUtils.getFirstVersion());
         got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, Version.CURRENT);
         assertEquals(got, Version.CURRENT);
-        got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_alpha1, Version.V_5_0_0_alpha1);
-        assertEquals(got, Version.V_5_0_0_alpha1);
+        got = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1_UNRELEASED,
+            Version.V_6_0_0_alpha1_UNRELEASED);
+        assertEquals(got, Version.V_6_0_0_alpha1_UNRELEASED);
 
         // implicit range of one
         got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.getFirstVersion());
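For context on the VersionUtilsTests changes above: randomVersionBetween picks a random version from an ordered list of known versions, treating a null lower bound as the first known version and a null upper bound as the current one. The sketch below illustrates that contract against a plain list of integer version ids (following the id scheme described in Version.java); it is not the real VersionUtils implementation, and the class name and ids are illustrative only.

import java.util.Arrays;
import java.util.List;
import java.util.Random;

final class RandomVersionBetweenSketch {

    // version ids sorted ascending; the last entry plays the role of Version.CURRENT
    private static final List<Integer> ALL_VERSIONS = Arrays.asList(5000099, 5010199, 5020099, 6000001);

    // null lower bound -> first known version, null upper bound -> current version
    static int randomVersionBetween(Random random, Integer lower, Integer upper) {
        int from = lower == null ? 0 : ALL_VERSIONS.indexOf(lower);
        int to = upper == null ? ALL_VERSIONS.size() - 1 : ALL_VERSIONS.indexOf(upper);
        if (from < 0 || to < 0 || from > to) {
            throw new IllegalArgumentException("invalid version range");
        }
        // pick uniformly from the inclusive index range [from, to]
        return ALL_VERSIONS.get(from + random.nextInt(to - from + 1));
    }

    public static void main(String[] args) {
        // run with -ea to enable the asserts below
        Random random = new Random();
        // unbounded lower: anything up to and including id 5020099
        int got = randomVersionBetween(random, null, 5020099);
        assert got <= 5020099;
        // explicit range of one: always returns the single bound
        assert randomVersionBetween(random, 6000001, 6000001) == 6000001;
        System.out.println("sample: " + got);
    }
}

The updated test exercises exactly these three cases (bounded sub range, unbounded lower or upper bound, and a range of one), only with the 5.x/6.x Version constants instead of raw ids.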