diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index bf3ffcabe2f68..e45ba7ce9dc14 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -528,11 +528,12 @@ class BuildPlugin implements Plugin<Project> {
         project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask ->
             // The GenerateMavenPom task is aggressive about setting the destination, instead of fighting it,
             // just make a copy.
+            generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-${project.version}.pom"
             doLast {
                 project.copy {
                     from generatePOMTask.destination
                     into "${project.buildDir}/distributions"
-                    rename { "${project.archivesBaseName}-${project.version}.pom" }
+                    rename { generatePOMTask.ext.pomFileName }
                 }
             }
             // build poms with assemble (if the assemble task exists)
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
index 6f42e41beaa1b..5216f08427428 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
@@ -19,23 +19,19 @@
 package org.elasticsearch.gradle.plugin
 
 import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
-import nebula.plugin.info.scm.ScmInfoPlugin
+import nebula.plugin.publishing.maven.MavenScmPlugin
 import org.elasticsearch.gradle.BuildPlugin
 import org.elasticsearch.gradle.NoticeTask
 import org.elasticsearch.gradle.test.RestIntegTestTask
 import org.elasticsearch.gradle.test.RunTask
-import org.gradle.api.InvalidUserDataException
 import org.gradle.api.Project
-import org.gradle.api.Task
-import org.gradle.api.XmlProvider
 import org.gradle.api.publish.maven.MavenPublication
 import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
+import org.gradle.api.publish.maven.tasks.GenerateMavenPom
 import org.gradle.api.tasks.SourceSet
 import org.gradle.api.tasks.bundling.Zip
+import org.gradle.jvm.tasks.Jar
 
-import java.nio.file.Files
-import java.nio.file.Path
-import java.nio.file.StandardCopyOption
 import java.util.regex.Matcher
 import java.util.regex.Pattern
 
 /**
@@ -55,16 +51,10 @@ public class PluginBuildPlugin extends BuildPlugin {
             String name = project.pluginProperties.extension.name
             project.archivesBaseName = name
 
-            if (project.pluginProperties.extension.hasClientJar) {
-                // for plugins which work with the transport client, we copy the jar
-                // file to a new name, copy the nebula generated pom to the same name,
-                // and generate a different pom for the zip
-                addClientJarPomGeneration(project)
-                addClientJarTask(project)
-            }
-            // while the jar isn't normally published, we still at least build a pom of deps
-            // in case it is published, for instance when other plugins extend this plugin
-            configureJarPom(project)
+            // set the project description so it will be picked up by publishing
+            project.description = project.pluginProperties.extension.description
+
+            configurePublishing(project)
 
             project.integTestCluster.dependsOn(project.bundlePlugin)
             project.tasks.run.dependsOn(project.bundlePlugin)
@@ -94,6 +84,32 @@ public class PluginBuildPlugin extends BuildPlugin {
         project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build
     }
 
+    private void configurePublishing(Project project) {
+        // Only configure publishing if applied externally
+        if (project.pluginProperties.extension.hasClientJar) {
+            project.plugins.apply(MavenScmPlugin.class)
+            // Only change Jar tasks; we don't want a -client zip, so we can't change archivesBaseName
+            project.tasks.withType(Jar) {
+                baseName = baseName + "-client"
+            }
+            // always configure publishing for client jars
+            project.plugins.apply(MavenScmPlugin.class)
+            project.publishing.publications.nebula(MavenPublication).artifactId(
+                    project.pluginProperties.extension.name + "-client"
+            )
+            project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask ->
+                generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-client-${project.version}.pom"
+            }
+        } else {
+            project.plugins.withType(MavenPublishPlugin).whenPluginAdded {
+                project.publishing.publications.nebula(MavenPublication).artifactId(
+                        project.pluginProperties.extension.name
+                )
+            }
+
+        }
+    }
+
     private static void configureDependencies(Project project) {
         project.dependencies {
             compileOnly "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
@@ -161,33 +177,6 @@ public class PluginBuildPlugin extends BuildPlugin {
     }
 
     /** Adds a task to move jar and associated files to a "-client" name. */
-    protected static void addClientJarTask(Project project) {
-        Task clientJar = project.tasks.create('clientJar')
-        clientJar.dependsOn(project.jar, project.tasks.generatePomFileForClientJarPublication, project.javadocJar, project.sourcesJar)
-        clientJar.doFirst {
-            Path jarFile = project.jar.outputs.files.singleFile.toPath()
-            String clientFileName = jarFile.fileName.toString().replace(project.version, "client-${project.version}")
-            Files.copy(jarFile, jarFile.resolveSibling(clientFileName), StandardCopyOption.REPLACE_EXISTING)
-
-            String clientPomFileName = clientFileName.replace('.jar', '.pom')
-            Files.copy(
-                    project.tasks.generatePomFileForClientJarPublication.outputs.files.singleFile.toPath(),
-                    jarFile.resolveSibling(clientPomFileName),
-                    StandardCopyOption.REPLACE_EXISTING
-            )
-
-            String sourcesFileName = jarFile.fileName.toString().replace('.jar', '-sources.jar')
-            String clientSourcesFileName = clientFileName.replace('.jar', '-sources.jar')
-            Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName),
-                    StandardCopyOption.REPLACE_EXISTING)
-
-            String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar')
-            String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar')
-            Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName),
-                    StandardCopyOption.REPLACE_EXISTING)
-        }
-        project.assemble.dependsOn(clientJar)
-    }
 
     static final Pattern GIT_PATTERN = Pattern.compile(/git@([^:]+):([^\.]+)\.git/)
 
@@ -209,39 +198,11 @@ public class PluginBuildPlugin extends BuildPlugin {
 
     /** Adds nebula publishing task to generate a pom file for the plugin.
*/ protected static void addClientJarPomGeneration(Project project) { - project.plugins.apply(MavenPublishPlugin.class) - - project.publishing { - publications { - clientJar(MavenPublication) { - from project.components.java - artifactId = project.pluginProperties.extension.name + '-client' - pom.withXml { XmlProvider xml -> - Node root = xml.asNode() - root.appendNode('name', project.pluginProperties.extension.name) - root.appendNode('description', project.pluginProperties.extension.description) - root.appendNode('url', urlFromOrigin(project.scminfo.origin)) - Node scmNode = root.appendNode('scm') - scmNode.appendNode('url', project.scminfo.origin) - } - } - } - } + project.plugins.apply(MavenScmPlugin.class) + project.description = project.pluginProperties.extension.description } /** Configure the pom for the main jar of this plugin */ - protected static void configureJarPom(Project project) { - project.plugins.apply(ScmInfoPlugin.class) - project.plugins.apply(MavenPublishPlugin.class) - - project.publishing { - publications { - nebula(MavenPublication) { - artifactId project.pluginProperties.extension.name - } - } - } - } protected void addNoticeGeneration(Project project) { File licenseFile = project.pluginProperties.extension.licenseFile diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 598d82d5d9aa7..42dc29df058c6 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -21,6 +21,7 @@ package org.elasticsearch.gradle.precommit import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask +import org.gradle.api.JavaVersion import org.gradle.api.Project import org.gradle.api.Task import org.gradle.api.file.FileCollection @@ -101,6 +102,11 @@ class PrecommitTasks { signaturesURLs = project.forbiddenApis.signaturesURLs + [ getClass().getResource('/forbidden/es-server-signatures.txt') ] } + // forbidden apis doesn't support Java 11, so stop at 10 + String targetMajorVersion = (project.compilerJavaVersion.compareTo(JavaVersion.VERSION_1_10) > 0 ? 
+                    JavaVersion.VERSION_1_10 :
+                    project.compilerJavaVersion).getMajorVersion()
+            targetCompatibility = Integer.parseInt(targetMajorVersion) >= 9 ? targetMajorVersion : "1.${targetMajorVersion}"
         }
         Task forbiddenApis = project.tasks.findByName('forbiddenApis')
         forbiddenApis.group = "" // clear group, so this does not show up under verification tasks
diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt
index 33e136a66f44f..cc179e12e3163 100644
--- a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt
+++ b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt
@@ -20,5 +20,14 @@ org.apache.http.entity.ContentType#create(java.lang.String,java.lang.String)
 org.apache.http.entity.ContentType#create(java.lang.String,java.nio.charset.Charset)
 org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameValuePair[])
 
+@defaultMessage ES's logging infrastructure uses log4j2 which we don't want to force on high level rest client users
+org.elasticsearch.common.logging.DeprecationLogger
+org.elasticsearch.common.logging.ESLoggerFactory
+org.elasticsearch.common.logging.LogConfigurator
+org.elasticsearch.common.logging.LoggerMessageFormat
+org.elasticsearch.common.logging.Loggers
+org.elasticsearch.common.logging.NodeNamePatternConverter
+org.elasticsearch.common.logging.PrefixLogger
+
 @defaultMessage We can't rely on log4j2 being on the classpath so don't log deprecations!
 org.elasticsearch.common.xcontent.LoggingDeprecationHandler
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java
index 58b4b268788b5..a914008376a5d 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.client;
 
+import org.apache.http.util.EntityUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
@@ -34,6 +35,7 @@
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.rest.RestStatus;
 
@@ -174,6 +176,8 @@ public void testClusterHealthYellowClusterLevel() throws IOException {
         request.timeout("5s");
         ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health,
                 highLevelClient().cluster()::healthAsync);
+        logger.info("Shard stats\n{}", EntityUtils.toString(
+                client().performRequest(new Request("GET", "/_cat/shards")).getEntity()));
         assertYellowShards(response);
         assertThat(response.getIndices().size(), equalTo(0));
     }
@@ -186,6 +190,8 @@ public void testClusterHealthYellowIndicesLevel() throws IOException {
         request.level(ClusterHealthRequest.Level.INDICES);
         ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health,
                 highLevelClient().cluster()::healthAsync);
+        logger.info("Shard stats\n{}", EntityUtils.toString(
+                client().performRequest(new Request("GET", "/_cat/shards")).getEntity()));
assertYellowShards(response); assertThat(response.getIndices().size(), equalTo(2)); for (Map.Entry entry : response.getIndices().entrySet()) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 0037460150f1a..95a29e99e5266 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.client; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; @@ -36,6 +37,7 @@ import static org.hamcrest.Matchers.is; +@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32993") public class MachineLearningIT extends ESRestHighLevelClientTestCase { public void testPutJob() throws Exception { diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc index f093b6ebcfae0..f92e364bae102 100644 --- a/docs/reference/cluster.asciidoc +++ b/docs/reference/cluster.asciidoc @@ -6,23 +6,70 @@ ["float",id="cluster-nodes"] == Node specification -Most cluster level APIs allow to specify which nodes to execute on (for -example, getting the node stats for a node). Nodes can be identified in -the APIs either using their internal node id, the node name, address, -custom attributes, or just the `_local` node receiving the request. For -example, here are some sample executions of nodes info: +Some cluster-level APIs may operate on a subset of the nodes which can be +specified with _node filters_. For example, the <>, +<>, and <> APIs +can all report results from a filtered set of nodes rather than from all nodes. + +_Node filters_ are written as a comma-separated list of individual filters, +each of which adds or removes nodes from the chosen subset. Each filter can be +one of the following: + +* `_all`, to add all nodes to the subset. +* `_local`, to add the local node to the subset. +* `_master`, to add the currently-elected master node to the subset. +* a node id or name, to add this node to the subset. +* an IP address or hostname, to add all matching nodes to the subset. +* a pattern, using `*` wildcards, which adds all nodes to the subset + whose name, address or hostname matches the pattern. +* `master:true`, `data:true`, `ingest:true` or `coordinating_only:true`, which + respectively add to the subset all master-eligible nodes, all data nodes, + all ingest nodes, and all coordinating-only nodes. +* `master:false`, `data:false`, `ingest:false` or `coordinating_only:false`, + which respectively remove from the subset all master-eligible nodes, all data + nodes, all ingest nodes, and all coordinating-only nodes. +* a pair of patterns, using `*` wildcards, of the form `attrname:attrvalue`, + which adds to the subset all nodes with a custom node attribute whose name + and value match the respective patterns. Custom node attributes are + configured by setting properties in the configuration file of the form + `node.attr.attrname: attrvalue`. + +NOTE: node filters run in the order in which they are given, which is important +if using filters that remove nodes from the set. 
For example +`_all,master:false` means all the nodes except the master-eligible ones, but +`master:false,_all` means the same as `_all` because the `_all` filter runs +after the `master:false` filter. + +NOTE: if no filters are given, the default is to select all nodes. However, if +any filters are given then they run starting with an empty chosen subset. This +means that filters such as `master:false` which remove nodes from the chosen +subset are only useful if they come after some other filters. When used on its +own, `master:false` selects no nodes. + +Here are some examples of the use of node filters with the +<> APIs. [source,js] -------------------------------------------------- -# Local +# If no filters are given, the default is to select all nodes +GET /_nodes +# Explicitly select all nodes +GET /_nodes/_all +# Select just the local node GET /_nodes/_local -# Address -GET /_nodes/10.0.0.3,10.0.0.4 -GET /_nodes/10.0.0.* -# Names +# Select the elected master node +GET /_nodes/_master +# Select nodes by name, which can include wildcards GET /_nodes/node_name_goes_here GET /_nodes/node_name_goes_* -# Attributes (set something like node.attr.rack: 2 in the config) +# Select nodes by address, which can include wildcards +GET /_nodes/10.0.0.3,10.0.0.4 +GET /_nodes/10.0.0.* +# Select nodes by role +GET /_nodes/_all,master:false +GET /_nodes/data:true,ingest:true +GET /_nodes/coordinating_only:true +# Select nodes by custom attribute (e.g. with something like `node.attr.rack: 2` in the configuration file) GET /_nodes/rack:2 GET /_nodes/ra*:2 GET /_nodes/ra*:2* diff --git a/docs/reference/cluster/nodes-hot-threads.asciidoc b/docs/reference/cluster/nodes-hot-threads.asciidoc index c8fa2c9bf7c40..541ee51a58adb 100644 --- a/docs/reference/cluster/nodes-hot-threads.asciidoc +++ b/docs/reference/cluster/nodes-hot-threads.asciidoc @@ -1,12 +1,23 @@ [[cluster-nodes-hot-threads]] == Nodes hot_threads -An API allowing to get the current hot threads on each node in the -cluster. Endpoints are `/_nodes/hot_threads`, and -`/_nodes/{nodesIds}/hot_threads`. +This API yields a breakdown of the hot threads on each selected node in the +cluster. Its endpoints are `/_nodes/hot_threads` and +`/_nodes/{nodes}/hot_threads`: -The output is plain text with a breakdown of each node's top hot -threads. Parameters allowed are: +[source,js] +-------------------------------------------------- +GET /_nodes/hot_threads +GET /_nodes/nodeId1,nodeId2/hot_threads +-------------------------------------------------- +// CONSOLE + +The first command gets the hot threads of all the nodes in the cluster. The +second command gets the hot threads of only `nodeId1` and `nodeId2`. Nodes can +be selected using <>. + +The output is plain text with a breakdown of each node's top hot threads. The +allowed parameters are: [horizontal] `threads`:: number of hot threads to provide, defaults to 3. diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 3de850418719d..78bccc8bd695d 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -213,3 +213,12 @@ Will return, for example: // 3. All of the numbers and strings on the right hand side of *every* field in // the response are ignored. So we're really only asserting things about the // the shape of this response, not the values in it. 
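The node-filter examples above translate directly to the low-level Java REST client, where the filter list is simply part of the request path. A minimal sketch, assuming a hypothetical node on `localhost:9200`; the `Request` API is the same one the `ClusterClientIT` hunk above uses:

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class NodeFilterExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // All nodes except the master-eligible ones; per the ordering rule
            // above, _all must come before master:false to have any effect.
            Response response = client.performRequest(new Request("GET", "/_nodes/_all,master:false"));
            System.out.println(response.getStatusLine());
        }
    }
}
----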
+ +This API can be restricted to a subset of the nodes using the `?nodeId` +parameter, which accepts <>: + +[source,js] +-------------------------------------------------- +GET /_cluster/stats?nodeId=node1,node*,master:false +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 3e44b2c41f67e..8229f74bdd05b 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -23,7 +23,7 @@ There are a few concepts that are core to Elasticsearch. Understanding these con [float] === Near Realtime (NRT) -Elasticsearch is a near real time search platform. What this means is there is a slight latency (normally one second) from the time you index a document until the time it becomes searchable. +Elasticsearch is a near-realtime search platform. What this means is there is a slight latency (normally one second) from the time you index a document until the time it becomes searchable. [float] === Cluster @@ -59,7 +59,7 @@ In a single cluster, you can define as many indexes as you want. deprecated[6.0.0,See <>] -A type used to be a logical category/partition of your index to allow you to store different types of documents in the same index, eg one type for users, another type for blog posts. It is no longer possible to create multiple types in an index, and the whole concept of types will be removed in a later version. See <> for more. +A type used to be a logical category/partition of your index to allow you to store different types of documents in the same index, e.g. one type for users, another type for blog posts. It is no longer possible to create multiple types in an index, and the whole concept of types will be removed in a later version. See <> for more. [float] === Document @@ -1066,7 +1066,7 @@ In the previous section, we skipped over a little detail called the document sco But queries do not always need to produce scores, in particular when they are only used for "filtering" the document set. Elasticsearch detects these situations and automatically optimizes query execution in order not to compute useless scores. -The {ref}/query-dsl-bool-query.html[`bool` query] that we introduced in the previous section also supports `filter` clauses which allow to use a query to restrict the documents that will be matched by other clauses, without changing how scores are computed. As an example, let's introduce the {ref}/query-dsl-range-query.html[`range` query], which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. +The {ref}/query-dsl-bool-query.html[`bool` query] that we introduced in the previous section also supports `filter` clauses which allow us to use a query to restrict the documents that will be matched by other clauses, without changing how scores are computed. As an example, let's introduce the {ref}/query-dsl-range-query.html[`range` query], which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. This example uses a bool query to return all accounts with balances between 20000 and 30000, inclusive. In other words, we want to find accounts with a balance that is greater than or equal to 20000 and less than or equal to 30000. 
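For readers following the getting-started text above, the bool-plus-range combination it describes can also be expressed with the Java query builders. A minimal sketch (the class and method names wrapping it are illustrative; `QueryBuilders` and `SearchSourceBuilder` are the standard entry points):

[source,java]
----
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class BalanceRangeExample {
    public static SearchSourceBuilder balanceBetween20kAnd30k() {
        // match_all scores every document; the range clause runs in filter
        // context, so it restricts matches without affecting scores.
        BoolQueryBuilder query = QueryBuilders.boolQuery()
                .must(QueryBuilders.matchAllQuery())
                .filter(QueryBuilders.rangeQuery("balance").gte(20000).lte(30000));
        return new SearchSourceBuilder().query(query);
    }
}
----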
diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index bdb009167552a..4fbed66449800 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -38,10 +38,10 @@ Dynamic templates are specified as an array of named objects: <3> The mapping that the matched field should use. -Templates are processed in order -- the first matching template wins. New -templates can be appended to the end of the list with the -<> API. If a new template has the same -name as an existing template, it will replace the old version. +Templates are processed in order -- the first matching template wins. When +putting new dynamic templates through the <> API, +all existing templates are overwritten. This allows for dynamic templates to be +reordered or deleted after they were initially added. [[match-mapping-type]] ==== `match_mapping_type` diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 4f29b0549b3a8..9aa4483a8f200 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -1056,6 +1056,33 @@ Specifies the supported protocols for TLS/SSL. Specifies the cipher suites that should be supported. +[float] +[[ref-kerberos-settings]] +===== Kerberos realm settings + +For a Kerberos realm, the `type` must be set to `kerberos`. In addition to the +<>, you can specify +the following settings: + +`keytab.path`:: Specifies the path to the Kerberos keytab file that contains the +service principal used by this {es} node. This must be a location within the +{es} configuration directory and the file must have read permissions. Required. + +`remove_realm_name`:: Set to `true` to remove the realm part of principal names. +Principal names in Kerberos have the form `user/instance@REALM`. If this option +is `true`, the realm part (`@REALM`) will not be included in the username. +Defaults to `false`. + +`krb.debug`:: Set to `true` to enable debug logs for the Java login module that +provides support for Kerberos authentication. Defaults to `false`. + +`cache.ttl`:: The time-to-live for cached user entries. A user is cached for +this period of time. Specify the time period using the standard {es} +<>. Defaults to `20m`. + +`cache.max_users`:: The maximum number of user entries that can live in the +cache at any given time. Defaults to 100,000. 
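For orientation, the Kerberos realm settings documented above might be assembled programmatically in a test or tooling context as follows. A minimal sketch, assuming the 6.x `xpack.security.authc.realms.<name>.*` key layout and a hypothetical realm name `kerb1`:

[source,java]
----
import org.elasticsearch.common.settings.Settings;

public class KerberosRealmSettingsExample {
    public static Settings kerberosRealm() {
        final String prefix = "xpack.security.authc.realms.kerb1."; // realm name is illustrative
        return Settings.builder()
                .put(prefix + "type", "kerberos")
                .put(prefix + "order", 0)
                .put(prefix + "keytab.path", "es.keytab")     // must live inside the config directory
                .put(prefix + "remove_realm_name", false)     // keep user/instance@REALM intact
                .put(prefix + "krb.debug", false)
                .put(prefix + "cache.ttl", "20m")             // the documented default
                .put(prefix + "cache.max_users", 100_000)
                .build();
    }
}
----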
+ [float] [[load-balancing]] ===== Load balancing and failover diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/InitializerTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/InitializerTests.java index d0d0b2165ca10..be3db76bac250 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/InitializerTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/InitializerTests.java @@ -53,7 +53,7 @@ public void testArrayInitializers() { "Object[] x = new Object[] {y, z, 1 + s, s + 'aaa'}; return x;"); assertEquals(4, objects.length); - assertEquals(new Integer(2), objects[0]); + assertEquals(Integer.valueOf(2), objects[0]); assertEquals(new ArrayList(), objects[1]); assertEquals("1aaa", objects[2]); assertEquals("aaaaaa", objects[3]); @@ -85,7 +85,7 @@ public void testListInitializers() { list = (List)exec("int y = 2; List z = new ArrayList(); String s = 'aaa'; List x = [y, z, 1 + s, s + 'aaa']; return x;"); assertEquals(4, list.size()); - assertEquals(new Integer(2), list.get(0)); + assertEquals(Integer.valueOf(2), list.get(0)); assertEquals(new ArrayList(), list.get(1)); assertEquals("1aaa", list.get(2)); assertEquals("aaaaaa", list.get(3)); @@ -100,15 +100,15 @@ public void testMapInitializers() { map = (Map)exec("[5 : 7, -1 : 14]"); assertEquals(2, map.size()); - assertEquals(new Integer(7), map.get(5)); - assertEquals(new Integer(14), map.get(-1)); + assertEquals(Integer.valueOf(7), map.get(5)); + assertEquals(Integer.valueOf(14), map.get(-1)); map = (Map)exec("int y = 2; int z = 3; Map x = [y*z : y + z, y - z : y, z : z]; return x;"); assertEquals(3, map.size()); - assertEquals(new Integer(5), map.get(6)); - assertEquals(new Integer(2), map.get(-1)); - assertEquals(new Integer(3), map.get(3)); + assertEquals(Integer.valueOf(5), map.get(6)); + assertEquals(Integer.valueOf(2), map.get(-1)); + assertEquals(Integer.valueOf(3), map.get(3)); map = (Map)exec("int y = 2; List z = new ArrayList(); String s = 'aaa';" + "def x = [y : z, 1 + s : s + 'aaa']; return x;"); @@ -139,7 +139,7 @@ public void testCrazyInitializer() { list3.add(9); assertEquals(3, map.size()); - assertEquals(new Integer(5), map.get(6)); + assertEquals(Integer.valueOf(5), map.get(6)); assertEquals(list2, map.get("s")); assertEquals(list3, map.get(3)); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index e310f3012a9fb..7eb34bcdcd3aa 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -39,6 +39,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -222,7 +223,8 @@ protected ChannelHandler getClientChannelInitializer() { static final AttributeKey SERVER_CHANNEL_KEY = AttributeKey.newInstance("es-server-channel"); @Override - protected Netty4TcpChannel initiateChannel(InetSocketAddress address, ActionListener listener) throws IOException { + protected Netty4TcpChannel initiateChannel(DiscoveryNode node, 
ActionListener listener) throws IOException { + InetSocketAddress address = node.getAddress().address(); ChannelFuture channelFuture = bootstrap.connect(address); Channel channel = channelFuture.channel(); if (channel == null) { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 8d628ace2ee38..9d6f016086c26 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -87,13 +86,6 @@ protected MockTransportService build(Settings settings, Version version, Cluster return transportService; } - @Override - protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException { - final Netty4Transport t = (Netty4Transport) transport; - final TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection; - CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); - } - public void testConnectException() throws UnknownHostException { try { serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 47229a0df2f6e..129f0ada77d5d 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -21,6 +21,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; @@ -82,7 +83,8 @@ protected NioTcpServerChannel bind(String name, InetSocketAddress address) throw } @Override - protected NioTcpChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + protected NioTcpChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException { + InetSocketAddress address = node.getAddress().address(); NioTcpChannel channel = nioGroup.openChannel(address, clientChannelFactory); channel.addConnectListener(ActionListener.toBiConsumer(connectListener)); return channel; diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java index 9322bfd71222a..baae00f81a3be 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java +++ 
b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -93,12 +92,6 @@ protected MockTransportService build(Settings settings, Version version, Cluster return transportService; } - @Override - protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException { - TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection; - CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); - } - public void testConnectException() throws UnknownHostException { try { serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), diff --git a/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java b/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java index cc354145b11bb..dd5d98dfabbd0 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java +++ b/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java @@ -16,6 +16,8 @@ package org.elasticsearch.common.inject.matcher; +import org.elasticsearch.common.SuppressForbidden; + import java.lang.annotation.Annotation; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -327,7 +329,9 @@ public String toString() { return "inPackage(" + targetPackage.getName() + ")"; } + @SuppressForbidden(reason = "ClassLoader.getDefinedPackage not available yet") public Object readResolve() { + // TODO minJava >= 9 : use ClassLoader.getDefinedPackage and remove @SuppressForbidden return inPackage(Package.getPackage(packageName)); } } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 057a970470b15..d38eb03fae3dd 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -160,11 +160,13 @@ public static String threadName(Settings settings, String namePrefix) { if (Node.NODE_NAME_SETTING.exists(settings)) { return threadName(Node.NODE_NAME_SETTING.get(settings), namePrefix); } else { + // TODO this should only be allowed in tests return threadName("", namePrefix); } } public static String threadName(final String nodeName, final String namePrefix) { + // TODO missing node names should only be allowed in tests return "elasticsearch" + (nodeName.isEmpty() ? "" : "[") + nodeName + (nodeName.isEmpty() ? 
"" : "]") + "[" + namePrefix + "]"; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 0c04f70d436e7..7e39aacd43544 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -528,6 +528,7 @@ public Translog.Location getTranslogLastWriteLocation() { private void revisitIndexDeletionPolicyOnTranslogSynced() throws IOException { if (combinedDeletionPolicy.hasUnreferencedCommits()) { indexWriter.deleteUnusedFiles(); + translog.trimUnreferencedReaders(); } } @@ -1869,6 +1870,8 @@ private void releaseIndexCommit(IndexCommit snapshot) throws IOException { // Revisit the deletion policy if we can clean up the snapshotting commit. if (combinedDeletionPolicy.releaseCommit(snapshot)) { ensureOpen(); + // Here we don't have to trim translog because snapshotting an index commit + // does not lock translog or prevents unreferenced files from trimming. indexWriter.deleteUnusedFiles(); } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TragicExceptionHolder.java b/server/src/main/java/org/elasticsearch/index/translog/TragicExceptionHolder.java new file mode 100644 index 0000000000000..b823a920039b5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/translog/TragicExceptionHolder.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.index.translog;
+
+import java.util.concurrent.atomic.AtomicReference;
+
+public class TragicExceptionHolder {
+    private final AtomicReference<Exception> tragedy = new AtomicReference<>();
+
+    /**
+     * Sets the tragic exception, or if a tragic exception is already set, adds the passed
+     * exception to it as a suppressed exception.
+     * @param ex tragic exception to set
+     */
+    public void setTragicException(Exception ex) {
+        assert ex != null;
+        if (tragedy.compareAndSet(null, ex) == false) {
+            if (tragedy.get() != ex) { // to ensure there is no self-suppression
+                tragedy.get().addSuppressed(ex);
+            }
+        }
+    }
+
+    public Exception get() {
+        return tragedy.get();
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
index 279bdb1904643..af6cb6c81cb50 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
@@ -66,6 +66,7 @@
 import java.util.function.LongSupplier;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 /**
@@ -117,6 +118,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     private final Path location;
     private TranslogWriter current;
+    protected final TragicExceptionHolder tragedy = new TragicExceptionHolder();
     private final AtomicBoolean closed = new AtomicBoolean();
     private final TranslogConfig config;
     private final LongSupplier globalCheckpointSupplier;
@@ -310,8 +312,28 @@ public boolean isOpen() {
         return closed.get() == false;
     }
 
+    private static boolean calledFromOutsideOrViaTragedyClose() {
+        List<StackTraceElement> frames = Stream.of(Thread.currentThread().getStackTrace()).
+            skip(3). //skip getStackTrace, current method and close method frames
+            limit(10). //limit depth of analysis to 10 frames, it should be enough to catch closing with, e.g. IOUtils
+            filter(f ->
+            {
+                try {
+                    return Translog.class.isAssignableFrom(Class.forName(f.getClassName()));
+                } catch (Exception ignored) {
+                    return false;
+                }
+            }
+            ).
//find all inner callers including Translog subclasses + collect(Collectors.toList()); + //the list of inner callers should be either empty or should contain closeOnTragicEvent method + return frames.isEmpty() || frames.stream().anyMatch(f -> f.getMethodName().equals("closeOnTragicEvent")); + } + @Override public void close() throws IOException { + assert calledFromOutsideOrViaTragedyClose() : + "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method"; if (closed.compareAndSet(false, true)) { try (ReleasableLock lock = writeLock.acquire()) { try { @@ -462,7 +484,7 @@ TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, lon getChannelFactory(), config.getBufferSize(), initialMinTranslogGen, initialGlobalCheckpoint, - globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong()); + globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong(), tragedy); } catch (final IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); } @@ -726,7 +748,8 @@ public void trimOperations(long belowTerm, long aboveSeqNo) throws IOException { } } catch (IOException e) { IOUtils.closeWhileHandlingException(newReaders); - close(); + tragedy.setTragicException(e); + closeOnTragicEvent(e); throw e; } @@ -779,10 +802,10 @@ public boolean ensureSynced(Stream locations) throws IOException { * * @param ex if an exception occurs closing the translog, it will be suppressed into the provided exception */ - private void closeOnTragicEvent(final Exception ex) { + protected void closeOnTragicEvent(final Exception ex) { // we can not hold a read lock here because closing will attempt to obtain a write lock and that would result in self-deadlock assert readLock.isHeldByCurrentThread() == false : Thread.currentThread().getName(); - if (current.getTragicException() != null) { + if (tragedy.get() != null) { try { close(); } catch (final AlreadyClosedException inner) { @@ -1559,7 +1582,8 @@ public void rollGeneration() throws IOException { current = createWriter(current.getGeneration() + 1); logger.trace("current translog set to [{}]", current.getGeneration()); } catch (final Exception e) { - IOUtils.closeWhileHandlingException(this); // tragic event + tragedy.setTragicException(e); + closeOnTragicEvent(e); throw e; } } @@ -1672,7 +1696,7 @@ long getFirstOperationPosition() { // for testing private void ensureOpen() { if (closed.get()) { - throw new AlreadyClosedException("translog is already closed", current.getTragicException()); + throw new AlreadyClosedException("translog is already closed", tragedy.get()); } } @@ -1686,7 +1710,7 @@ ChannelFactory getChannelFactory() { * Otherwise (no tragic exception has occurred) it returns null. 
*/ public Exception getTragicException() { - return current.getTragicException(); + return tragedy.get(); } /** Reads and returns the current checkpoint */ @@ -1769,8 +1793,8 @@ static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, S final String translogUUID = UUIDs.randomBase64UUID(); TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, location.resolve(getFilename(1)), channelFactory, new ByteSizeValue(10), 1, initialGlobalCheckpoint, - () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm - ); + () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm, + new TragicExceptionHolder()); writer.close(); return translogUUID; } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 336cd8f5eeab7..0fa9237f6031e 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -52,7 +52,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { /* the number of translog operations written to this file */ private volatile int operationCounter; /* if we hit an exception that we can't recover from we assign it to this var and ship it with every AlreadyClosedException we throw */ - private volatile Exception tragedy; + private final TragicExceptionHolder tragedy; /* A buffered outputstream what writes to the writers channel */ private final OutputStream outputStream; /* the total offset of this file including the bytes written to the file as well as into the buffer */ @@ -77,7 +77,10 @@ private TranslogWriter( final FileChannel channel, final Path path, final ByteSizeValue bufferSize, - final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier, TranslogHeader header) throws IOException { + final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier, TranslogHeader header, + TragicExceptionHolder tragedy) + throws + IOException { super(initialCheckpoint.generation, channel, path, header); assert initialCheckpoint.offset == channel.position() : "initial checkpoint offset [" + initialCheckpoint.offset + "] is different than current channel position [" @@ -95,12 +98,13 @@ private TranslogWriter( assert initialCheckpoint.trimmedAboveSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO : initialCheckpoint.trimmedAboveSeqNo; this.globalCheckpointSupplier = globalCheckpointSupplier; this.seenSequenceNumbers = Assertions.ENABLED ? 
new HashMap<>() : null; + this.tragedy = tragedy; } public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory, ByteSizeValue bufferSize, final long initialMinTranslogGen, long initialGlobalCheckpoint, final LongSupplier globalCheckpointSupplier, final LongSupplier minTranslogGenerationSupplier, - final long primaryTerm) + final long primaryTerm, TragicExceptionHolder tragedy) throws IOException { final FileChannel channel = channelFactory.open(file); try { @@ -121,7 +125,7 @@ public static TranslogWriter create(ShardId shardId, String translogUUID, long f writerGlobalCheckpointSupplier = globalCheckpointSupplier; } return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize, - writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header); + writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header, tragedy); } catch (Exception exception) { // if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that // file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition @@ -130,24 +134,8 @@ public static TranslogWriter create(ShardId shardId, String translogUUID, long f } } - /** - * If this {@code TranslogWriter} was closed as a side-effect of a tragic exception, - * e.g. disk full while flushing a new segment, this returns the root cause exception. - * Otherwise (no tragic exception has occurred) it returns null. - */ - public Exception getTragicException() { - return tragedy; - } - private synchronized void closeWithTragicEvent(final Exception ex) { - assert ex != null; - if (tragedy == null) { - tragedy = ex; - } else if (tragedy != ex) { - // it should be safe to call closeWithTragicEvents on multiple layers without - // worrying about self suppression. 
- tragedy.addSuppressed(ex); - } + tragedy.setTragicException(ex); try { close(); } catch (final IOException | RuntimeException e) { @@ -315,7 +303,8 @@ public TranslogReader closeIntoReader() throws IOException { if (closed.compareAndSet(false, true)) { return new TranslogReader(getLastSyncedCheckpoint(), channel, path, header); } else { - throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", tragedy); + throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", + tragedy.get()); } } } @@ -425,7 +414,7 @@ Checkpoint getLastSyncedCheckpoint() { protected final void ensureOpen() { if (isClosed()) { - throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragedy); + throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragedy.get()); } } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 39346fecbef25..5c097ba774f4a 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -233,7 +233,7 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon @Override protected void doStop() { - ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory("indices_shutdown")); + ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory(settings, "indices_shutdown")); // Copy indices because we modify it asynchronously in the body of the loop final Set indices = this.indices.values().stream().map(s -> s.index()).collect(Collectors.toSet()); diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java index 1ff8b701a83e1..84c337399d5b4 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java @@ -91,7 +91,8 @@ public void removeListener(TransportConnectionListener listener) { } public Transport.Connection openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) { - return transport.openConnection(node, ConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile)); + ConnectionProfile resolvedProfile = ConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile); + return internalOpenConnection(node, resolvedProfile); } /** @@ -115,7 +116,7 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil } boolean success = false; try { - connection = transport.openConnection(node, resolvedProfile); + connection = internalOpenConnection(node, resolvedProfile); connectionValidator.accept(connection, resolvedProfile); // we acquire a connection lock, so no way there is an existing connection connectedNodes.put(node, connection); @@ -227,6 +228,19 @@ public void close() { } } + private Transport.Connection internalOpenConnection(DiscoveryNode node, ConnectionProfile connectionProfile) { + Transport.Connection connection = transport.openConnection(node, connectionProfile); + try { + connectionListener.onConnectionOpened(connection); + } finally { + connection.addCloseListener(ActionListener.wrap(() -> connectionListener.onConnectionClosed(connection))); + } + if 
(connection.isClosed()) { + throw new ConnectTransportException(node, "a channel closed while connecting"); + } + return connection; + } + private void ensureOpen() { if (lifecycle.started() == false) { throw new IllegalStateException("connection manager is closed"); @@ -289,6 +303,20 @@ public void onNodeConnected(DiscoveryNode node) { listener.onNodeConnected(node); } } + + @Override + public void onConnectionOpened(Transport.Connection connection) { + for (TransportConnectionListener listener : listeners) { + listener.onConnectionOpened(connection); + } + } + + @Override + public void onConnectionClosed(Transport.Connection connection) { + for (TransportConnectionListener listener : listeners) { + listener.onConnectionClosed(connection); + } + } } static ConnectionProfile buildDefaultConnectionProfile(Settings settings) { diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 0b82417cfaa04..d71e459fccdf0 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -184,7 +184,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements protected final NetworkService networkService; protected final Set profileSettings; - private final DelegatingTransportConnectionListener transportListener = new DelegatingTransportConnectionListener(); + private final DelegatingTransportMessageListener messageListener = new DelegatingTransportMessageListener(); private final ConcurrentMap profileBoundAddresses = newConcurrentMap(); private final Map> serverChannels = newConcurrentMap(); @@ -248,14 +248,12 @@ public TcpTransport(String transportName, Settings settings, ThreadPool threadPo protected void doStart() { } - @Override - public void addConnectionListener(TransportConnectionListener listener) { - transportListener.listeners.add(listener); + public void addMessageListener(TransportMessageListener listener) { + messageListener.listeners.add(listener); } - @Override - public boolean removeConnectionListener(TransportConnectionListener listener) { - return transportListener.listeners.remove(listener); + public boolean removeMessageListener(TransportMessageListener listener) { + return messageListener.listeners.remove(listener); } @Override @@ -344,10 +342,6 @@ public TcpChannel channel(TransportRequestOptions.Type type) { return connectionTypeHandle.getChannel(channels); } - boolean allChannelsOpen() { - return channels.stream().allMatch(TcpChannel::isOpen); - } - @Override public boolean sendPing() { for (TcpChannel channel : channels) { @@ -447,7 +441,7 @@ public NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connect try { PlainActionFuture connectFuture = PlainActionFuture.newFuture(); connectionFutures.add(connectFuture); - TcpChannel channel = initiateChannel(node.getAddress().address(), connectFuture); + TcpChannel channel = initiateChannel(node, connectFuture); logger.trace(() -> new ParameterizedMessage("Tcp transport client channel opened: {}", channel)); channels.add(channel); } catch (Exception e) { @@ -481,11 +475,6 @@ public NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connect // underlying channels. 
nodeChannels = new NodeChannels(node, channels, connectionProfile, version); final NodeChannels finalNodeChannels = nodeChannels; - try { - transportListener.onConnectionOpened(nodeChannels); - } finally { - nodeChannels.addCloseListener(ActionListener.wrap(() -> transportListener.onConnectionClosed(finalNodeChannels))); - } Consumer onClose = c -> { assert c.isOpen() == false : "channel is still open when onClose is called"; @@ -493,10 +482,6 @@ public NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connect }; nodeChannels.channels.forEach(ch -> ch.addCloseListener(ActionListener.wrap(() -> onClose.accept(ch)))); - - if (nodeChannels.allChannelsOpen() == false) { - throw new ConnectTransportException(node, "a channel closed while connecting"); - } success = true; return nodeChannels; } catch (ConnectTransportException e) { @@ -856,12 +841,12 @@ protected void serverAcceptedChannel(TcpChannel channel) { /** * Initiate a single tcp socket channel. * - * @param address address for the initiated connection + * @param node for the initiated connection * @param connectListener listener to be called when connection complete * @return the pending connection * @throws IOException if an I/O exception occurs while opening the channel */ - protected abstract TcpChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException; + protected abstract TcpChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException; /** * Called to tear down internal resources @@ -907,7 +892,7 @@ private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel cha final TransportRequestOptions finalOptions = options; // this might be called in a different thread SendListener onRequestSent = new SendListener(channel, stream, - () -> transportListener.onRequestSent(node, requestId, action, request, finalOptions), message.length()); + () -> messageListener.onRequestSent(node, requestId, action, request, finalOptions), message.length()); internalSendMessage(channel, message, onRequestSent); addedReleaseListener = true; } finally { @@ -961,7 +946,7 @@ public void sendErrorResponse( final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length()); CompositeBytesReference message = new CompositeBytesReference(header, bytes); SendListener onResponseSent = new SendListener(channel, null, - () -> transportListener.onResponseSent(requestId, action, error), message.length()); + () -> messageListener.onResponseSent(requestId, action, error), message.length()); internalSendMessage(channel, message, onResponseSent); } } @@ -1010,7 +995,7 @@ private void sendResponse( final TransportResponseOptions finalOptions = options; // this might be called in a different thread SendListener listener = new SendListener(channel, stream, - () -> transportListener.onResponseSent(requestId, action, response, finalOptions), message.length()); + () -> messageListener.onResponseSent(requestId, action, response, finalOptions), message.length()); internalSendMessage(channel, message, listener); addedReleaseListener = true; } finally { @@ -1266,7 +1251,7 @@ public final void messageReceived(BytesReference reference, TcpChannel channel) if (isHandshake) { handler = pendingHandshakes.remove(requestId); } else { - TransportResponseHandler theHandler = responseHandlers.onResponseReceived(requestId, transportListener); + TransportResponseHandler theHandler = responseHandlers.onResponseReceived(requestId, messageListener); if (theHandler == 
null && TransportStatus.isError(status)) { handler = pendingHandshakes.remove(requestId); } else { @@ -1373,7 +1358,7 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str features = Collections.emptySet(); } final String action = stream.readString(); - transportListener.onRequestReceived(requestId, action); + messageListener.onRequestReceived(requestId, action); TransportChannel transportChannel = null; try { if (TransportStatus.isHandshake(status)) { @@ -1682,26 +1667,27 @@ public ProfileSettings(Settings settings, String profileName) { } } - private static final class DelegatingTransportConnectionListener implements TransportConnectionListener { - private final List listeners = new CopyOnWriteArrayList<>(); + private static final class DelegatingTransportMessageListener implements TransportMessageListener { + + private final List listeners = new CopyOnWriteArrayList<>(); @Override public void onRequestReceived(long requestId, String action) { - for (TransportConnectionListener listener : listeners) { + for (TransportMessageListener listener : listeners) { listener.onRequestReceived(requestId, action); } } @Override public void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions finalOptions) { - for (TransportConnectionListener listener : listeners) { + for (TransportMessageListener listener : listeners) { listener.onResponseSent(requestId, action, response, finalOptions); } } @Override public void onResponseSent(long requestId, String action, Exception error) { - for (TransportConnectionListener listener : listeners) { + for (TransportMessageListener listener : listeners) { listener.onResponseSent(requestId, action, error); } } @@ -1709,42 +1695,14 @@ public void onResponseSent(long requestId, String action, Exception error) { @Override public void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions finalOptions) { - for (TransportConnectionListener listener : listeners) { + for (TransportMessageListener listener : listeners) { listener.onRequestSent(node, requestId, action, request, finalOptions); } } - @Override - public void onNodeDisconnected(DiscoveryNode key) { - for (TransportConnectionListener listener : listeners) { - listener.onNodeDisconnected(key); - } - } - - @Override - public void onConnectionOpened(Connection nodeChannels) { - for (TransportConnectionListener listener : listeners) { - listener.onConnectionOpened(nodeChannels); - } - } - - @Override - public void onNodeConnected(DiscoveryNode node) { - for (TransportConnectionListener listener : listeners) { - listener.onNodeConnected(node); - } - } - - @Override - public void onConnectionClosed(Connection nodeChannels) { - for (TransportConnectionListener listener : listeners) { - listener.onConnectionClosed(nodeChannels); - } - } - @Override public void onResponseReceived(long requestId, ResponseContext holder) { - for (TransportConnectionListener listener : listeners) { + for (TransportMessageListener listener : listeners) { listener.onResponseReceived(requestId, holder); } } diff --git a/server/src/main/java/org/elasticsearch/transport/Transport.java b/server/src/main/java/org/elasticsearch/transport/Transport.java index 9538119f43b8a..90adf2ab9e7d4 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transport.java +++ b/server/src/main/java/org/elasticsearch/transport/Transport.java @@ -56,18 +56,9 @@ public interface Transport extends LifecycleComponent { */ 
RequestHandlerRegistry getRequestHandler(String action); - /** - * Adds a new event listener - * @param listener the listener to add - */ - void addConnectionListener(TransportConnectionListener listener); + void addMessageListener(TransportMessageListener listener); - /** - * Removes an event listener - * @param listener the listener to remove - * @return true iff the listener was removed otherwise false - */ - boolean removeConnectionListener(TransportConnectionListener listener); + boolean removeMessageListener(TransportMessageListener listener); /** * The address the transport is bound on. @@ -254,7 +245,7 @@ public List prune(Predicate predicate) { * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not * found. */ - public TransportResponseHandler onResponseReceived(final long requestId, TransportConnectionListener listener) { + public TransportResponseHandler onResponseReceived(final long requestId, TransportMessageListener listener) { ResponseContext context = handlers.remove(requestId); listener.onResponseReceived(requestId, context); if (context == null) { diff --git a/server/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java b/server/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java index 0ee2ed5828d44..c41a328637c22 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java @@ -28,42 +28,6 @@ */ public interface TransportConnectionListener { - /** - * Called once a request is received - * @param requestId the internal request ID - * @param action the request action - * - */ - default void onRequestReceived(long requestId, String action) {} - - /** - * Called for every action response sent after the response has been passed to the underlying network implementation. - * @param requestId the request ID (unique per client) - * @param action the request action - * @param response the response send - * @param finalOptions the response options - */ - default void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions finalOptions) {} - - /*** - * Called for every failed action response after the response has been passed to the underlying network implementation. - * @param requestId the request ID (unique per client) - * @param action the request action - * @param error the error sent back to the caller - */ - default void onResponseSent(long requestId, String action, Exception error) {} - - /** - * Called for every request sent to a server after the request has been passed to the underlying network implementation - * @param node the node the request was sent to - * @param requestId the internal request id - * @param action the action name - * @param request the actual request - * @param finalOptions the request options - */ - default void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, - TransportRequestOptions finalOptions) {} - /** * Called once a connection was opened * @param connection the connection @@ -76,13 +40,6 @@ default void onConnectionOpened(Transport.Connection connection) {} */ default void onConnectionClosed(Transport.Connection connection) {} - /** - * Called for every response received - * @param requestId the request id for this reponse - * @param context the response context or null if the context was already processed ie. due to a timeout. 
- */ - default void onResponseReceived(long requestId, Transport.ResponseContext context) {} - /** * Called once a node connection is opened and registered. */ diff --git a/server/src/main/java/org/elasticsearch/transport/TransportMessageListener.java b/server/src/main/java/org/elasticsearch/transport/TransportMessageListener.java new file mode 100644 index 0000000000000..a872c761b36d0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/TransportMessageListener.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.elasticsearch.cluster.node.DiscoveryNode; + +public interface TransportMessageListener { + + /** + * Called once a request is received + * @param requestId the internal request ID + * @param action the request action + * + */ + default void onRequestReceived(long requestId, String action) {} + + /** + * Called for every action response sent after the response has been passed to the underlying network implementation. + * @param requestId the request ID (unique per client) + * @param action the request action + * @param response the response sent + * @param finalOptions the response options + */ + default void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions finalOptions) {} + + /** + * Called for every failed action response after the response has been passed to the underlying network implementation. + * @param requestId the request ID (unique per client) + * @param action the request action + * @param error the error sent back to the caller + */ + default void onResponseSent(long requestId, String action, Exception error) {} + + /** + * Called for every request sent to a server after the request has been passed to the underlying network implementation + * @param node the node the request was sent to + * @param requestId the internal request id + * @param action the action name + * @param request the actual request + * @param finalOptions the request options + */ + default void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, + TransportRequestOptions finalOptions) {} + + /** + * Called for every response received + * @param requestId the request id for this response + * @param context the response context or null if the context was already processed, i.e. due to a timeout.
+ */ + default void onResponseReceived(long requestId, Transport.ResponseContext context) {} +} diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 8eca6504b70be..fb14ae96dbf20 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -77,7 +77,7 @@ import static org.elasticsearch.common.settings.Setting.listSetting; import static org.elasticsearch.common.settings.Setting.timeSetting; -public class TransportService extends AbstractLifecycleComponent implements TransportConnectionListener { +public class TransportService extends AbstractLifecycleComponent implements TransportMessageListener, TransportConnectionListener { public static final Setting CONNECTIONS_PER_NODE_RECOVERY = intSetting("transport.connections_per_node.recovery", 2, 1, Setting.Property.NodeScope); @@ -248,7 +248,8 @@ void setTracerLogExclude(List tracerLogExclude) { @Override protected void doStart() { - transport.addConnectionListener(this); + transport.addMessageListener(this); + connectionManager.addListener(this); transport.start(); if (transport.boundAddress() != null && logger.isInfoEnabled()) { logger.info("{}", transport.boundAddress()); @@ -506,12 +507,10 @@ public void disconnectFromNode(DiscoveryNode node) { } public void addConnectionListener(TransportConnectionListener listener) { - transport.addConnectionListener(listener); connectionManager.addListener(listener); } public void removeConnectionListener(TransportConnectionListener listener) { - transport.removeConnectionListener(listener); connectionManager.removeListener(listener); } diff --git a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java index 513f07b733cda..e9ff4048bfe15 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++ b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java @@ -38,8 +38,8 @@ import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportMessageListener; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; @@ -62,7 +62,7 @@ abstract class FailAndRetryMockTransport imp private volatile Map requestHandlers = Collections.emptyMap(); private final Object requestHandlerMutex = new Object(); private final ResponseHandlers responseHandlers = new ResponseHandlers(); - private TransportConnectionListener listener; + private TransportMessageListener listener; private boolean connectMode = true; @@ -223,13 +223,15 @@ public RequestHandlerRegistry getRequestHandler(String action) { return requestHandlers.get(action); } + @Override - public void addConnectionListener(TransportConnectionListener listener) { + public void addMessageListener(TransportMessageListener listener) { this.listener = listener; } @Override - public boolean removeConnectionListener(TransportConnectionListener listener) { + public boolean removeMessageListener(TransportMessageListener 
listener) { throw new UnsupportedOperationException(); } + } diff --git a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index 92c1016d3e615..473f5152e8fd7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -37,9 +37,9 @@ import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportInterceptor; +import org.elasticsearch.transport.TransportMessageListener; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; @@ -107,7 +107,6 @@ public void testConnectAndDisconnect() { assertConnectedExactlyToNodes(event.state()); } - public void testReconnect() { List nodes = generateNodes(); NodeConnectionsService service = new NodeConnectionsService(Settings.EMPTY, threadPool, transportService); @@ -188,7 +187,7 @@ public HandshakeResponse handshake(Transport.Connection connection, long timeout private final class MockTransport implements Transport { private ResponseHandlers responseHandlers = new ResponseHandlers(); private volatile boolean randomConnectionExceptions = false; - private TransportConnectionListener listener = new TransportConnectionListener() { + private TransportMessageListener listener = new TransportMessageListener() { }; @Override @@ -201,12 +200,12 @@ public RequestHandlerRegistry getRequestHandler(String action) { } @Override - public void addConnectionListener(TransportConnectionListener listener) { + public void addMessageListener(TransportMessageListener listener) { this.listener = listener; } @Override - public boolean removeConnectionListener(TransportConnectionListener listener) { + public boolean removeMessageListener(TransportMessageListener listener) { throw new UnsupportedOperationException(); } @@ -231,7 +230,6 @@ public Connection openConnection(DiscoveryNode node, ConnectionProfile connectio if (randomConnectionExceptions && randomBoolean()) { throw new ConnectTransportException(node, "simulated"); } - listener.onNodeConnected(node); } Connection connection = new Connection() { @Override @@ -260,7 +258,6 @@ public boolean isClosed() { return false; } }; - listener.onConnectionOpened(connection); return connection; } diff --git a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java index 0074da43fcfb8..520f80fecac44 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java +++ b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java @@ -59,7 +59,7 @@ public void testParseFromXContent() throws IOException { Float floatRep = randomFloat(); Number value = intValue; if (randomBoolean()) { - value = new Float(floatRep += intValue); + value = Float.valueOf(floatRep += intValue); } XContentBuilder json = jsonBuilder().startObject() .field(Fuzziness.X_FIELD_NAME, randomBoolean() ? 
value.toString() : value) diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index cc6152d98962f..a0fdcbf51ca1d 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -337,7 +337,7 @@ public void testInheritContext() throws InterruptedException { final CountDownLatch executed = new CountDownLatch(1); threadContext.putHeader("foo", "bar"); - final Integer one = new Integer(1); + final Integer one = Integer.valueOf(1); threadContext.putTransient("foo", one); EsThreadPoolExecutor executor = EsExecutors.newFixed(getName(), pool, queue, EsExecutors.daemonThreadFactory("dummy"), threadContext); diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java index e71efa46424b2..a0a92cad7a851 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java @@ -42,7 +42,7 @@ public void testStashContext() { threadContext.putHeader("foo", "bar"); threadContext.putTransient("ctx.foo", 1); assertEquals("bar", threadContext.getHeader("foo")); - assertEquals(new Integer(1), threadContext.getTransient("ctx.foo")); + assertEquals(Integer.valueOf(1), threadContext.getTransient("ctx.foo")); assertEquals("1", threadContext.getHeader("default")); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { assertNull(threadContext.getHeader("foo")); @@ -61,7 +61,7 @@ public void testStashAndMerge() { threadContext.putHeader("foo", "bar"); threadContext.putTransient("ctx.foo", 1); assertEquals("bar", threadContext.getHeader("foo")); - assertEquals(new Integer(1), threadContext.getTransient("ctx.foo")); + assertEquals(Integer.valueOf(1), threadContext.getTransient("ctx.foo")); assertEquals("1", threadContext.getHeader("default")); HashMap toMerge = new HashMap<>(); toMerge.put("foo", "baz"); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 67049bc3c1c25..45f77036ba649 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -4447,13 +4447,18 @@ public void testAcquireIndexCommit() throws Exception { public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { IOUtils.close(engine, store); - store = createStore(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", + Settings.builder().put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), -1) + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1).build()); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { + try (Store store = createStore(); + InternalEngine engine = + createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) { final int numDocs = scaledRandomIntBetween(10, 100); for (int docId = 0; docId < 
numDocs; docId++) { index(engine, docId); - if (frequently()) { + if (rarely()) { engine.flush(randomBoolean(), randomBoolean()); } } @@ -4467,6 +4472,7 @@ public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { globalCheckpoint.set(randomLongBetween(engine.getLocalCheckpoint(), Long.MAX_VALUE)); engine.syncTranslog(); assertThat(DirectoryReader.listCommits(store.directory()), contains(commits.get(commits.size() - 1))); + assertThat(engine.estimateTranslogOperationsFromMinSeq(0L), equalTo(0)); } } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java index 9ae502fecb580..c8d4dbd43df2f 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java @@ -171,7 +171,7 @@ private Tuple, TranslogWriter> createReadersAndWriter(final } writer = TranslogWriter.create(new ShardId("index", "uuid", 0), translogUUID, gen, tempDir.resolve(Translog.getFilename(gen)), FileChannel::open, TranslogConfig.DEFAULT_BUFFER_SIZE, 1L, 1L, () -> 1L, - () -> 1L, randomNonNegativeLong()); + () -> 1L, randomNonNegativeLong(), new TragicExceptionHolder()); writer = Mockito.spy(writer); Mockito.doReturn(now - (numberOfReaders - gen + 1) * 1000).when(writer).getLastModifiedTime(); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 1c27a59e0ecbe..4ec479334ba67 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -33,6 +33,7 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.Assertions; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; @@ -108,6 +109,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongSupplier; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.LongStream; @@ -1655,7 +1657,7 @@ public void testRandomExceptionsOnTrimOperations( ) throws Exception { } assertThat(expectedException, is(not(nullValue()))); - + assertThat(failableTLog.getTragicException(), equalTo(expectedException)); assertThat(fileChannels, is(not(empty()))); assertThat("all file channels have to be closed", fileChannels.stream().filter(f -> f.isOpen()).findFirst().isPresent(), is(false)); @@ -2505,11 +2507,13 @@ public void testWithRandomException() throws IOException { syncedDocs.addAll(unsynced); unsynced.clear(); } catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) { - // fair enough + assertEquals(failableTLog.getTragicException(), ex); } catch (IOException ex) { assertEquals(ex.getMessage(), "__FAKE__ no space left on device"); + assertEquals(failableTLog.getTragicException(), ex); } catch (RuntimeException ex) { assertEquals(ex.getMessage(), "simulated"); + assertEquals(failableTLog.getTragicException(), ex); } finally { Checkpoint checkpoint = Translog.readCheckpoint(config.getTranslogPath()); if (checkpoint.numOps == 
unsynced.size() + syncedDocs.size()) { @@ -2931,6 +2935,47 @@ public void testCloseSnapshotTwice() throws Exception { } } + // close method should never be called directly from Translog (the only exception is closeOnTragicEvent) + public void testTranslogCloseInvariant() throws IOException { + assumeTrue("test only works with assertions enabled", Assertions.ENABLED); + class MisbehavingTranslog extends Translog { + MisbehavingTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy, LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) throws IOException { + super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier); + } + + void callCloseDirectly() throws IOException { + close(); + } + + void callCloseUsingIOUtilsWithExceptionHandling() { + IOUtils.closeWhileHandlingException(this); + } + + void callCloseUsingIOUtils() throws IOException { + IOUtils.close(this); + } + + void callCloseOnTragicEvent() { + Exception e = new Exception("test tragic exception"); + tragedy.setTragicException(e); + closeOnTragicEvent(e); + } + } + + + globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + Path path = createTempDir(); + final TranslogConfig translogConfig = getTranslogConfig(path); + final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); + final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); + MisbehavingTranslog misbehavingTranslog = new MisbehavingTranslog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get(), primaryTerm::get); + + expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseDirectly()); + expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseUsingIOUtils()); + expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseUsingIOUtilsWithExceptionHandling()); + misbehavingTranslog.callCloseOnTragicEvent(); + } + static class SortedSnapshot implements Translog.Snapshot { private final Translog.Snapshot snapshot; private List operations = null; diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 3bb7995903343..fae1b1662f250 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -74,7 +74,6 @@ public void testTranslogHistoryTransferred() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32089") public void testRetentionPolicyChangeDuringRecovery() throws Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startPrimary(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index c000b7fb22891..f62598fa7c317 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -108,8 +108,14 @@ protected Map, Object>> pluginScripts() { aggScript(vars, state -> state.put((String) XContentMapValues.extractValue("params.param1", vars), XContentMapValues.extractValue("params.param2", vars)))); - scripts.put("vars.multiplier = 3", vars -> - ((Map) 
vars.get("vars")).put("multiplier", 3)); + scripts.put("vars.multiplier = 3", vars -> { + ((Map) vars.get("vars")).put("multiplier", 3); + + Map state = (Map) vars.get("state"); + state.put("list", new ArrayList()); + + return state; + }); scripts.put("state.list.add(vars.multiplier)", vars -> aggScript(vars, state -> { diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java index 37359d9f20d71..50f2261c722a1 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java @@ -601,10 +601,10 @@ private static void setRandomCommonOptions(AbstractHighlighterBuilder highlightB value = randomAlphaOfLengthBetween(1, 10); break; case 1: - value = new Integer(randomInt(1000)); + value = Integer.valueOf(randomInt(1000)); break; case 2: - value = new Boolean(randomBoolean()); + value = Boolean.valueOf(randomBoolean()); break; } options.put(randomAlphaOfLengthBetween(1, 10), value); diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index a3d2e1bbc574e..0b6112eb51c90 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -188,7 +188,7 @@ protected FakeChannel bind(String name, InetSocketAddress address) throws IOExce } @Override - protected FakeChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + protected FakeChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException { return new FakeChannel(messageCaptor); } diff --git a/settings.gradle b/settings.gradle index bdd866e622bcd..bdae1c396fda4 100644 --- a/settings.gradle +++ b/settings.gradle @@ -84,6 +84,7 @@ if (isEclipse) { // for server-src and server-tests projects << 'server-tests' projects << 'libs:core-tests' + projects << 'libs:dissect-tests' projects << 'libs:nio-tests' projects << 'libs:x-content-tests' projects << 'libs:secure-sm-tests' @@ -103,6 +104,10 @@ if (isEclipse) { project(":libs:core").buildFileName = 'eclipse-build.gradle' project(":libs:core-tests").projectDir = new File(rootProject.projectDir, 'libs/core/src/test') project(":libs:core-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:dissect").projectDir = new File(rootProject.projectDir, 'libs/dissect/src/main') + project(":libs:dissect").buildFileName = 'eclipse-build.gradle' + project(":libs:dissect-tests").projectDir = new File(rootProject.projectDir, 'libs/dissect/src/test') + project(":libs:dissect-tests").buildFileName = 'eclipse-build.gradle' project(":libs:nio").projectDir = new File(rootProject.projectDir, 'libs/nio/src/main') project(":libs:nio").buildFileName = 'eclipse-build.gradle' project(":libs:nio-tests").projectDir = new File(rootProject.projectDir, 'libs/nio/src/test') diff --git a/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java index 76cfcce033b11..44b845e5c2cac 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java @@ -33,7 +33,7 @@ */ public class EqualsHashCodeTestUtils { - private static Object[] someObjects = new Object[] { "some string", new Integer(1), new Double(1.0) }; + private static Object[] someObjects = new Object[] { "some string", Integer.valueOf(1), Double.valueOf(1.0) }; /** * A function that makes a copy of its input argument diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java index a7d3e85d3009d..60133a16a10af 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -41,9 +41,9 @@ import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.SendRequestTransportException; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportInterceptor; +import org.elasticsearch.transport.TransportMessageListener; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; @@ -72,7 +72,7 @@ public class CapturingTransport implements Transport { private volatile Map requestHandlers = Collections.emptyMap(); final Object requestHandlerMutex = new Object(); private final ResponseHandlers responseHandlers = new ResponseHandlers(); - private TransportConnectionListener listener; + private TransportMessageListener listener; public static class CapturedRequest { public final DiscoveryNode node; @@ -341,7 +341,7 @@ public RequestHandlerRegistry getRequestHandler(String action) { } @Override - public void addConnectionListener(TransportConnectionListener listener) { + public void addMessageListener(TransportMessageListener listener) { if (this.listener != null) { throw new IllegalStateException("listener already set"); } @@ -349,11 +349,12 @@ public void addConnectionListener(TransportConnectionListener listener) { } @Override - public boolean removeConnectionListener(TransportConnectionListener listener) { + public boolean removeMessageListener(TransportMessageListener listener) { if (listener == this.listener) { this.listener = null; return true; } return false; } + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java index 5a0dd3b7f6d53..2e78f8a9a4f04 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java @@ -29,8 +29,8 @@ import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportMessageListener; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportStats; @@ -86,13 +86,13 @@ Transport getDelegate() { } @Override - public void 
addConnectionListener(TransportConnectionListener listener) { - delegate.addConnectionListener(listener); + public void addMessageListener(TransportMessageListener listener) { + delegate.addMessageListener(listener); } @Override - public boolean removeConnectionListener(TransportConnectionListener listener) { - return delegate.removeConnectionListener(listener); + public boolean removeMessageListener(TransportMessageListener listener) { + return delegate.removeMessageListener(listener); } @Override @@ -179,7 +179,7 @@ public Map profileBoundAddresses() { return delegate.profileBoundAddresses(); } - private class WrappedConnection implements Transport.Connection { + public class WrappedConnection implements Transport.Connection { private final Transport.Connection connection; @@ -234,6 +234,10 @@ public Object getCacheKey() { public void close() { connection.close(); } + + public Transport.Connection getConnection() { + return connection; + } } @FunctionalInterface diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 29997b16ba071..4e59aaecf8de2 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.ClusterSettings; @@ -52,6 +53,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; @@ -2642,15 +2644,22 @@ public void testProfilesIncludesDefault() { public void testChannelCloseWhileConnecting() { try (MockTransportService service = build(Settings.builder().put("name", "close").build(), version0, null, true)) { - service.transport.addConnectionListener(new TransportConnectionListener() { + AtomicBoolean connectionClosedListenerCalled = new AtomicBoolean(false); + service.addConnectionListener(new TransportConnectionListener() { @Override public void onConnectionOpened(final Transport.Connection connection) { + closeConnectionChannel(connection); try { - closeConnectionChannel(service.getOriginalTransport(), connection); - } catch (final IOException e) { + assertBusy(connection::isClosed); + } catch (Exception e) { throw new AssertionError(e); } } + + @Override + public void onConnectionClosed(Transport.Connection connection) { + connectionClosedListenerCalled.set(true); + } }); final ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); builder.addConnections(1, @@ -2662,10 +2671,15 @@ public void onConnectionOpened(final Transport.Connection connection) { final ConnectTransportException e = expectThrows(ConnectTransportException.class, () -> service.openConnection(nodeA, builder.build())); assertThat(e, hasToString(containsString(("a channel closed while connecting")))); + 
assertTrue(connectionClosedListenerCalled.get()); } } - protected abstract void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException; + private void closeConnectionChannel(Transport.Connection connection) { + StubbableTransport.WrappedConnection wrappedConnection = (StubbableTransport.WrappedConnection) connection; + TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) wrappedConnection.getConnection(); + CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); + } @SuppressForbidden(reason = "need local ephemeral port") private InetSocketAddress getLocalEphemeral() throws UnknownHostException { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index e6d80ac24d88e..996508bdb887a 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport; import org.elasticsearch.cli.SuppressForbidden; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -162,7 +163,8 @@ private void readMessage(MockChannel mockChannel, StreamInput input) throws IOEx @Override @SuppressForbidden(reason = "real socket for mocking remote connections") - protected MockChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + protected MockChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException { + InetSocketAddress address = node.getAddress().address(); final MockSocket socket = new MockSocket(); final MockChannel channel = new MockChannel(socket, address, "none"); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index fbe61db6ee721..19543cfdcbb15 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -85,7 +86,8 @@ protected MockServerChannel bind(String name, InetSocketAddress address) throws } @Override - protected MockSocketChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + protected MockSocketChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException { + InetSocketAddress address = node.getAddress().address(); MockSocketChannel channel = nioGroup.openChannel(address, clientChannelFactory); channel.addConnectListener(ActionListener.toBiConsumer(connectListener)); return channel; diff --git a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java 
b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java index 6d1e5116474ff..42658b1d9a60f 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -60,13 +59,4 @@ protected Version executeHandshake(DiscoveryNode node, TcpChannel mockChannel, T public int channelsPerNodeConnection() { return 1; } - - @Override - protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException { - final MockTcpTransport t = (MockTcpTransport) transport; - final TcpTransport.NodeChannels channels = - (TcpTransport.NodeChannels) connection; - CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); - } - } diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java index 3a78f366bd87f..8b3e7dce367d8 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -99,12 +98,6 @@ protected int channelsPerNodeConnection() { return 3; } - @Override - protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException { - TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection; - CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); - } - public void testConnectException() throws UnknownHostException { try { serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), diff --git a/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc new file mode 100644 index 0000000000000..30968355f3ca5 --- /dev/null +++ b/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc @@ -0,0 +1,170 @@ +[role="xpack"] +[[configuring-kerberos-realm]] +=== Configuring a Kerberos realm + +Kerberos is used to protect services and uses a ticket-based authentication +protocol to authenticate users. +You can configure {es} to use the Kerberos V5 authentication protocol, which is +an industry standard protocol, to authenticate users. +In this scenario, clients must present Kerberos tickets for authentication. 
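+ +For example, with MIT Kerberos a user might obtain a ticket-granting ticket (TGT) with `kinit` before sending requests to {es} (the principal name below is illustrative): + +[source,sh] +-------------------------------------------------- +kinit user@ES.DOMAIN.LOCAL +-------------------------------------------------- +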
+ +In Kerberos, users authenticate with an authentication service and later +with a ticket granting service to generate a TGT (ticket-granting ticket). +This ticket is then presented to the service for authentication. +Refer to your Kerberos installation documentation for more information about +obtaining a TGT. {es} clients must first obtain a TGT and then initiate the process of +authenticating with {es}. + +For a summary of Kerberos terminology, see {stack-ov}/kerberos-realm.html[Kerberos authentication]. + +==== Before you begin + +. Deploy Kerberos. ++ +-- +You must have the Kerberos infrastructure set up in your environment. + +NOTE: Kerberos requires several external services to function properly, such as +time synchronization between all machines and working forward and reverse DNS +mappings in your domain. Refer to your Kerberos documentation for more details. + +These instructions do not cover setting up and configuring your Kerberos +deployment. Where examples are provided, they pertain to an MIT Kerberos V5 +deployment. For more information, see the +http://web.mit.edu/kerberos/www/index.html[MIT Kerberos documentation]. +-- + +. Configure Java GSS. ++ +-- + +{es} uses the Java GSS framework to support Kerberos authentication. +To support Kerberos authentication, {es} needs the following files: + +* `krb5.conf`, a Kerberos configuration file +* A `keytab` file that contains credentials for the {es} service principal + +The configuration requirements depend on your Kerberos setup. Refer to your +Kerberos documentation to configure the `krb5.conf` file. + +For more information on Java GSS, see the +https://docs.oracle.com/javase/10/security/kerberos-requirements1.htm[Java GSS Kerberos requirements]. +-- + +==== Create a Kerberos realm + +To configure a Kerberos realm in {es}: + +. Configure the JVM to find the Kerberos configuration file. ++ +-- +{es} uses Java GSS and the JAAS Krb5LoginModule to support Kerberos authentication +using the Simple and Protected GSSAPI Negotiation Mechanism (SPNEGO). +The Kerberos configuration file (`krb5.conf`) provides information such as the +default realm, the Key Distribution Center (KDC), and other configuration details +required for Kerberos authentication. When the JVM needs some configuration +properties, it tries to find those values by locating and loading this file. The +JVM system property to configure the file path is `java.security.krb5.conf`. To +configure JVM system properties, see {ref}/jvm-options.html[configuring jvm options]. +If this system property is not specified, Java tries to locate the file based on +default conventions. + +TIP: It is recommended that this system property be configured for {es}. +The method for setting this property depends on your Kerberos infrastructure. +Refer to your Kerberos documentation for more details. + +For more information, see http://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html[krb5.conf]. + +-- + +. Create a keytab for the {es} node. ++ +-- +A keytab is a file that stores pairs of principals and encryption keys. {es} +uses the keys from the keytab to decrypt the tickets presented by the user. You +must create a keytab for {es} by using the tools provided by your Kerberos +implementation. For example, tools that create keytabs include `ktpass.exe` on +Windows and `kadmin` for MIT Kerberos. +-- + +. Put the keytab file in the {es} configuration directory. ++ +-- +Make sure that this keytab file has read permissions.
This file contains +credentials, therefore you must take appropriate measures to protect it. + +IMPORTANT: {es} uses Kerberos on the HTTP network layer, therefore there must be +a keytab file for the HTTP service principal on every {es} node. The service +principal name must have the format `HTTP/es.domain.local@ES.DOMAIN.LOCAL`. +The keytab files are unique for each node since they include the hostname. +An {es} node can act as any principal a client requests as long as that +principal and its credentials are found in the configured keytab. + +-- + +. Create a Kerberos realm. ++ +-- + +To enable Kerberos authentication in {es}, you must add a Kerberos realm in the +realm chain. + +NOTE: You can configure only one Kerberos realm on {es} nodes. + +To configure a Kerberos realm, there are a few mandatory realm settings and +other optional settings that you need to configure in the `elasticsearch.yml` +configuration file. Add a realm of type `kerberos` under the +`xpack.security.authc.realms` namespace. + +The most common configuration for a Kerberos realm is as follows: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.realms.kerb1: + type: kerberos + order: 3 + keytab.path: es.keytab + remove_realm_name: false +------------------------------------------------------------ + +The `username` is extracted from the ticket presented by the user and usually has +the format `username@REALM`. This `username` is used for mapping +roles to the user. If the realm setting `remove_realm_name` is +set to `true`, the realm part (`@REALM`) is removed and the resulting `username` +is used for role mapping. + +For detailed information about available realm settings, +see {ref}/security-settings.html#ref-kerberos-settings[Kerberos realm settings]. + +-- + +. Restart {es}. + +. Map Kerberos users to roles. ++ +-- + +The `kerberos` realm enables you to map Kerberos users to roles. You can +configure these role mappings by using the +{ref}/security-api-role-mapping.html[role-mapping API]. You identify +users by their `username` field. + +The following example uses the role mapping API to map `user@REALM` to the role +`monitoring_user`: + +[source,js] +-------------------------------------------------- +POST _xpack/security/role_mapping/kerbrolemapping +{ + "roles" : [ "monitoring_user" ], + "enabled": true, + "rules" : { + "field" : { "username" : "user@REALM" } + } +} +-------------------------------------------------- +// CONSOLE + +For more information, see {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. +-- + diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index 53f36afc73481..a13547263a582 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -77,6 +77,7 @@ user API. ** <>. ** <>. ** <>. +** <<configuring-kerberos-realm>>. . Set up roles and users to control access to {es}.
For example, to grant _John Doe_ full access to all indices that match @@ -142,5 +143,7 @@ include::authentication/configuring-ldap-realm.asciidoc[] include::authentication/configuring-native-realm.asciidoc[] include::authentication/configuring-pki-realm.asciidoc[] include::authentication/configuring-saml-realm.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc +include::authentication/configuring-kerberos-realm.asciidoc[] include::{es-repo-dir}/settings/security-settings.asciidoc[] include::{es-repo-dir}/settings/audit-settings.asciidoc[] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index e087438924394..0619aef6961cc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -120,7 +120,7 @@ public LicenseService(Settings settings, ClusterService clusterService, Clock cl super(settings); this.clusterService = clusterService; this.clock = clock; - this.scheduler = new SchedulerEngine(clock); + this.scheduler = new SchedulerEngine(settings, clock); this.licenseState = licenseState; this.operationModeFileWatcher = new OperationModeFileWatcher(resourceWatcherService, XPackPlugin.resolveConfigFile(env, "license_mode"), logger, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java new file mode 100644 index 0000000000000..043224e357b91 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java @@ -0,0 +1,281 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.xpack.core.action.XPackInfoAction; + +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +/** + * Checks remote clusters for license compatibility with a specified license predicate. + */ +public final class RemoteClusterLicenseChecker { + + /** + * Encapsulates the license info of a remote cluster. + */ + public static final class RemoteClusterLicenseInfo { + + private final String clusterAlias; + + /** + * The alias of the remote cluster. + * + * @return the cluster alias + */ + public String clusterAlias() { + return clusterAlias; + } + + private final XPackInfoResponse.LicenseInfo licenseInfo; + + /** + * The license info of the remote cluster. 
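+ * The info is retrieved from the remote cluster via the x-pack info API ({@link XPackInfoAction}).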
+ * + * @return the license info + */ + public XPackInfoResponse.LicenseInfo licenseInfo() { + return licenseInfo; + } + + RemoteClusterLicenseInfo(final String clusterAlias, final XPackInfoResponse.LicenseInfo licenseInfo) { + this.clusterAlias = clusterAlias; + this.licenseInfo = licenseInfo; + } + + } + + /** + * Encapsulates a remote cluster license check. The check is either successful if the license of the remote cluster is compatible with + * the predicate used to check license compatibility, or the check is a failure. + */ + public static final class LicenseCheck { + + private final RemoteClusterLicenseInfo remoteClusterLicenseInfo; + + /** + * The remote cluster license info. This method should only be invoked if this instance represents a failing license check. + * + * @return the remote cluster license info + */ + public RemoteClusterLicenseInfo remoteClusterLicenseInfo() { + assert isSuccess() == false; + return remoteClusterLicenseInfo; + } + + private static final LicenseCheck SUCCESS = new LicenseCheck(null); + + /** + * A successful license check. + * + * @return a successful license check instance + */ + public static LicenseCheck success() { + return SUCCESS; + } + + /** + * Test if this instance represents a successful license check. + * + * @return true if this instance represents a successful license check, otherwise false + */ + public boolean isSuccess() { + return this == SUCCESS; + } + + /** + * Creates a failing license check encapsulating the specified remote cluster license info. + * + * @param remoteClusterLicenseInfo the remote cluster license info + * @return a failing license check + */ + public static LicenseCheck failure(final RemoteClusterLicenseInfo remoteClusterLicenseInfo) { + return new LicenseCheck(remoteClusterLicenseInfo); + } + + private LicenseCheck(final RemoteClusterLicenseInfo remoteClusterLicenseInfo) { + this.remoteClusterLicenseInfo = remoteClusterLicenseInfo; + } + + } + + private final Client client; + private final Predicate predicate; + + /** + * Constructs a remote cluster license checker with the specified license predicate for checking license compatibility. The predicate + * does not need to check for the active license state as this is handled by the remote cluster license checker. + * + * @param client the client + * @param predicate the license predicate + */ + public RemoteClusterLicenseChecker(final Client client, final Predicate predicate) { + this.client = client; + this.predicate = predicate; + } + + public static boolean isLicensePlatinumOrTrial(final XPackInfoResponse.LicenseInfo licenseInfo) { + final License.OperationMode mode = License.OperationMode.resolve(licenseInfo.getMode()); + return mode == License.OperationMode.PLATINUM || mode == License.OperationMode.TRIAL; + } + + /** + * Checks the specified clusters for license compatibility. The specified callback will be invoked once if all clusters are + * license-compatible, otherwise the specified callback will be invoked once on the first cluster that is not license-compatible. 
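+ * + * A hypothetical usage sketch; {@code client}, {@code indices}, {@code runFeature}, and {@code handleFailure} are illustrative names, not part of this API: + * + * <pre>{@code + * RemoteClusterLicenseChecker checker = + *         new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + * checker.checkRemoteClusterLicenses( + *         RemoteClusterLicenseChecker.remoteClusterAliases(indices), + *         ActionListener.wrap(check -> { + *             if (check.isSuccess()) { + *                 runFeature(); + *             } else { + *                 handleFailure(RemoteClusterLicenseChecker.buildErrorMessage( + *                         "my feature", check.remoteClusterLicenseInfo(), + *                         RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)); + *             } + *         }, exception -> handleFailure(exception.getMessage()))); + * }</pre>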
+ * + * @param clusterAliases the cluster aliases to check + * @param listener a callback + */ + public void checkRemoteClusterLicenses(final List clusterAliases, final ActionListener listener) { + final Iterator clusterAliasesIterator = clusterAliases.iterator(); + if (clusterAliasesIterator.hasNext() == false) { + listener.onResponse(LicenseCheck.success()); + return; + } + + final AtomicReference clusterAlias = new AtomicReference<>(); + + final ActionListener infoListener = new ActionListener() { + + @Override + public void onResponse(final XPackInfoResponse xPackInfoResponse) { + final XPackInfoResponse.LicenseInfo licenseInfo = xPackInfoResponse.getLicenseInfo(); + if ((licenseInfo.getStatus() == LicenseStatus.ACTIVE) == false || predicate.test(licenseInfo) == false) { + listener.onResponse(LicenseCheck.failure(new RemoteClusterLicenseInfo(clusterAlias.get(), licenseInfo))); + return; + } + + if (clusterAliasesIterator.hasNext()) { + clusterAlias.set(clusterAliasesIterator.next()); + // recurse to the next cluster + remoteClusterLicense(clusterAlias.get(), this); + } else { + listener.onResponse(LicenseCheck.success()); + } + } + + @Override + public void onFailure(final Exception e) { + final String message = "could not determine the license type for cluster [" + clusterAlias.get() + "]"; + listener.onFailure(new ElasticsearchException(message, e)); + } + + }; + + // check the license on the first cluster, and then we recursively check licenses on the remaining clusters + clusterAlias.set(clusterAliasesIterator.next()); + remoteClusterLicense(clusterAlias.get(), infoListener); + } + + private void remoteClusterLicense(final String clusterAlias, final ActionListener listener) { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + final ContextPreservingActionListener contextPreservingActionListener = + new ContextPreservingActionListener<>(threadContext.newRestorableContext(false), listener); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we stash any context here since this is an internal execution and should not leak any existing context information + threadContext.markAsSystemContext(); + + final XPackInfoRequest request = new XPackInfoRequest(); + request.setCategories(EnumSet.of(XPackInfoRequest.Category.LICENSE)); + try { + client.getRemoteClusterClient(clusterAlias).execute(XPackInfoAction.INSTANCE, request, contextPreservingActionListener); + } catch (final Exception e) { + contextPreservingActionListener.onFailure(e); + } + } + } + + /** + * Predicate to test if the index name represents the name of a remote index. + * + * @param index the index name + * @return true if the collection of indices contains a remote index, otherwise false + */ + public static boolean isRemoteIndex(final String index) { + return index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) != -1; + } + + /** + * Predicate to test if the collection of index names contains any that represent the name of a remote index. + * + * @param indices the collection of index names + * @return true if the collection of index names contains a name that represents a remote index, otherwise false + */ + public static boolean containsRemoteIndex(final List indices) { + return indices.stream().anyMatch(RemoteClusterLicenseChecker::isRemoteIndex); + } + + /** + * Filters the collection of index names for names that represent a remote index. Remote index names are of the form + * {@code cluster_name:index_name}. 
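Editor's note: as a sketch of how a feature might drive the new checker, using only methods introduced in this file, consider the following. The feature name "my_feature", the index list, and the startFeature callback are invented for illustration; this is not code from the change itself.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.license.RemoteClusterLicenseChecker;

import java.util.Arrays;
import java.util.List;

class RemoteLicenseCheckSketch {

    // client is a node client as used elsewhere in x-pack; startFeature is a
    // stand-in for whatever work is gated on the license check
    static void startIfLicensed(final Client client, final Runnable startFeature) {
        final List<String> indices = Arrays.asList("local-index", "remote:remote-index");
        if (RemoteClusterLicenseChecker.containsRemoteIndex(indices) == false) {
            startFeature.run();
            return;
        }
        final RemoteClusterLicenseChecker checker =
                new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial);
        checker.checkRemoteClusterLicenses(
                RemoteClusterLicenseChecker.remoteClusterAliases(indices),
                ActionListener.wrap(
                        check -> {
                            if (check.isSuccess()) {
                                startFeature.run();
                            } else {
                                // a remote cluster has an inactive or incompatible license
                                throw new IllegalStateException(RemoteClusterLicenseChecker.buildErrorMessage(
                                        "my_feature",
                                        check.remoteClusterLicenseInfo(),
                                        RemoteClusterLicenseChecker::isLicensePlatinumOrTrial));
                            }
                        },
                        e -> { /* the license type of a remote cluster could not be determined */ }));
    }
}

Note that the listener is invoked exactly once: on the first incompatible cluster, or after the last compatible one.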
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java index e405c5e4e007e..ffc0257313b36 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.scheduler; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -92,9 +93,9 @@ public interface Schedule { private final Clock clock; private final List<Listener> listeners = new CopyOnWriteArrayList<>(); - public SchedulerEngine(Clock clock) { + public SchedulerEngine(Settings settings, Clock clock) { this.clock = clock; - this.scheduler = Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory("trigger_engine_scheduler")); + this.scheduler = Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(settings, "trigger_engine_scheduler")); } public void register(Listener listener) {
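Editor's note: the SchedulerEngine change only threads Settings through to EsExecutors.daemonThreadFactory so scheduler threads are named after the node. A minimal construction sketch under that assumption; the node name "node-1" is arbitrary, and the rollup tests later in this diff set up Settings the same way.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
import org.elasticsearch.xpack.core.scheduler.SchedulerEngine;

import java.time.Clock;

class SchedulerEngineConstructionSketch {
    public static void main(String[] args) {
        // the daemon thread factory derives its thread-name prefix from the node name setting
        final Settings settings = Settings.builder()
                .put(Node.NODE_NAME_SETTING.getKey(), "node-1")
                .build();
        final SchedulerEngine engine = new SchedulerEngine(settings, Clock.systemUTC());
        engine.stop(); // assumption: SchedulerEngine exposes stop() to shut its executor down
    }
}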
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java new file mode 100644 index 0000000000000..a8627d2154209 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java @@ -0,0 +1,414 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.action.XPackInfoAction; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.argThat; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public final class RemoteClusterLicenseCheckerTests extends ESTestCase { + + public void testIsNotRemoteIndex() { + assertFalse(RemoteClusterLicenseChecker.isRemoteIndex("local-index")); + } + + public void testIsRemoteIndex() { + assertTrue(RemoteClusterLicenseChecker.isRemoteIndex("remote-cluster:remote-index")); + } + + public void testNoRemoteIndex() { + final List<String> indices = Arrays.asList("local-index1", "local-index2"); + assertFalse(RemoteClusterLicenseChecker.containsRemoteIndex(indices)); + } + + public void testRemoteIndex() { + final List<String> indices = Arrays.asList("local-index", "remote-cluster:remote-index"); + assertTrue(RemoteClusterLicenseChecker.containsRemoteIndex(indices)); + } + + public void testNoRemoteIndices() { + final List<String> indices = Collections.singletonList("local-index"); + assertThat(RemoteClusterLicenseChecker.remoteIndices(indices), is(empty())); + } + + public void testRemoteIndices() { + final List<String> indices = Arrays.asList("local-index1", "remote-cluster1:index1", "local-index2", "remote-cluster2:index1"); + assertThat( + RemoteClusterLicenseChecker.remoteIndices(indices), + containsInAnyOrder("remote-cluster1:index1", "remote-cluster2:index1")); + } + + public void testNoRemoteClusterAliases() { + final List<String> indices = Arrays.asList("local-index1", "local-index2"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), empty()); + } + + public void testOneRemoteClusterAlias() { + final List<String> indices = Arrays.asList("local-index1", "remote-cluster1:remote-index1"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), contains("remote-cluster1")); + } + + public void testMoreThanOneRemoteClusterAlias() { + final List<String> indices = Arrays.asList("remote-cluster1:remote-index1", "local-index1", "remote-cluster2:remote-index1"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), contains("remote-cluster1", "remote-cluster2")); + } + + public void testDuplicateRemoteClusterAlias() { + final List<String> indices = Arrays.asList( + "remote-cluster1:remote-index1", "local-index1", "remote-cluster2:index1", "remote-cluster2:remote-index2"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), contains("remote-cluster1", "remote-cluster2")); + } + + public void testCheckRemoteClusterLicensesGivenCompatibleLicenses() { + final AtomicInteger index = new AtomicInteger(); + final List<XPackInfoResponse> responses = new ArrayList<>(); + + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener<XPackInfoResponse> listener = + (ActionListener<XPackInfoResponse>) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final List<String> remoteClusterAliases = Arrays.asList("valid1", "valid2", "valid3"); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + final AtomicReference<RemoteClusterLicenseChecker.LicenseCheck> licenseCheck = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener<RemoteClusterLicenseChecker.LicenseCheck>() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + licenseCheck.set(response); + } + + @Override + public void onFailure(final Exception e) { + fail(e.getMessage()); + } + + })); + + verify(client, times(3)).execute(same(XPackInfoAction.INSTANCE), any(), any()); + assertNotNull(licenseCheck.get()); + assertTrue(licenseCheck.get().isSuccess()); + } + + public void testCheckRemoteClusterLicensesGivenIncompatibleLicense() { + final AtomicInteger index = new AtomicInteger(); + final List<String> remoteClusterAliases = Arrays.asList("good", "cluster-with-basic-license", "good2"); + final List<XPackInfoResponse> responses = new ArrayList<>(); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createBasicLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener<XPackInfoResponse> listener = + (ActionListener<XPackInfoResponse>) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + final AtomicReference<RemoteClusterLicenseChecker.LicenseCheck> licenseCheck = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener<RemoteClusterLicenseChecker.LicenseCheck>() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + licenseCheck.set(response); + } + + @Override + public void onFailure(final Exception e) { + fail(e.getMessage()); + } + + })); + + verify(client, times(2)).execute(same(XPackInfoAction.INSTANCE), any(), any()); + assertNotNull(licenseCheck.get()); + assertFalse(licenseCheck.get().isSuccess()); + assertThat(licenseCheck.get().remoteClusterLicenseInfo().clusterAlias(), equalTo("cluster-with-basic-license")); + assertThat(licenseCheck.get().remoteClusterLicenseInfo().licenseInfo().getType(), equalTo("BASIC")); + } + + public void testCheckRemoteClusterLicensesGivenNonExistentCluster() { + final AtomicInteger index = new AtomicInteger(); + final List<XPackInfoResponse> responses = new ArrayList<>(); + + final List<String> remoteClusterAliases = Arrays.asList("valid1", "valid2", "valid3"); + final String failingClusterAlias = randomFrom(remoteClusterAliases); + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClientThatThrowsOnGetRemoteClusterClient(threadPool, failingClusterAlias); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener<XPackInfoResponse> listener = + (ActionListener<XPackInfoResponse>) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + final AtomicReference<Exception> exception = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener<RemoteClusterLicenseChecker.LicenseCheck>() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + fail(); + } + + @Override + public void onFailure(final Exception e) { + exception.set(e); + } + + })); + + assertNotNull(exception.get()); + assertThat(exception.get(), instanceOf(ElasticsearchException.class)); + assertThat(exception.get().getMessage(), equalTo("could not determine the license type for cluster [" + failingClusterAlias + "]")); + assertNotNull(exception.get().getCause()); + assertThat(exception.get().getCause(), instanceOf(IllegalArgumentException.class)); + } + + public void testRemoteClusterLicenseCallUsesSystemContext() throws InterruptedException { + final ThreadPool threadPool = new TestThreadPool(getTestName()); + + try { + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + assertTrue(threadPool.getThreadContext().isSystemContext()); + @SuppressWarnings("unchecked") ActionListener<XPackInfoResponse> listener = + (ActionListener<XPackInfoResponse>) invocationMock.getArguments()[2]; + listener.onResponse(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + + final List<String> remoteClusterAliases = Collections.singletonList("valid"); + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, doubleInvocationProtectingListener(ActionListener.wrap(() -> {}))); + + verify(client, times(1)).execute(same(XPackInfoAction.INSTANCE), any(), any()); + } finally { + terminate(threadPool); + } + } + + public void testListenerIsExecutedWithCallingContext() throws InterruptedException { + final AtomicInteger index = new AtomicInteger(); + final List<XPackInfoResponse> responses = new ArrayList<>(); + + final ThreadPool threadPool = new TestThreadPool(getTestName()); + + try { + final List<String> remoteClusterAliases = Arrays.asList("valid1", "valid2", "valid3"); + final Client client; + final boolean failure = randomBoolean(); + if (failure) { + client = createMockClientThatThrowsOnGetRemoteClusterClient(threadPool, randomFrom(remoteClusterAliases)); + } else { + client = createMockClient(threadPool); + } + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener<XPackInfoResponse> listener = + (ActionListener<XPackInfoResponse>) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + + final AtomicBoolean listenerInvoked = new AtomicBoolean(); + threadPool.getThreadContext().putHeader("key", "value"); + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener<RemoteClusterLicenseChecker.LicenseCheck>() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + if (failure) { + fail(); + } + assertThat(threadPool.getThreadContext().getHeader("key"), equalTo("value")); + assertFalse(threadPool.getThreadContext().isSystemContext()); + listenerInvoked.set(true); + } + + @Override + public void onFailure(final Exception e) { + if (failure == false) { + fail(); + } + assertThat(threadPool.getThreadContext().getHeader("key"), equalTo("value")); + assertFalse(threadPool.getThreadContext().isSystemContext()); + listenerInvoked.set(true); + } + + })); + + assertTrue(listenerInvoked.get()); + } finally { + terminate(threadPool); + } + } + + public void testBuildErrorMessageForActiveCompatibleLicense() { + final XPackInfoResponse.LicenseInfo platinumLicense = createPlatinumLicenseResponse(); + final RemoteClusterLicenseChecker.RemoteClusterLicenseInfo info = + new RemoteClusterLicenseChecker.RemoteClusterLicenseInfo("platinum-cluster", platinumLicense); + final AssertionError e = expectThrows( + AssertionError.class, + () -> RemoteClusterLicenseChecker.buildErrorMessage("", info, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)); + assertThat(e, hasToString(containsString("license must be incompatible to build error message"))); + } + + public void testBuildErrorMessageForIncompatibleLicense() { + final XPackInfoResponse.LicenseInfo basicLicense = createBasicLicenseResponse(); + final RemoteClusterLicenseChecker.RemoteClusterLicenseInfo info = + new RemoteClusterLicenseChecker.RemoteClusterLicenseInfo("basic-cluster", basicLicense); + assertThat( + RemoteClusterLicenseChecker.buildErrorMessage("Feature", info, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial), + equalTo("the license mode [BASIC] on cluster [basic-cluster] does not enable [Feature]")); + } + + public void testBuildErrorMessageForInactiveLicense() { + final XPackInfoResponse.LicenseInfo expiredLicense = createExpiredLicenseResponse(); + final RemoteClusterLicenseChecker.RemoteClusterLicenseInfo info = + new RemoteClusterLicenseChecker.RemoteClusterLicenseInfo("expired-cluster", expiredLicense); + assertThat( + RemoteClusterLicenseChecker.buildErrorMessage("Feature", info, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial), + equalTo("the license on cluster [expired-cluster] is not active")); + } + + private ActionListener<RemoteClusterLicenseChecker.LicenseCheck> doubleInvocationProtectingListener( + final ActionListener<RemoteClusterLicenseChecker.LicenseCheck> listener) { + final AtomicBoolean listenerInvoked = new AtomicBoolean(); + return new ActionListener<RemoteClusterLicenseChecker.LicenseCheck>() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + if (listenerInvoked.compareAndSet(false, true) == false) { + fail("listener invoked twice"); + } + listener.onResponse(response); + } + + @Override + public void onFailure(final Exception e) { + if (listenerInvoked.compareAndSet(false, true) == false) { + fail("listener invoked twice"); + } + listener.onFailure(e); + } + + }; + } + + private ThreadPool createMockThreadPool() { + final ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + return threadPool; + } + + private Client createMockClient(final ThreadPool threadPool) { + return createMockClient(threadPool, client -> when(client.getRemoteClusterClient(anyString())).thenReturn(client)); + } + + private Client createMockClientThatThrowsOnGetRemoteClusterClient(final ThreadPool threadPool, final String clusterAlias) { + return createMockClient( + threadPool, + client -> { + when(client.getRemoteClusterClient(clusterAlias)).thenThrow(new IllegalArgumentException()); + when(client.getRemoteClusterClient(argThat(not(clusterAlias)))).thenReturn(client); + }); + } + + private Client createMockClient(final ThreadPool threadPool, final Consumer<Client> finish) { + final Client client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + finish.accept(client); + return client; + } + + private XPackInfoResponse.LicenseInfo createPlatinumLicenseResponse() { + return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.ACTIVE, randomNonNegativeLong()); + } + + private XPackInfoResponse.LicenseInfo createBasicLicenseResponse() { + return new XPackInfoResponse.LicenseInfo("uid", "BASIC", "BASIC", LicenseStatus.ACTIVE, randomNonNegativeLong()); + } + + private XPackInfoResponse.LicenseInfo createExpiredLicenseResponse() { + return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.EXPIRED, randomNonNegativeLong()); + } + +}
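Editor's note: two of the tests above pin down the thread-context contract, namely that the outbound info call runs as a system context while the caller's own context is restored around the listener. The stash-and-restore behavior they rely on, shown in isolation (the header key and value are invented):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

class ThreadContextStashSketch {
    public static void main(String[] args) {
        final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        threadContext.putHeader("key", "value"); // caller context
        try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
            // inside the stash the caller's headers are hidden, and the request
            // can be marked as internal, as remoteClusterLicense does
            threadContext.markAsSystemContext();
            assert threadContext.isSystemContext();
            assert threadContext.getHeader("key") == null;
        }
        // closing the stored context restores the caller's context
        assert "value".equals(threadContext.getHeader("key"));
        assert threadContext.isSystemContext() == false;
    }
}

Pairing this with ContextPreservingActionListener and newRestorableContext(false), as the checker does, is what makes the listener observe the caller's headers again rather than the system context.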
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 0ea9eb7764803..d6ebdd0449e98 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; @@ -46,10 +47,10 @@ import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; import org.elasticsearch.xpack.ml.datafeed.DatafeedNodeSelector; -import org.elasticsearch.xpack.ml.datafeed.MlRemoteLicenseChecker; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.function.Predicate; @@ -141,19 +142,22 @@ public void onFailure(Exception e) { DatafeedConfig datafeed = mlMetadata.getDatafeed(params.getDatafeedId()); Job job = mlMetadata.getJobs().get(datafeed.getJobId()); - if (MlRemoteLicenseChecker.containsRemoteIndex(datafeed.getIndices())) { - MlRemoteLicenseChecker remoteLicenseChecker = new MlRemoteLicenseChecker(client); - remoteLicenseChecker.checkRemoteClusterLicenses(MlRemoteLicenseChecker.remoteClusterNames(datafeed.getIndices()), + if (RemoteClusterLicenseChecker.containsRemoteIndex(datafeed.getIndices())) { + final RemoteClusterLicenseChecker remoteClusterLicenseChecker = + new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + remoteClusterLicenseChecker.checkRemoteClusterLicenses( + RemoteClusterLicenseChecker.remoteClusterAliases(datafeed.getIndices()), ActionListener.wrap( response -> { - if (response.isViolated()) { + if (response.isSuccess() == false) { listener.onFailure(createUnlicensedError(datafeed.getId(), response)); } else { createDataExtractor(job, datafeed, params, waitForTaskListener); } }, - e -> listener.onFailure(createUnknownLicenseError(datafeed.getId(), - MlRemoteLicenseChecker.remoteIndices(datafeed.getIndices()), e)) + e -> listener.onFailure( + createUnknownLicenseError( + datafeed.getId(), RemoteClusterLicenseChecker.remoteIndices(datafeed.getIndices()), e)) )); } else { createDataExtractor(job, datafeed, params, waitForTaskListener); @@ -232,23 +236,35 @@ public void onFailure(Exception e) { ); } - private ElasticsearchStatusException createUnlicensedError(String datafeedId, - MlRemoteLicenseChecker.LicenseViolation licenseViolation) { - String message = "Cannot start datafeed [" + datafeedId + "] as it is configured to use " - + "indices on a remote cluster [" + licenseViolation.get().getClusterName() - + "] that is not licensed for Machine Learning. " - + MlRemoteLicenseChecker.buildErrorMessage(licenseViolation.get()); - + private ElasticsearchStatusException createUnlicensedError( + final String datafeedId, final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { + final String message = String.format( + Locale.ROOT, + "cannot start datafeed [%s] as it is configured to use indices on remote cluster [%s] that is not licensed for ml; %s", + datafeedId, + licenseCheck.remoteClusterLicenseInfo().clusterAlias(), + RemoteClusterLicenseChecker.buildErrorMessage( + "ml", + licenseCheck.remoteClusterLicenseInfo(), + RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)); return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST); } - private ElasticsearchStatusException createUnknownLicenseError(String datafeedId, List<String> remoteIndices, - Exception cause) { - String message = "Cannot start datafeed [" + datafeedId + "] as it is configured to use" - + " indices on a remote cluster " + remoteIndices - + " but the license type could not be verified"; - - return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, new Exception(cause.getMessage())); + private ElasticsearchStatusException createUnknownLicenseError( + final String datafeedId, final List<String> remoteIndices, final Exception cause) { + final int numberOfRemoteClusters = RemoteClusterLicenseChecker.remoteClusterAliases(remoteIndices).size(); + assert numberOfRemoteClusters > 0; + final String remoteClusterQualifier = numberOfRemoteClusters == 1 ? "a remote cluster" : "remote clusters"; + final String licenseTypeQualifier = numberOfRemoteClusters == 1 ? "" : "s"; + final String message = String.format( + Locale.ROOT, + "cannot start datafeed [%s] as it uses indices on %s %s but the license type%s could not be verified", + datafeedId, + remoteClusterQualifier, + remoteIndices, + licenseTypeQualifier); + + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, cause); } public static class StartDatafeedPersistentTasksExecutor extends PersistentTasksExecutor<StartDatafeedAction.DatafeedParams> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index a6be047648623..ce3f611b2227a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -92,7 +93,7 @@ private AssignmentFailure verifyIndicesActive(DatafeedConfig datafeed) { List<String> indices = datafeed.getIndices(); for (String index : indices) { - if (MlRemoteLicenseChecker.isRemoteIndex(index)) { + if (RemoteClusterLicenseChecker.isRemoteIndex(index)) { // We cannot verify remote indices continue; }
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java deleted file mode 100644 index b0eeed2c800ec..0000000000000 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.ml.datafeed; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.license.License; -import org.elasticsearch.protocol.xpack.XPackInfoRequest; -import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.protocol.xpack.license.LicenseStatus; -import org.elasticsearch.transport.ActionNotFoundTransportException; -import org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.xpack.core.action.XPackInfoAction; - -import java.util.EnumSet; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; - -/** - * ML datafeeds can use cross cluster search to access data in a remote cluster. - * The remote cluster should be licenced for ML this class performs that check - * using the _xpack (info) endpoint. - */ -public class MlRemoteLicenseChecker { - - private final Client client; - - public static class RemoteClusterLicenseInfo { - private final String clusterName; - private final XPackInfoResponse.LicenseInfo licenseInfo; - - RemoteClusterLicenseInfo(String clusterName, XPackInfoResponse.LicenseInfo licenseInfo) { - this.clusterName = clusterName; - this.licenseInfo = licenseInfo; - } - - public String getClusterName() { - return clusterName; - } - - public XPackInfoResponse.LicenseInfo getLicenseInfo() { - return licenseInfo; - } - } - - public class LicenseViolation { - private final RemoteClusterLicenseInfo licenseInfo; - - private LicenseViolation(@Nullable RemoteClusterLicenseInfo licenseInfo) { - this.licenseInfo = licenseInfo; - } - - public boolean isViolated() { - return licenseInfo != null; - } - - public RemoteClusterLicenseInfo get() { - return licenseInfo; - } - } - - public MlRemoteLicenseChecker(Client client) { - this.client = client; - } - - /** - * Check each cluster is licensed for ML. - * This function evaluates lazily and will terminate when the first cluster - * that is not licensed is found or an error occurs. - * - * @param clusterNames List of remote cluster names - * @param listener Response listener - */ - public void checkRemoteClusterLicenses(List<String> clusterNames, ActionListener<LicenseViolation> listener) { - final Iterator<String> itr = clusterNames.iterator(); - if (itr.hasNext() == false) { - listener.onResponse(new LicenseViolation(null)); - return; - } - - final AtomicReference<String> clusterName = new AtomicReference<>(itr.next()); - - ActionListener<XPackInfoResponse> infoListener = new ActionListener<XPackInfoResponse>() { - @Override - public void onResponse(XPackInfoResponse xPackInfoResponse) { - if (licenseSupportsML(xPackInfoResponse.getLicenseInfo()) == false) { - listener.onResponse(new LicenseViolation( - new RemoteClusterLicenseInfo(clusterName.get(), xPackInfoResponse.getLicenseInfo()))); - return; - } - - if (itr.hasNext()) { - clusterName.set(itr.next()); - remoteClusterLicense(clusterName.get(), this); - } else { - listener.onResponse(new LicenseViolation(null)); - } - } - - @Override - public void onFailure(Exception e) { - String message = "Could not determine the X-Pack licence type for cluster [" + clusterName.get() + "]"; - if (e instanceof ActionNotFoundTransportException) { - // This is likely to be because x-pack is not installed in the target cluster - message += ". Is X-Pack installed on the target cluster?"; - } - listener.onFailure(new ElasticsearchException(message, e)); - } - }; - - remoteClusterLicense(clusterName.get(), infoListener); - } - - private void remoteClusterLicense(String clusterName, ActionListener<XPackInfoResponse> listener) { - Client remoteClusterClient = client.getRemoteClusterClient(clusterName); - ThreadContext threadContext = remoteClusterClient.threadPool().getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - // we stash any context here since this is an internal execution and should not leak any - // existing context information. - threadContext.markAsSystemContext(); - - XPackInfoRequest request = new XPackInfoRequest(); - request.setCategories(EnumSet.of(XPackInfoRequest.Category.LICENSE)); - remoteClusterClient.execute(XPackInfoAction.INSTANCE, request, listener); - } - } - - static boolean licenseSupportsML(XPackInfoResponse.LicenseInfo licenseInfo) { - License.OperationMode mode = License.OperationMode.resolve(licenseInfo.getMode()); - return licenseInfo.getStatus() == LicenseStatus.ACTIVE && - (mode == License.OperationMode.PLATINUM || mode == License.OperationMode.TRIAL); - } - - public static boolean isRemoteIndex(String index) { - return index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) != -1; - } - - public static boolean containsRemoteIndex(List<String> indices) { - return indices.stream().anyMatch(MlRemoteLicenseChecker::isRemoteIndex); - } - - /** - * Get any remote indices used in cross cluster search. - * Remote indices are of the form {@code cluster_name:index_name} - * @return List of remote cluster indices - */ - public static List<String> remoteIndices(List<String> indices) { - return indices.stream().filter(MlRemoteLicenseChecker::isRemoteIndex).collect(Collectors.toList()); - } - - /** - * Extract the list of remote cluster names from the list of indices. - * @param indices List of indices. Remote cluster indices are prefixed - * with {@code cluster-name:} - * @return Every cluster name found in {@code indices} - */ - public static List<String> remoteClusterNames(List<String> indices) { - return indices.stream() - .filter(MlRemoteLicenseChecker::isRemoteIndex) - .map(index -> index.substring(0, index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR))) - .distinct() - .collect(Collectors.toList()); - } - - public static String buildErrorMessage(RemoteClusterLicenseInfo clusterLicenseInfo) { - StringBuilder error = new StringBuilder(); - if (clusterLicenseInfo.licenseInfo.getStatus() != LicenseStatus.ACTIVE) { - error.append("The license on cluster [").append(clusterLicenseInfo.clusterName) - .append("] is not active. "); - } else { - License.OperationMode mode = License.OperationMode.resolve(clusterLicenseInfo.licenseInfo.getMode()); - if (mode != License.OperationMode.PLATINUM && mode != License.OperationMode.TRIAL) { - error.append("The license mode [").append(mode) - .append("] on cluster [") - .append(clusterLicenseInfo.clusterName) - .append("] does not enable Machine Learning. "); - } - } - - error.append(Strings.toString(clusterLicenseInfo.licenseInfo)); - return error.toString(); - } -} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java index 72c8d361dd882..610a5c1b92fb6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java @@ -3,10 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.ml.action; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -14,7 +16,6 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; import org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java deleted file mode 100644 index 81e4c75cfad7c..0000000000000 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License.
- */ - -package org.elasticsearch.xpack.ml.datafeed; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.protocol.xpack.license.LicenseStatus; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.action.XPackInfoAction; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.is; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.same; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class MlRemoteLicenseCheckerTests extends ESTestCase { - - public void testIsRemoteIndex() { - List<String> indices = Arrays.asList("local-index1", "local-index2"); - assertFalse(MlRemoteLicenseChecker.containsRemoteIndex(indices)); - indices = Arrays.asList("local-index1", "remote-cluster:remote-index2"); - assertTrue(MlRemoteLicenseChecker.containsRemoteIndex(indices)); - } - - public void testRemoteIndices() { - List<String> indices = Collections.singletonList("local-index"); - assertThat(MlRemoteLicenseChecker.remoteIndices(indices), is(empty())); - indices = Arrays.asList("local-index", "remote-cluster:index1", "local-index2", "remote-cluster2:index1"); - assertThat(MlRemoteLicenseChecker.remoteIndices(indices), containsInAnyOrder("remote-cluster:index1", "remote-cluster2:index1")); - } - - public void testRemoteClusterNames() { - List<String> indices = Arrays.asList("local-index1", "local-index2"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), empty()); - indices = Arrays.asList("local-index1", "remote-cluster1:remote-index2"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), contains("remote-cluster1")); - indices = Arrays.asList("remote-cluster1:index2", "index1", "remote-cluster2:index1"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), contains("remote-cluster1", "remote-cluster2")); - indices = Arrays.asList("remote-cluster1:index2", "index1", "remote-cluster2:index1", "remote-cluster2:index2"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), contains("remote-cluster1", "remote-cluster2")); - } - - public void testLicenseSupportsML() { - XPackInfoResponse.LicenseInfo licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "trial", "trial", - LicenseStatus.ACTIVE, randomNonNegativeLong()); - assertTrue(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - - licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "trial", "trial", LicenseStatus.EXPIRED, randomNonNegativeLong()); - assertFalse(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - - licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "GOLD", "GOLD", LicenseStatus.ACTIVE, randomNonNegativeLong()); - assertFalse(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - - licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.ACTIVE, randomNonNegativeLong()); - assertTrue(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - } - - public void testCheckRemoteClusterLicenses_givenValidLicenses() { - final AtomicInteger index = new AtomicInteger(0); - final List<XPackInfoResponse> responses = new ArrayList<>(); - - Client client = createMockClient(); - doAnswer(invocationMock -> { - @SuppressWarnings("raw_types") - ActionListener<XPackInfoResponse> listener = (ActionListener) invocationMock.getArguments()[2]; - listener.onResponse(responses.get(index.getAndIncrement())); - return null; - }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); - - - List<String> remoteClusterNames = Arrays.asList("valid1", "valid2", "valid3"); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - - MlRemoteLicenseChecker licenseChecker = new MlRemoteLicenseChecker(client); - AtomicReference<MlRemoteLicenseChecker.LicenseViolation> licCheckResponse = new AtomicReference<>(); - - licenseChecker.checkRemoteClusterLicenses(remoteClusterNames, - new ActionListener<MlRemoteLicenseChecker.LicenseViolation>() { - @Override - public void onResponse(MlRemoteLicenseChecker.LicenseViolation response) { - licCheckResponse.set(response); - } - - @Override - public void onFailure(Exception e) { - fail(e.getMessage()); - } - }); - - verify(client, times(3)).execute(same(XPackInfoAction.INSTANCE), any(), any()); - assertNotNull(licCheckResponse.get()); - assertFalse(licCheckResponse.get().isViolated()); - assertNull(licCheckResponse.get().get()); - } - - public void testCheckRemoteClusterLicenses_givenInvalidLicense() { - final AtomicInteger index = new AtomicInteger(0); - List<String> remoteClusterNames = Arrays.asList("good", "cluster-with-basic-license", "good2"); - final List<XPackInfoResponse> responses = new ArrayList<>(); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createBasicLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - - Client client = createMockClient(); - doAnswer(invocationMock -> { - @SuppressWarnings("raw_types") - ActionListener<XPackInfoResponse> listener = (ActionListener) invocationMock.getArguments()[2]; - listener.onResponse(responses.get(index.getAndIncrement())); - return null; - }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); - - MlRemoteLicenseChecker licenseChecker = new MlRemoteLicenseChecker(client); - AtomicReference<MlRemoteLicenseChecker.LicenseViolation> licCheckResponse = new AtomicReference<>(); - - licenseChecker.checkRemoteClusterLicenses(remoteClusterNames, - new ActionListener<MlRemoteLicenseChecker.LicenseViolation>() { - @Override - public void onResponse(MlRemoteLicenseChecker.LicenseViolation response) { - licCheckResponse.set(response); - } - - @Override - public void onFailure(Exception e) { - fail(e.getMessage()); - } - }); - - verify(client, times(2)).execute(same(XPackInfoAction.INSTANCE), any(), any()); - assertNotNull(licCheckResponse.get()); - assertTrue(licCheckResponse.get().isViolated()); - assertEquals("cluster-with-basic-license", licCheckResponse.get().get().getClusterName()); - assertEquals("BASIC", licCheckResponse.get().get().getLicenseInfo().getType()); - } - - public void testBuildErrorMessage() { - XPackInfoResponse.LicenseInfo platinumLicence = createPlatinumLicenseResponse(); - MlRemoteLicenseChecker.RemoteClusterLicenseInfo info = - new MlRemoteLicenseChecker.RemoteClusterLicenseInfo("platinum-cluster", platinumLicence); - assertEquals(Strings.toString(platinumLicence), MlRemoteLicenseChecker.buildErrorMessage(info)); - - XPackInfoResponse.LicenseInfo basicLicense = createBasicLicenseResponse(); - info = new MlRemoteLicenseChecker.RemoteClusterLicenseInfo("basic-cluster", basicLicense); - String expected = "The license mode [BASIC] on cluster [basic-cluster] does not enable Machine Learning. " - + Strings.toString(basicLicense); - assertEquals(expected, MlRemoteLicenseChecker.buildErrorMessage(info)); - - XPackInfoResponse.LicenseInfo expiredLicense = createExpiredLicenseResponse(); - info = new MlRemoteLicenseChecker.RemoteClusterLicenseInfo("expired-cluster", expiredLicense); - expected = "The license on cluster [expired-cluster] is not active. " + Strings.toString(expiredLicense); - assertEquals(expected, MlRemoteLicenseChecker.buildErrorMessage(info)); - } - - private Client createMockClient() { - Client client = mock(Client.class); - ThreadPool threadPool = mock(ThreadPool.class); - when(client.threadPool()).thenReturn(threadPool); - when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); - when(client.getRemoteClusterClient(anyString())).thenReturn(client); - return client; - } - - private XPackInfoResponse.LicenseInfo createPlatinumLicenseResponse() { - return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.ACTIVE, randomNonNegativeLong()); - } - - private XPackInfoResponse.LicenseInfo createBasicLicenseResponse() { - return new XPackInfoResponse.LicenseInfo("uid", "BASIC", "BASIC", LicenseStatus.ACTIVE, randomNonNegativeLong()); - } - - private XPackInfoResponse.LicenseInfo createExpiredLicenseResponse() { - return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.EXPIRED, randomNonNegativeLong()); - } -} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java index 7ed00a01f3565..c83521591fcf4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java @@ -392,7 +392,7 @@ public void testResetScrollAfterSearchPhaseExecutionException() throws IOExcepti assertThat(extractor.hasNext(), is(true)); output = extractor.next(); assertThat(output.isPresent(), is(true)); - assertEquals(new Long(1400L), extractor.getLastTimestamp()); + assertEquals(Long.valueOf(1400L), extractor.getLastTimestamp()); // A second failure is not tolerated assertThat(extractor.hasNext(), is(true)); expectThrows(SearchPhaseExecutionException.class, extractor::next);
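Editor's note: the assertEquals tweak above (and the analogous one in the next hunk) swaps the deprecated boxing constructor for the static factory. Behavior under equals() is identical, but Long.valueOf caches boxes for values in [-128, 127] while new Long always allocates, and the constructor is deprecated as of JDK 9. For instance:

class BoxingSketch {
    public static void main(String[] args) {
        // always distinct objects, and deprecated since JDK 9
        assert new Long(127L) != new Long(127L);
        // may be the same cached instance; value comparisons must use equals()
        assert Long.valueOf(127L).equals(Long.valueOf(127L));
    }
}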
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java index 303fce3d81321..e36c313b626c9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java @@ -125,7 +125,7 @@ public void testGetCalandarByJobId() throws Exception { Long matchedCount = queryResult.stream().filter( c -> c.getId().equals("foo calendar") || c.getId().equals("foo bar calendar") || c.getId().equals("cat foo calendar")) .count(); - assertEquals(new Long(3), matchedCount); + assertEquals(Long.valueOf(3), matchedCount); queryResult = getCalendars("bar"); assertThat(queryResult, hasSize(1)); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java index 0fc4d838f7ce8..09b2ccd079a76 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java @@ -194,7 +194,7 @@ public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor(ClusterServic return emptyList(); } - SchedulerEngine schedulerEngine = new SchedulerEngine(getClock()); + SchedulerEngine schedulerEngine = new SchedulerEngine(settings, getClock()); return Collections.singletonList(new RollupJobTask.RollupJobPersistentTasksExecutor(settings, client, schedulerEngine, threadPool)); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index 13290f09e8eb8..9a75d6fc67590 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.node.Node; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.search.aggregations.Aggregations; @@ -47,6 +48,9 @@ public class RollupJobTaskTests extends ESTestCase { + private static final Settings SETTINGS = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "test") + .build(); private static ThreadPool pool = new TestThreadPool("test"); @AfterClass @@ -62,7 +66,7 @@ public void testInitialStatusStopped() { RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -75,7 +79,7 @@ public void testInitialStatusAborting() { RollupJobStatus status = new RollupJobStatus(IndexerState.ABORTING, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -88,7 +92,7 @@ public
void testInitialStatusStopping() { RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPING, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -101,7 +105,7 @@ public void testInitialStatusStarted() { RollupJobStatus status = new RollupJobStatus(IndexerState.STARTED, Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); @@ -114,7 +118,7 @@ public void testInitialStatusIndexingOldID() { RollupJobStatus status = new RollupJobStatus(IndexerState.INDEXING, Collections.singletonMap("foo", "bar"), false); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); @@ -128,7 +132,7 @@ public void testInitialStatusIndexingNewID() { RollupJobStatus status = new RollupJobStatus(IndexerState.INDEXING, Collections.singletonMap("foo", "bar"), true); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); @@ -141,7 +145,7 @@ public void testNoInitialStatus() { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -154,7 +158,7 @@ public void testStartWhenStarted() throws InterruptedException { RollupJobStatus status = new RollupJobStatus(IndexerState.STARTED, 
Collections.singletonMap("foo", "bar"), randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); @@ -641,7 +645,7 @@ public void testStopWhenStopped() throws InterruptedException { RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, null, randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); @@ -748,7 +752,7 @@ public void testStopWhenAborting() throws InterruptedException { RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, null, randomBoolean()); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); - SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); + SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); CountDownLatch latch = new CountDownLatch(2); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java index cd5a720eef1dc..28f2756cf262c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java @@ -13,12 +13,6 @@ public class FIPS140JKSKeystoreBootstrapCheck implements BootstrapCheck { - private final boolean fipsModeEnabled; - - FIPS140JKSKeystoreBootstrapCheck(Settings settings) { - this.fipsModeEnabled = XPackSettings.FIPS_MODE_ENABLED.get(settings); - } - /** * Test if the node fails the check. 
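A note on the SchedulerEngine hunks above: the constructor now takes Settings ahead of the Clock, and the shared SETTINGS constant added to RollupJobTaskTests carries a node name, which the engine apparently needs for its logging and thread context. A minimal sketch of the construction pattern, assuming nothing beyond what the hunks themselves show:

    // Illustrative only; mirrors the SETTINGS constant from RollupJobTaskTests,
    // not additional API from this patch.
    Settings settings = Settings.builder()
            .put(Node.NODE_NAME_SETTING.getKey(), "test")
            .build();
    SchedulerEngine schedulerEngine = new SchedulerEngine(settings, Clock.systemUTC());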
* @@ -28,7 +22,7 @@ public class FIPS140JKSKeystoreBootstrapCheck implements BootstrapCheck { @Override public BootstrapCheckResult check(BootstrapContext context) { - if (fipsModeEnabled) { + if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings)) { final Settings settings = context.settings; Settings keystoreTypeSettings = settings.filter(k -> k.endsWith("keystore.type")) .filter(k -> settings.get(k).equalsIgnoreCase("jks")); @@ -50,6 +44,6 @@ public BootstrapCheckResult check(BootstrapContext context) { @Override public boolean alwaysEnforce() { - return fipsModeEnabled; + return true; } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java index d1bce0dcdd24c..957276bdad2f3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java @@ -10,6 +10,7 @@ import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.xpack.core.XPackSettings; import java.util.EnumSet; @@ -21,15 +22,9 @@ final class FIPS140LicenseBootstrapCheck implements BootstrapCheck { static final EnumSet ALLOWED_LICENSE_OPERATION_MODES = EnumSet.of(License.OperationMode.PLATINUM, License.OperationMode.TRIAL); - private final boolean isInFipsMode; - - FIPS140LicenseBootstrapCheck(boolean isInFipsMode) { - this.isInFipsMode = isInFipsMode; - } - @Override public BootstrapCheckResult check(BootstrapContext context) { - if (isInFipsMode) { + if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings)) { License license = LicenseService.getLicense(context.metaData); if (license != null && ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode()) == false) { return BootstrapCheckResult.failure("FIPS mode is only allowed with a Platinum or Trial license"); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java index 751d63be4fb3f..3faec3d747575 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java @@ -7,19 +7,12 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.XPackSettings; import java.util.Locale; public class FIPS140PasswordHashingAlgorithmBootstrapCheck implements BootstrapCheck { - private final boolean fipsModeEnabled; - - FIPS140PasswordHashingAlgorithmBootstrapCheck(final Settings settings) { - this.fipsModeEnabled = XPackSettings.FIPS_MODE_ENABLED.get(settings); - } - /** * Test if the node fails the check. 
* @@ -28,7 +21,7 @@ public class FIPS140PasswordHashingAlgorithmBootstrapCheck implements BootstrapC */ @Override public BootstrapCheckResult check(final BootstrapContext context) { - if (fipsModeEnabled) { + if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings)) { final String selectedAlgorithm = XPackSettings.PASSWORD_HASHING_ALGORITHM.get(context.settings); if (selectedAlgorithm.toLowerCase(Locale.ROOT).startsWith("pbkdf2") == false) { return BootstrapCheckResult.failure("Only PBKDF2 is allowed for password hashing in a FIPS-140 JVM. Please set the " + diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 02910b5dd74fc..2a392478cbcd9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -300,9 +300,9 @@ public Security(Settings settings, final Path configPath) { new PkiRealmBootstrapCheck(getSslService()), new TLSLicenseBootstrapCheck(), new FIPS140SecureSettingsBootstrapCheck(settings, env), - new FIPS140JKSKeystoreBootstrapCheck(settings), - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings), - new FIPS140LicenseBootstrapCheck(XPackSettings.FIPS_MODE_ENABLED.get(settings)))); + new FIPS140JKSKeystoreBootstrapCheck(), + new FIPS140PasswordHashingAlgorithmBootstrapCheck(), + new FIPS140LicenseBootstrapCheck())); checks.addAll(InternalRealms.getBootstrapChecks(settings, env)); this.bootstrapChecks = Collections.unmodifiableList(checks); Automatons.updateMaxDeterminizedStates(settings); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java index 1d4da71e11b5e..b659adf22cfc3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java @@ -14,7 +14,7 @@ public class FIPS140JKSKeystoreBootstrapCheckTests extends ESTestCase { public void testNoKeystoreIsAllowed() { final Settings.Builder settings = Settings.builder() .put("xpack.security.fips_mode.enabled", "true"); - assertFalse(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertFalse(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testSSLKeystoreTypeIsNotAllowed() { @@ -22,7 +22,7 @@ public void testSSLKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.ssl.keystore.path", "/this/is/the/path") .put("xpack.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testSSLImplicitKeystoreTypeIsNotAllowed() { @@ -30,7 +30,7 @@ public void testSSLImplicitKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.ssl.keystore.path", "/this/is/the/path") .put("xpack.ssl.keystore.type", "JKS"); - assertTrue(new 
FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testTransportSSLKeystoreTypeIsNotAllowed() { @@ -38,7 +38,7 @@ public void testTransportSSLKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.security.transport.ssl.keystore.path", "/this/is/the/path") .put("xpack.security.transport.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testHttpSSLKeystoreTypeIsNotAllowed() { @@ -46,7 +46,7 @@ public void testHttpSSLKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.security.http.ssl.keystore.path", "/this/is/the/path") .put("xpack.security.http.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testRealmKeystoreTypeIsNotAllowed() { @@ -54,13 +54,13 @@ public void testRealmKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.security.authc.realms.ldap.ssl.keystore.path", "/this/is/the/path") .put("xpack.security.authc.realms.ldap.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testImplicitRealmKeystoreTypeIsNotAllowed() { final Settings.Builder settings = Settings.builder() .put("xpack.security.fips_mode.enabled", "true") .put("xpack.security.authc.realms.ldap.ssl.keystore.path", "/this/is/the/path"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java index a2ec8f9fb2053..fb4c9e21a258f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java @@ -17,27 +17,29 @@ public class FIPS140LicenseBootstrapCheckTests extends ESTestCase { public void testBootstrapCheck() throws Exception { - assertTrue(new FIPS140LicenseBootstrapCheck(false) - .check(new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck(randomBoolean()) + assertTrue(new FIPS140LicenseBootstrapCheck() .check(new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA)).isSuccess()); + assertTrue(new FIPS140LicenseBootstrapCheck() + .check(new BootstrapContext(Settings.builder().put("xpack.security.fips_mode.enabled", 
randomBoolean()).build(), MetaData + .EMPTY_META_DATA)).isSuccess()); - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); MetaData.Builder builder = MetaData.builder(); + License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); TestUtils.putLicense(builder, license); MetaData metaData = builder.build(); + if (FIPS140LicenseBootstrapCheck.ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode())) { - assertTrue(new FIPS140LicenseBootstrapCheck(true).check(new BootstrapContext( + assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck(false).check(new BootstrapContext( + assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", false).build(), metaData)).isSuccess()); } else { - assertTrue(new FIPS140LicenseBootstrapCheck(false).check(new BootstrapContext( + assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", false).build(), metaData)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck(true).check(new BootstrapContext( + assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).isFailure()); assertEquals("FIPS mode is only allowed with a Platinum or Trial license", - new FIPS140LicenseBootstrapCheck(true).check(new BootstrapContext( + new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).getMessage()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java index 8632400866a07..6376ca211dcae 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java @@ -25,7 +25,7 @@ public void testPBKDF2AlgorithmIsAllowed() { .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), "PBKDF2_10000") .build(); final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings).check(new BootstrapContext(settings, null)); + new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(new BootstrapContext(settings, null)); assertFalse(result.isFailure()); } @@ -35,7 +35,7 @@ public void testPBKDF2AlgorithmIsAllowed() { .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), "PBKDF2") .build(); final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings).check(new BootstrapContext(settings, null)); + new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(new BootstrapContext(settings, null)); assertFalse(result.isFailure()); } } @@ -55,7 +55,7 @@ private void runBCRYPTTest(final boolean fipsModeEnabled, final String passwordH } final Settings settings = builder.build(); final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings).check(new BootstrapContext(settings, null)); 
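The common thread in the FIPS-140 bootstrap check changes above: each check drops its constructor-injected fipsModeEnabled flag (and Settings parameter) and instead reads XPackSettings.FIPS_MODE_ENABLED from the BootstrapContext passed to check(), so Security can register plain no-arg instances and a single instance works against whatever settings the node actually starts with. A sketch of the resulting shape, using a hypothetical check rather than one from this patch:

    // Stateless bootstrap check: every input comes from the BootstrapContext.
    public class ExampleFipsBootstrapCheck implements BootstrapCheck {
        @Override
        public BootstrapCheckResult check(BootstrapContext context) {
            if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings) == false) {
                return BootstrapCheckResult.success(); // not in FIPS mode, nothing to verify
            }
            // ... validate context.settings / context.metaData here ...
            return BootstrapCheckResult.success();
        }
    }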
+ new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(new BootstrapContext(settings, null)); assertThat(result.isFailure(), equalTo(fipsModeEnabled)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java index 0f88148a9a973..2bd1bdf906ad8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java @@ -72,6 +72,8 @@ public abstract class KerberosTestCase extends ESTestCase { unsupportedLocaleLanguages.add("ks"); unsupportedLocaleLanguages.add("ckb"); unsupportedLocaleLanguages.add("ne"); + unsupportedLocaleLanguages.add("dz"); + unsupportedLocaleLanguages.add("mzn"); } @BeforeClass diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java index 70ab085fcf72b..7397ebc8c7dc4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; -import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.MockSecureSettings; @@ -35,6 +34,9 @@ import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.SSLService; +import javax.net.SocketFactory; +import javax.net.ssl.HandshakeCompletedListener; +import javax.net.ssl.SSLSocket; import java.io.IOException; import java.net.InetAddress; import java.net.SocketTimeoutException; @@ -44,10 +46,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; -import javax.net.SocketFactory; -import javax.net.ssl.HandshakeCompletedListener; -import javax.net.ssl.SSLSocket; - import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsString; @@ -118,12 +116,6 @@ protected MockTransportService build(Settings settings, Version version, Cluster return transportService; } - @Override - protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException { - TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection; - CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); - } - public void testConnectException() throws UnknownHostException { try { serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java index aa9d434f332e3..3b5180b71f7c2 100644 --- 
a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java @@ -58,7 +58,7 @@ private TypeConverter() { private static final long DAY_IN_MILLIS = 60 * 60 * 24; private static final Map, JDBCType> javaToJDBC; - + static { Map, JDBCType> aMap = Arrays.stream(DataType.values()) .filter(dataType -> dataType.javaClass() != null @@ -119,7 +119,7 @@ private static T dateTimeConvert(Long millis, Calendar c, Function T convert(Object val, JDBCType columnType, Class type) throws SQLE if (type == null) { return (T) convert(val, columnType); } - + if (type.isInstance(val)) { try { return type.cast(val); @@ -150,7 +150,7 @@ static T convert(Object val, JDBCType columnType, Class type) throws SQLE throw new SQLDataException("Unable to convert " + val.getClass().getName() + " to " + columnType, cce); } } - + if (type == String.class) { return (T) asString(convert(val, columnType)); } @@ -276,8 +276,8 @@ static boolean isSigned(JDBCType jdbcType) throws SQLException { } return dataType.isSigned(); } - - + + static JDBCType fromJavaToJDBC(Class clazz) throws SQLException { for (Entry, JDBCType> e : javaToJDBC.entrySet()) { // java.util.Calendar from {@code javaToJDBC} is an abstract class and this method can be used with concrete classes as well @@ -285,7 +285,7 @@ static JDBCType fromJavaToJDBC(Class clazz) throws SQLException { return e.getValue(); } } - + throw new SQLFeatureNotSupportedException("Objects of type " + clazz.getName() + " are not supported"); } @@ -432,7 +432,7 @@ private static Float asFloat(Object val, JDBCType columnType) throws SQLExceptio case REAL: case FLOAT: case DOUBLE: - return new Float(((Number) val).doubleValue()); + return Float.valueOf((((float) ((Number) val).doubleValue()))); default: } @@ -451,7 +451,7 @@ private static Double asDouble(Object val, JDBCType columnType) throws SQLExcept case REAL: case FLOAT: case DOUBLE: - return new Double(((Number) val).doubleValue()); + return Double.valueOf(((Number) val).doubleValue()); default: } @@ -539,4 +539,4 @@ private static long safeToLong(double x) throws SQLException { } return Math.round(x); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java index ad96825896e1a..9da06f6537c0e 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java @@ -38,27 +38,27 @@ import static java.sql.JDBCType.VARCHAR; public class JdbcPreparedStatementTests extends ESTestCase { - + public void testSettingBooleanValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + jps.setBoolean(1, true); assertEquals(true, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, false); assertEquals(false, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, true, Types.BOOLEAN); assertEquals(true, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); assertTrue(value(jps) instanceof Boolean); - + jps.setObject(1, true, Types.INTEGER); assertEquals(1, value(jps)); assertEquals(INTEGER, jdbcType(jps)); - + jps.setObject(1, true, Types.VARCHAR); assertEquals("true", value(jps)); 
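The TypeConverter hunks above replace the boxed-primitive constructors (deprecated since Java 9) with the valueOf factories, which can return cached instances and, in the Float case, make the double-to-float narrowing explicit rather than burying it in new Float(double). Roughly, with `number` standing in for an arbitrary Number (illustrative, not code from this patch):

    Number number = 3.0d;                                           // illustrative value
    Float viaConstructor = new Float(number.doubleValue());         // deprecated; hidden narrowing
    Float viaFactory = Float.valueOf((float) number.doubleValue()); // narrowing is visible
    Double boxedDouble = Double.valueOf(number.doubleValue());      // instead of new Double(...)
    Long boxedLong = Long.valueOf(3);                               // instead of new Long(3)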
assertEquals(VARCHAR, jdbcType(jps)); @@ -66,264 +66,264 @@ public void testSettingBooleanValues() throws SQLException { public void testThrownExceptionsWhenSettingBooleanValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, true, Types.TIMESTAMP)); assertEquals("Conversion from type [BOOLEAN] to [Timestamp] not supported", sqle.getMessage()); } - + public void testSettingStringValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + jps.setString(1, "foo bar"); assertEquals("foo bar", value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, "foo bar"); assertEquals("foo bar", value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, "foo bar", Types.VARCHAR); assertEquals("foo bar", value(jps)); assertEquals(VARCHAR, jdbcType(jps)); assertTrue(value(jps) instanceof String); } - + public void testThrownExceptionsWhenSettingStringValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, "foo bar", Types.INTEGER)); assertEquals("Conversion from type [VARCHAR] to [Integer] not supported", sqle.getMessage()); } - + public void testSettingByteTypeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + jps.setByte(1, (byte) 6); assertEquals((byte) 6, value(jps)); assertEquals(TINYINT, jdbcType(jps)); - + jps.setObject(1, (byte) 6); assertEquals((byte) 6, value(jps)); assertEquals(TINYINT, jdbcType(jps)); assertTrue(value(jps) instanceof Byte); - + jps.setObject(1, (byte) 0, Types.BOOLEAN); assertEquals(false, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, (byte) 123, Types.BOOLEAN); assertEquals(true, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, (byte) 123, Types.INTEGER); assertEquals(123, value(jps)); assertEquals(INTEGER, jdbcType(jps)); - + jps.setObject(1, (byte) -128, Types.DOUBLE); assertEquals(-128.0, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingByteTypeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, (byte) 6, Types.TIMESTAMP)); assertEquals("Conversion from type [TINYINT] to [Timestamp] not supported", sqle.getMessage()); } - + public void testSettingShortTypeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + short someShort = randomShort(); jps.setShort(1, someShort); assertEquals(someShort, value(jps)); assertEquals(SMALLINT, jdbcType(jps)); - + jps.setObject(1, someShort); assertEquals(someShort, value(jps)); assertEquals(SMALLINT, jdbcType(jps)); assertTrue(value(jps) instanceof Short); - + jps.setObject(1, (short) 1, Types.BOOLEAN); assertEquals(true, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, (short) -32700, Types.DOUBLE); assertEquals(-32700.0, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); - + jps.setObject(1, someShort, Types.INTEGER); assertEquals((int) someShort, value(jps)); assertEquals(INTEGER, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingShortTypeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, (short) 6, 
Types.TIMESTAMP)); assertEquals("Conversion from type [SMALLINT] to [Timestamp] not supported", sqle.getMessage()); - + sqle = expectThrows(SQLException.class, () -> jps.setObject(1, 256, Types.TINYINT)); assertEquals("Numeric " + 256 + " out of range", sqle.getMessage()); } - + public void testSettingIntegerValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + int someInt = randomInt(); jps.setInt(1, someInt); assertEquals(someInt, value(jps)); assertEquals(INTEGER, jdbcType(jps)); - + jps.setObject(1, someInt); assertEquals(someInt, value(jps)); assertEquals(INTEGER, jdbcType(jps)); assertTrue(value(jps) instanceof Integer); - + jps.setObject(1, someInt, Types.VARCHAR); assertEquals(String.valueOf(someInt), value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, someInt, Types.FLOAT); assertEquals(Double.valueOf(someInt), value(jps)); assertTrue(value(jps) instanceof Double); assertEquals(FLOAT, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingIntegerValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); int someInt = randomInt(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someInt, Types.TIMESTAMP)); assertEquals("Conversion from type [INTEGER] to [Timestamp] not supported", sqle.getMessage()); - + Integer randomIntNotShort = randomIntBetween(32768, Integer.MAX_VALUE); sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomIntNotShort, Types.SMALLINT)); assertEquals("Numeric " + randomIntNotShort + " out of range", sqle.getMessage()); - + sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomIntNotShort, Types.TINYINT)); assertEquals("Numeric " + randomIntNotShort + " out of range", sqle.getMessage()); } - + public void testSettingLongValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + long someLong = randomLong(); jps.setLong(1, someLong); assertEquals(someLong, value(jps)); assertEquals(BIGINT, jdbcType(jps)); - + jps.setObject(1, someLong); assertEquals(someLong, value(jps)); assertEquals(BIGINT, jdbcType(jps)); assertTrue(value(jps) instanceof Long); - + jps.setObject(1, someLong, Types.VARCHAR); assertEquals(String.valueOf(someLong), value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, someLong, Types.DOUBLE); assertEquals((double) someLong, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); - + jps.setObject(1, someLong, Types.FLOAT); assertEquals((double) someLong, value(jps)); assertEquals(FLOAT, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingLongValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); long someLong = randomLong(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someLong, Types.TIMESTAMP)); assertEquals("Conversion from type [BIGINT] to [Timestamp] not supported", sqle.getMessage()); - + Long randomLongNotShort = randomLongBetween(Integer.MAX_VALUE + 1, Long.MAX_VALUE); sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomLongNotShort, Types.INTEGER)); assertEquals("Numeric " + randomLongNotShort + " out of range", sqle.getMessage()); - + sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomLongNotShort, Types.SMALLINT)); assertEquals("Numeric " + randomLongNotShort + " out of range", sqle.getMessage()); } - + public void testSettingFloatValues() throws SQLException { JdbcPreparedStatement jps = 
createJdbcPreparedStatement(); - + float someFloat = randomFloat(); jps.setFloat(1, someFloat); assertEquals(someFloat, value(jps)); assertEquals(REAL, jdbcType(jps)); - + jps.setObject(1, someFloat); assertEquals(someFloat, value(jps)); assertEquals(REAL, jdbcType(jps)); assertTrue(value(jps) instanceof Float); - + jps.setObject(1, someFloat, Types.VARCHAR); assertEquals(String.valueOf(someFloat), value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, someFloat, Types.DOUBLE); assertEquals((double) someFloat, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); - + jps.setObject(1, someFloat, Types.FLOAT); assertEquals((double) someFloat, value(jps)); assertEquals(FLOAT, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingFloatValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); float someFloat = randomFloat(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someFloat, Types.TIMESTAMP)); assertEquals("Conversion from type [REAL] to [Timestamp] not supported", sqle.getMessage()); - + Float floatNotInt = 5_155_000_000f; sqle = expectThrows(SQLException.class, () -> jps.setObject(1, floatNotInt, Types.INTEGER)); - assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", + assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", Long.toString(Math.round(floatNotInt.doubleValue()))), sqle.getMessage()); - + sqle = expectThrows(SQLException.class, () -> jps.setObject(1, floatNotInt, Types.SMALLINT)); - assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", + assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", Long.toString(Math.round(floatNotInt.doubleValue()))), sqle.getMessage()); } - + public void testSettingDoubleValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + double someDouble = randomDouble(); jps.setDouble(1, someDouble); assertEquals(someDouble, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); - + jps.setObject(1, someDouble); assertEquals(someDouble, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); assertTrue(value(jps) instanceof Double); - + jps.setObject(1, someDouble, Types.VARCHAR); assertEquals(String.valueOf(someDouble), value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, someDouble, Types.REAL); - assertEquals(new Float(someDouble), value(jps)); + assertEquals((float) someDouble, value(jps)); assertEquals(REAL, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingDoubleValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); double someDouble = randomDouble(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someDouble, Types.TIMESTAMP)); assertEquals("Conversion from type [DOUBLE] to [Timestamp] not supported", sqle.getMessage()); - + Double doubleNotInt = 5_155_000_000d; sqle = expectThrows(SQLException.class, () -> jps.setObject(1, doubleNotInt, Types.INTEGER)); - assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", + assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", Long.toString(((Number) doubleNotInt).longValue())), sqle.getMessage()); } - + public void testUnsupportedClasses() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); SQLFeatureNotSupportedException sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, new Struct() { @@ -341,23 +341,23 @@ public Object[] getAttributes() throws SQLException { 
} })); assertEquals("Objects of type java.sql.Struct are not supported", sfnse.getMessage()); - + sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, new URL("http://test"))); assertEquals("Objects of type java.net.URL are not supported", sfnse.getMessage()); - + sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setURL(1, new URL("http://test"))); assertEquals("Objects of type java.net.URL are not supported", sfnse.getMessage()); - + sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, this, Types.TIMESTAMP)); assertEquals("Conversion from type " + this.getClass().getName() + " to TIMESTAMP not supported", sfnse.getMessage()); - + SQLException se = expectThrows(SQLException.class, () -> jps.setObject(1, this, 1_000_000)); assertEquals("Type:1000000 is not a valid Types.java value.", se.getMessage()); - + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> jps.setObject(1, randomShort(), Types.CHAR)); assertEquals("Unsupported JDBC type [CHAR]", iae.getMessage()); } - + public void testSettingTimestampValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); @@ -365,186 +365,186 @@ public void testSettingTimestampValues() throws SQLException { jps.setTimestamp(1, someTimestamp); assertEquals(someTimestamp.getTime(), ((Date)value(jps)).getTime()); assertEquals(TIMESTAMP, jdbcType(jps)); - + Calendar nonDefaultCal = randomCalendar(); // February 29th, 2016. 01:17:55 GMT = 1456708675000 millis since epoch jps.setTimestamp(1, new Timestamp(1456708675000L), nonDefaultCal); assertEquals(1456708675000L, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); assertEquals(TIMESTAMP, jdbcType(jps)); - + long beforeEpochTime = -randomMillisSinceEpoch(); jps.setTimestamp(1, new Timestamp(beforeEpochTime), nonDefaultCal); assertEquals(beforeEpochTime, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); assertTrue(value(jps) instanceof java.util.Date); - + jps.setObject(1, someTimestamp, Types.VARCHAR); assertEquals(someTimestamp.toString(), value(jps).toString()); assertEquals(VARCHAR, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingTimestampValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); Timestamp someTimestamp = new Timestamp(randomMillisSinceEpoch()); - + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someTimestamp, Types.INTEGER)); assertEquals("Conversion from type java.sql.Timestamp to INTEGER not supported", sqle.getMessage()); } public void testSettingTimeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + Time time = new Time(4675000); Calendar nonDefaultCal = randomCalendar(); jps.setTime(1, time, nonDefaultCal); assertEquals(4675000, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); assertEquals(TIMESTAMP, jdbcType(jps)); assertTrue(value(jps) instanceof java.util.Date); - + jps.setObject(1, time, Types.VARCHAR); assertEquals(time.toString(), value(jps).toString()); assertEquals(VARCHAR, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingTimeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); Time time = new Time(4675000); - + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, time, Types.INTEGER)); assertEquals("Conversion from type java.sql.Time to INTEGER not supported", 
sqle.getMessage()); } - + public void testSettingSqlDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + java.sql.Date someSqlDate = new java.sql.Date(randomMillisSinceEpoch()); jps.setDate(1, someSqlDate); assertEquals(someSqlDate.getTime(), ((Date)value(jps)).getTime()); assertEquals(TIMESTAMP, jdbcType(jps)); - + someSqlDate = new java.sql.Date(randomMillisSinceEpoch()); Calendar nonDefaultCal = randomCalendar(); jps.setDate(1, someSqlDate, nonDefaultCal); assertEquals(someSqlDate.getTime(), convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); assertEquals(TIMESTAMP, jdbcType(jps)); assertTrue(value(jps) instanceof java.util.Date); - + jps.setObject(1, someSqlDate, Types.VARCHAR); assertEquals(someSqlDate.toString(), value(jps).toString()); assertEquals(VARCHAR, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingSqlDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); java.sql.Date someSqlDate = new java.sql.Date(randomMillisSinceEpoch()); - - SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, + + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, new java.sql.Date(randomMillisSinceEpoch()), Types.DOUBLE)); assertEquals("Conversion from type " + someSqlDate.getClass().getName() + " to DOUBLE not supported", sqle.getMessage()); } - + public void testSettingCalendarValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); Calendar someCalendar = randomCalendar(); someCalendar.setTimeInMillis(randomMillisSinceEpoch()); - + jps.setObject(1, someCalendar); assertEquals(someCalendar.getTime(), (Date) value(jps)); assertEquals(TIMESTAMP, jdbcType(jps)); assertTrue(value(jps) instanceof java.util.Date); - + jps.setObject(1, someCalendar, Types.VARCHAR); assertEquals(someCalendar.toString(), value(jps).toString()); assertEquals(VARCHAR, jdbcType(jps)); - + Calendar nonDefaultCal = randomCalendar(); jps.setObject(1, nonDefaultCal); assertEquals(nonDefaultCal.getTime(), (Date) value(jps)); assertEquals(TIMESTAMP, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingCalendarValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); Calendar someCalendar = randomCalendar(); - + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someCalendar, Types.DOUBLE)); assertEquals("Conversion from type " + someCalendar.getClass().getName() + " to DOUBLE not supported", sqle.getMessage()); } - + public void testSettingDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); Date someDate = new Date(randomMillisSinceEpoch()); - + jps.setObject(1, someDate); assertEquals(someDate, (Date) value(jps)); assertEquals(TIMESTAMP, jdbcType(jps)); assertTrue(value(jps) instanceof java.util.Date); - + jps.setObject(1, someDate, Types.VARCHAR); assertEquals(someDate.toString(), value(jps).toString()); assertEquals(VARCHAR, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); Date someDate = new Date(randomMillisSinceEpoch()); - + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someDate, Types.BIGINT)); assertEquals("Conversion from type " + someDate.getClass().getName() + " to BIGINT not supported", sqle.getMessage()); } - + public void 
testSettingLocalDateTimeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); LocalDateTime ldt = LocalDateTime.now(Clock.systemDefaultZone()); - + jps.setObject(1, ldt); assertEquals(Date.class, value(jps).getClass()); assertEquals(TIMESTAMP, jdbcType(jps)); assertTrue(value(jps) instanceof java.util.Date); - + jps.setObject(1, ldt, Types.VARCHAR); assertEquals(ldt.toString(), value(jps).toString()); assertEquals(VARCHAR, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingLocalDateTimeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); LocalDateTime ldt = LocalDateTime.now(Clock.systemDefaultZone()); - + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, ldt, Types.BIGINT)); assertEquals("Conversion from type " + ldt.getClass().getName() + " to BIGINT not supported", sqle.getMessage()); } - + public void testSettingByteArrayValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + byte[] buffer = "some data".getBytes(StandardCharsets.UTF_8); jps.setBytes(1, buffer); assertEquals(byte[].class, value(jps).getClass()); assertEquals(VARBINARY, jdbcType(jps)); - + jps.setObject(1, buffer); assertEquals(byte[].class, value(jps).getClass()); assertEquals(VARBINARY, jdbcType(jps)); assertTrue(value(jps) instanceof byte[]); - + jps.setObject(1, buffer, Types.VARBINARY); assertEquals((byte[]) value(jps), buffer); assertEquals(VARBINARY, jdbcType(jps)); - + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.VARCHAR)); assertEquals("Conversion from type byte[] to VARCHAR not supported", sqle.getMessage()); - + sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.DOUBLE)); assertEquals("Conversion from type byte[] to DOUBLE not supported", sqle.getMessage()); } - + public void testThrownExceptionsWhenSettingByteArrayValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); byte[] buffer = "foo".getBytes(StandardCharsets.UTF_8); - + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.VARCHAR)); assertEquals("Conversion from type byte[] to VARCHAR not supported", sqle.getMessage()); - + sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.DOUBLE)); assertEquals("Conversion from type byte[] to DOUBLE not supported", sqle.getMessage()); } @@ -564,14 +564,14 @@ private JDBCType jdbcType(JdbcPreparedStatement jps) throws SQLException { private Object value(JdbcPreparedStatement jps) throws SQLException { return jps.query.getParam(1).value; } - + private Calendar randomCalendar() { return Calendar.getInstance(randomTimeZone(), Locale.ROOT); } - + /* * Converts from UTC to the provided Calendar. - * Helps checking if the converted date/time values using Calendars in set*(...,Calendar) methods did convert + * Helps checking if the converted date/time values using Calendars in set*(...,Calendar) methods did convert * the values correctly to UTC. 
      */
     private long convertFromUTCtoCalendar(Date date, Calendar nonDefaultCal) throws SQLException {
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java
index ec309c69476cc..695c9b192eaa6 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java
@@ -13,20 +13,15 @@
 public abstract class CronnableSchedule implements Schedule {
 
-    private static final Comparator<Cron> CRON_COMPARATOR = new Comparator<Cron>() {
-        @Override
-        public int compare(Cron c1, Cron c2) {
-            return c1.expression().compareTo(c2.expression());
-        }
-    };
+    private static final Comparator<Cron> CRON_COMPARATOR = Comparator.comparing(Cron::expression);
 
     protected final Cron[] crons;
 
-    public CronnableSchedule(String... expressions) {
+    CronnableSchedule(String... expressions) {
         this(crons(expressions));
     }
 
-    public CronnableSchedule(Cron... crons) {
+    private CronnableSchedule(Cron... crons) {
         assert crons.length > 0;
         this.crons = crons;
         Arrays.sort(crons, CRON_COMPARATOR);
 
@@ -37,7 +32,15 @@ public long nextScheduledTimeAfter(long startTime, long time) {
         assert time >= startTime;
         long nextTime = Long.MAX_VALUE;
         for (Cron cron : crons) {
-            nextTime = Math.min(nextTime, cron.getNextValidTimeAfter(time));
+            long nextValidTimeAfter = cron.getNextValidTimeAfter(time);
+
+            boolean previousCronExpired = nextTime == -1;
+            boolean currentCronValid = nextValidTimeAfter > -1;
+            if (previousCronExpired && currentCronValid) {
+                nextTime = nextValidTimeAfter;
+            } else {
+                nextTime = Math.min(nextTime, nextValidTimeAfter);
+            }
         }
         return nextTime;
     }
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java
index 7331be4553bbe..7c3abd1d808e9 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java
@@ -33,7 +33,7 @@ public void testOpEvalEQ() throws Exception {
         assertThat(CompareCondition.Op.EQ.eval(null, null), is(true));
         assertThat(CompareCondition.Op.EQ.eval(4, 3.0), is(false));
         assertThat(CompareCondition.Op.EQ.eval(3, 3.0), is(true));
-        assertThat(CompareCondition.Op.EQ.eval(2, new Float(3.0)), is(false));
+        assertThat(CompareCondition.Op.EQ.eval(2, Float.valueOf((float)3.0)), is(false));
         assertThat(CompareCondition.Op.EQ.eval(3, null), is(false));
         assertThat(CompareCondition.Op.EQ.eval(2, "2"), is(true)); // comparing as strings
         assertThat(CompareCondition.Op.EQ.eval(3, "4"), is(false)); // comparing as strings
@@ -59,7 +59,7 @@ public void testOpEvalNotEQ() throws Exception {
         assertThat(CompareCondition.Op.NOT_EQ.eval(null, null), is(false));
         assertThat(CompareCondition.Op.NOT_EQ.eval(4, 3.0), is(true));
         assertThat(CompareCondition.Op.NOT_EQ.eval(3, 3.0), is(false));
-        assertThat(CompareCondition.Op.NOT_EQ.eval(2, new Float(3.0)), is(true));
+        assertThat(CompareCondition.Op.NOT_EQ.eval(2, Float.valueOf((float)3.0)), is(true));
         assertThat(CompareCondition.Op.NOT_EQ.eval(3, null), is(true));
         assertThat(CompareCondition.Op.NOT_EQ.eval(2, "2"), is(false)); // comparing as strings
         assertThat(CompareCondition.Op.NOT_EQ.eval(3, "4"), is(true)); // comparing as strings
@@ -83,7 +83,7 @@ public void testOpEvalNotEQ() throws Exception {
     public void testOpEvalGTE() throws Exception {
         assertThat(CompareCondition.Op.GTE.eval(4, 3.0), is(true));
         assertThat(CompareCondition.Op.GTE.eval(3, 3.0), is(true));
-        assertThat(CompareCondition.Op.GTE.eval(2, new Float(3.0)), is(false));
+        assertThat(CompareCondition.Op.GTE.eval(2, Float.valueOf((float)3.0)), is(false));
         assertThat(CompareCondition.Op.GTE.eval(3, null), is(false));
         assertThat(CompareCondition.Op.GTE.eval(3, "2"), is(true)); // comparing as strings
         assertThat(CompareCondition.Op.GTE.eval(3, "4"), is(false)); // comparing as strings
@@ -103,7 +103,7 @@ public void testOpEvalGTE() throws Exception {
     public void testOpEvalGT() throws Exception {
         assertThat(CompareCondition.Op.GT.eval(4, 3.0), is(true));
         assertThat(CompareCondition.Op.GT.eval(3, 3.0), is(false));
-        assertThat(CompareCondition.Op.GT.eval(2, new Float(3.0)), is(false));
+        assertThat(CompareCondition.Op.GT.eval(2, Float.valueOf((float)3.0)), is(false));
         assertThat(CompareCondition.Op.GT.eval(3, null), is(false));
         assertThat(CompareCondition.Op.GT.eval(3, "2"), is(true)); // comparing as strings
         assertThat(CompareCondition.Op.GT.eval(3, "4"), is(false)); // comparing as strings
@@ -124,7 +124,7 @@ public void testOpEvalGT() throws Exception {
     public void testOpEvalLTE() throws Exception {
         assertThat(CompareCondition.Op.LTE.eval(4, 3.0), is(false));
         assertThat(CompareCondition.Op.LTE.eval(3, 3.0), is(true));
-        assertThat(CompareCondition.Op.LTE.eval(2, new Float(3.0)), is(true));
+        assertThat(CompareCondition.Op.LTE.eval(2, Float.valueOf((float) 3.0)), is(true));
         assertThat(CompareCondition.Op.LTE.eval(3, null), is(false));
         assertThat(CompareCondition.Op.LTE.eval(3, "2"), is(false)); // comparing as strings
         assertThat(CompareCondition.Op.LTE.eval(3, "4"), is(true)); // comparing as strings
@@ -144,7 +144,7 @@ public void testOpEvalLTE() throws Exception {
     public void testOpEvalLT() throws Exception {
         assertThat(CompareCondition.Op.LT.eval(4, 3.0), is(false));
         assertThat(CompareCondition.Op.LT.eval(3, 3.0), is(false));
-        assertThat(CompareCondition.Op.LT.eval(2, new Float(3.0)), is(true));
+        assertThat(CompareCondition.Op.LT.eval(2, Float.valueOf((float) 3.0)), is(true));
         assertThat(CompareCondition.Op.LT.eval(3, null), is(false));
         assertThat(CompareCondition.Op.LT.eval(3, "2"), is(false)); // comparing as strings
         assertThat(CompareCondition.Op.LT.eval(3, "4"), is(true)); // comparing as strings
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java
index 1ade767410b53..6cbdf6e122656 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java
@@ -11,11 +11,15 @@
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 
+import java.time.ZoneOffset;
+import java.time.ZonedDateTime;
+
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.arrayWithSize;
 import static org.hamcrest.Matchers.hasItemInArray;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
 
 public class CronScheduleTests extends ScheduleTestCase {
     public void testInvalid() throws Exception {
@@ -54,18 +58,25 @@ public void testParseMultiple() throws Exception {
         assertThat(crons, hasItemInArray("0 0/3 * * * ?"));
     }
 
+    public void testMultipleCronsNextScheduledAfter() {
+        CronSchedule schedule = new CronSchedule("0 5 9 1 1 ? 2019", "0 5 9 1 1 ? 2020", "0 5 9 1 1 ? 2017");
+        ZonedDateTime start2019 = ZonedDateTime.of(2019, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+        ZonedDateTime start2020 = ZonedDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+        long firstSchedule = schedule.nextScheduledTimeAfter(0, start2019.toInstant().toEpochMilli());
+        long secondSchedule = schedule.nextScheduledTimeAfter(0, start2020.toInstant().toEpochMilli());
+
+        assertThat(firstSchedule, is(not(-1L)));
+        assertThat(secondSchedule, is(not(-1L)));
+        assertThat(firstSchedule, is(not(secondSchedule)));
+    }
+
     public void testParseInvalidBadExpression() throws Exception {
         XContentBuilder builder = jsonBuilder().value("0 0/5 * * ?");
         BytesReference bytes = BytesReference.bytes(builder);
         XContentParser parser = createParser(JsonXContent.jsonXContent, bytes);
         parser.nextToken();
-        try {
-            new CronSchedule.Parser().parse(parser);
-            fail("expected cron parsing to fail when using invalid cron expression");
-        } catch (ElasticsearchParseException pe) {
-            // expected
-            assertThat(pe.getCause(), instanceOf(IllegalArgumentException.class));
-        }
+        ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> new CronSchedule.Parser().parse(parser));
+        assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
     }
 
     public void testParseInvalidEmpty() throws Exception {
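Two of the changes above are worth unpacking. First, CronnableSchedule.nextScheduledTimeAfter: Cron.getNextValidTimeAfter returns -1 once an expression can never fire again (the year-pinned expressions in the new test exercise exactly this), and the old Math.min reduction let a single expired expression drag the result down to -1, turning a schedule with perfectly valid expressions into one that never fires. A toy reduction with illustrative values:

    long nextTime = Long.MAX_VALUE;
    long expired = -1L;                // e.g. "0 5 9 1 1 ? 2017" evaluated in 2019
    long upcoming = 1546333500000L;    // a genuine future fire time (illustrative)
    // Old reduction: the expired cron poisons the result.
    long oldResult = Math.min(Math.min(nextTime, expired), upcoming);           // -1, "never"
    // New reduction: a valid candidate supersedes a previous -1.
    long next = Math.min(nextTime, expired);                                    // -1 so far
    next = (next == -1 && upcoming > -1) ? upcoming : Math.min(next, upcoming); // upcoming

Second, the CronScheduleTests refactor swaps the try/fail/catch idiom for the expectThrows helper these tests inherit, which fails if nothing is thrown, asserts the exception type, and returns the typed exception for follow-up assertions. The general shape, with a hypothetical method standing in for the failing call:

    ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class,
            () -> parseSomethingInvalid());
    assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));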