diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 00af4d006d0ac..2bd91b7fe3739 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -48,7 +48,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.18", "8.12.1", "8.13.0"] + BWC_VERSION: ["7.17.19", "8.12.2", "8.13.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 30d4f4486dad5..ed00a0655dbd8 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1121,6 +1121,22 @@ steps: env: BWC_VERSION: 7.17.18 + - label: "{{matrix.image}} / 7.17.19 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.19 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.19 + - label: "{{matrix.image}} / 8.0.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.0 timeout_in_minutes: 300 @@ -1841,6 +1857,22 @@ steps: env: BWC_VERSION: 8.12.1 + - label: "{{matrix.image}} / 8.12.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.12.2 + - label: "{{matrix.image}} / 8.13.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.13.0 timeout_in_minutes: 300 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 44007272f8954..86dc3c216d060 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -682,6 +682,16 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.18 + - label: 7.17.19 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.19#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.19 - label: 8.0.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.0#bwcTest timeout_in_minutes: 300 @@ -1132,6 +1142,16 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.12.1 + - label: 8.12.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.12.2 - label: 8.13.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.13.0#bwcTest timeout_in_minutes: 300 diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index c4aa43c775b1e..de0212685a8a7 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -11,7 +11,7 @@ "set_commit_status": false, "build_on_commit": true, "build_on_comment": true, - "trigger_comment_regex": 
"(run\\W+elasticsearch-ci.+)|(^\\s*(buildkite\\s*)?test\\s+this(\\s+please)?)", + "trigger_comment_regex": "(run\\W+elasticsearch-ci.+)|(^\\s*((buildkite|@elastic(search)?machine)\\s*)?test\\s+this(\\s+please)?)", "cancel_intermediate_builds": true, "cancel_intermediate_builds_on_comment": false }, diff --git a/.buildkite/scripts/pull-request/__snapshots__/pipeline.test.ts.snap b/.buildkite/scripts/pull-request/__snapshots__/pipeline.test.ts.snap index 6df8ca8b63438..50dea7a07e042 100644 --- a/.buildkite/scripts/pull-request/__snapshots__/pipeline.test.ts.snap +++ b/.buildkite/scripts/pull-request/__snapshots__/pipeline.test.ts.snap @@ -201,3 +201,111 @@ exports[`generatePipelines should generate correct pipeline when using a trigger }, ] `; + +exports[`generatePipelines should generate correct pipelines with a non-docs change and @elasticmachine 1`] = ` +[ + { + "name": "bwc-snapshots", + "pipeline": { + "steps": [ + { + "group": "bwc-snapshots", + "steps": [ + { + "agents": { + "buildDirectory": "/dev/shm/bk", + "image": "family/elasticsearch-ubuntu-2004", + "machineType": "custom-32-98304", + "provider": "gcp", + }, + "command": ".ci/scripts/run-gradle.sh -Dignore.tests.seed v{{matrix.BWC_VERSION}}#bwcTest", + "env": { + "BWC_VERSION": "{{matrix.BWC_VERSION}}", + }, + "label": "{{matrix.BWC_VERSION}} / bwc-snapshots", + "matrix": { + "setup": { + "BWC_VERSION": [ + "7.17.14", + "8.10.3", + "8.11.0", + ], + }, + }, + "timeout_in_minutes": 300, + }, + ], + }, + ], + }, + }, + { + "name": "using-defaults", + "pipeline": { + "env": { + "CUSTOM_ENV_VAR": "value", + }, + "steps": [ + { + "command": "echo 'hello world'", + "label": "test-step", + }, + ], + }, + }, +] +`; + +exports[`generatePipelines should generate correct pipelines with a non-docs change and @elasticsearchmachine 1`] = ` +[ + { + "name": "bwc-snapshots", + "pipeline": { + "steps": [ + { + "group": "bwc-snapshots", + "steps": [ + { + "agents": { + "buildDirectory": "/dev/shm/bk", + "image": "family/elasticsearch-ubuntu-2004", + "machineType": "custom-32-98304", + "provider": "gcp", + }, + "command": ".ci/scripts/run-gradle.sh -Dignore.tests.seed v{{matrix.BWC_VERSION}}#bwcTest", + "env": { + "BWC_VERSION": "{{matrix.BWC_VERSION}}", + }, + "label": "{{matrix.BWC_VERSION}} / bwc-snapshots", + "matrix": { + "setup": { + "BWC_VERSION": [ + "7.17.14", + "8.10.3", + "8.11.0", + ], + }, + }, + "timeout_in_minutes": 300, + }, + ], + }, + ], + }, + }, + { + "name": "using-defaults", + "pipeline": { + "env": { + "CUSTOM_ENV_VAR": "value", + }, + "steps": [ + { + "command": "echo 'hello world'", + "label": "test-step", + }, + ], + }, + }, +] +`; diff --git a/.buildkite/scripts/pull-request/pipeline.test.ts b/.buildkite/scripts/pull-request/pipeline.test.ts index d0634752260e4..562f37abbae1f 100644 --- a/.buildkite/scripts/pull-request/pipeline.test.ts +++ b/.buildkite/scripts/pull-request/pipeline.test.ts @@ -13,11 +13,11 @@ describe("generatePipelines", () => { }); // Helper for testing pipeline generations that should be the same when using the overall ci trigger comment "buildkite test this" - const testWithTriggerCheck = (directory: string, changedFiles?: string[]) => { + const testWithTriggerCheck = (directory: string, changedFiles?: string[], comment = "buildkite test this") => { const pipelines = generatePipelines(directory, changedFiles); expect(pipelines).toMatchSnapshot(); - process.env["GITHUB_PR_TRIGGER_COMMENT"] = "buildkite test this"; + process.env["GITHUB_PR_TRIGGER_COMMENT"] = comment; const pipelinesWithTriggerComment 
= generatePipelines(directory, changedFiles); expect(pipelinesWithTriggerComment).toEqual(pipelines); }; @@ -42,4 +42,20 @@ describe("generatePipelines", () => { const pipelines = generatePipelines(`${import.meta.dir}/mocks/pipelines`, ["build.gradle"]); expect(pipelines).toMatchSnapshot(); }); + + test("should generate correct pipelines with a non-docs change and @elasticmachine", () => { + testWithTriggerCheck( + `${import.meta.dir}/mocks/pipelines`, + ["build.gradle", "docs/README.asciidoc"], + "@elasticmachine test this please" + ); + }); + + test("should generate correct pipelines with a non-docs change and @elasticsearchmachine", () => { + testWithTriggerCheck( + `${import.meta.dir}/mocks/pipelines`, + ["build.gradle", "docs/README.asciidoc"], + "@elasticsearchmachine test this please" + ); + }); }); diff --git a/.buildkite/scripts/pull-request/pipeline.ts b/.buildkite/scripts/pull-request/pipeline.ts index 65aec47fe3cc8..6cb0e5d76b74b 100644 --- a/.buildkite/scripts/pull-request/pipeline.ts +++ b/.buildkite/scripts/pull-request/pipeline.ts @@ -148,7 +148,9 @@ export const generatePipelines = ( // However, if we're using the overall CI trigger "[buildkite] test this [please]", we should use the regular filters above if ( process.env["GITHUB_PR_TRIGGER_COMMENT"] && - !process.env["GITHUB_PR_TRIGGER_COMMENT"].match(/^\s*(buildkite\s*)?test\s+this(\s+please)?/i) + !process.env["GITHUB_PR_TRIGGER_COMMENT"].match( + /^\s*((@elastic(search)?machine|buildkite)\s*)?test\s+this(\s+please)?/i + ) ) { filters = [triggerCommentCheck]; } diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 3871c6d06fd23..8ac1a60c9530c 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -67,6 +67,7 @@ BWC_VERSION: - "7.17.16" - "7.17.17" - "7.17.18" + - "7.17.19" - "8.0.0" - "8.0.1" - "8.1.0" @@ -112,4 +113,5 @@ BWC_VERSION: - "8.11.4" - "8.12.0" - "8.12.1" + - "8.12.2" - "8.13.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 36c0eb5a2999c..079f3565880e4 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,4 +1,4 @@ BWC_VERSION: - - "7.17.18" - - "8.12.1" + - "7.17.19" + - "8.12.2" - "8.13.0" diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java index 9858b124f0e73..9511a6bc01e08 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java @@ -26,7 +26,6 @@ import org.elasticsearch.index.mapper.ProvidedIdFieldMapper; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; -import org.elasticsearch.plugins.internal.DocumentParsingObserver; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptCompiler; import org.elasticsearch.script.ScriptContext; @@ -72,8 +71,7 @@ public static MapperService create(String mappings) { public T compile(Script script, ScriptContext scriptContext) { throw new UnsupportedOperationException(); } - }, - () -> DocumentParsingObserver.EMPTY_INSTANCE + } ); try { diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/QueryParserHelperBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/QueryParserHelperBenchmark.java index 6aacf319e6b8b..b6cbc3e7cce02 100644 --- 
a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/QueryParserHelperBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/QueryParserHelperBenchmark.java @@ -40,7 +40,6 @@ import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.plugins.internal.DocumentParsingObserver; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptCompiler; import org.elasticsearch.script.ScriptContext; @@ -187,8 +186,7 @@ protected final MapperService createMapperService(String mappings) { public T compile(Script script, ScriptContext scriptContext) { throw new UnsupportedOperationException(); } - }, - () -> DocumentParsingObserver.EMPTY_INSTANCE + } ); try { diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/util/Util.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/util/Util.java index 5cdc72ebde38e..39a4ec3f30e85 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/util/Util.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/util/Util.java @@ -119,29 +119,29 @@ public static SourceSetContainer getJavaSourceSets(Project project) { return project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets(); } - public static File locateElasticsearchWorkspace(Gradle gradle) { - if(gradle.getRootProject().getName().startsWith("build-tools")) { - File buildToolsParent = gradle.getRootProject().getRootDir().getParentFile(); - if(versionFileExists(buildToolsParent)) { - return buildToolsParent; - } + public static File locateElasticsearchWorkspace(Gradle gradle) { + if (gradle.getRootProject().getName().startsWith("build-tools")) { + File buildToolsParent = gradle.getRootProject().getRootDir().getParentFile(); + if (versionFileExists(buildToolsParent)) { return buildToolsParent; } - if (gradle.getParent() == null) { - // See if any of these included builds is the Elasticsearch gradle - for (IncludedBuild includedBuild : gradle.getIncludedBuilds()) { - if (versionFileExists(includedBuild.getProjectDir())) { - return includedBuild.getProjectDir(); - } + return buildToolsParent; + } + if (gradle.getParent() == null) { + // See if any of these included builds is the Elasticsearch gradle + for (IncludedBuild includedBuild : gradle.getIncludedBuilds()) { + if (versionFileExists(includedBuild.getProjectDir())) { + return includedBuild.getProjectDir(); } - - // Otherwise assume this gradle is the root elasticsearch workspace - return gradle.getRootProject().getRootDir(); - } else { - // We're an included build, so keep looking - return locateElasticsearchWorkspace(gradle.getParent()); } + + // Otherwise assume this gradle is the root elasticsearch workspace + return gradle.getRootProject().getRootDir(); + } else { + // We're an included build, so keep looking + return locateElasticsearchWorkspace(gradle.getParent()); } + } private static boolean versionFileExists(File rootDir) { return new File(rootDir, "build-tools-internal/version.properties").exists(); diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index 934d9f05d77a2..758cdf687e6b6 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -143,6 +143,10 @@ gradlePlugin { id = 'elasticsearch.mrjar' implementationClass = 
'org.elasticsearch.gradle.internal.MrjarPlugin' } + embeddedProvider { + id = 'elasticsearch.embedded-providers' + implementationClass = 'org.elasticsearch.gradle.internal.EmbeddedProviderPlugin' + } releaseTools { id = 'elasticsearch.release-tools' implementationClass = 'org.elasticsearch.gradle.internal.release.ReleaseToolsPlugin' diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index 683a2d5604055..ce068d4ca6490 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -7,6 +7,7 @@ */ import org.elasticsearch.gradle.util.Pair +import org.elasticsearch.gradle.util.GradleUtils import org.elasticsearch.gradle.internal.info.BuildParams import org.jetbrains.gradle.ext.JUnit @@ -109,16 +110,25 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { } } + // aggregate task so dependency artifacts below can can use one task name + tasks.register("generateProviderImpls").configure { + group = 'ide' + description = 'Builds all embedded provider impls' + + dependsOn subprojects + .collect { GradleUtils.findByName(it.tasks, 'generateProviderImpls') } + .findAll { it != null } + } + tasks.register('buildDependencyArtifacts') { group = 'ide' description = 'Builds artifacts needed as dependency for IDE modules' - dependsOn([':client:rest-high-level:shadowJar', - ':plugins:repository-hdfs:hadoop-client-api:shadowJar', - ':libs:elasticsearch-x-content:generateProviderImpl', + dependsOn([':plugins:repository-hdfs:hadoop-client-api:shadowJar', ':x-pack:plugin:esql:compute:ann:jar', ':x-pack:plugin:esql:compute:gen:jar', ':server:generateModulesList', - ':server:generatePluginsList'].collect { elasticsearchProject.right()?.task(it) ?: it }) + ':server:generatePluginsList', + ':generateProviderImpls'].collect { elasticsearchProject.right()?.task(it) ?: it }) } idea { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java index 4a695e93ebdfe..e224b16bf588e 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -26,14 +26,26 @@ import org.gradle.api.tasks.compile.CompileOptions; import org.gradle.api.tasks.compile.GroovyCompile; import org.gradle.api.tasks.compile.JavaCompile; +import org.gradle.jvm.toolchain.JavaLanguageVersion; +import org.gradle.jvm.toolchain.JavaToolchainService; import java.util.List; +import javax.inject.Inject; + /** * A wrapper around Gradle's Java Base plugin that applies our * common configuration for production code. 
*/ public class ElasticsearchJavaBasePlugin implements Plugin { + + private final JavaToolchainService javaToolchains; + + @Inject + ElasticsearchJavaBasePlugin(JavaToolchainService javaToolchains) { + this.javaToolchains = javaToolchains; + } + @Override public void apply(Project project) { // make sure the global build info plugin is applied to the root project @@ -103,7 +115,7 @@ private static void disableTransitiveDependenciesForSourceSet(Project project, S /** * Adds compiler settings to the project */ - public static void configureCompile(Project project) { + public void configureCompile(Project project) { project.getExtensions().getExtraProperties().set("compactProfile", "full"); JavaPluginExtension java = project.getExtensions().getByType(JavaPluginExtension.class); if (BuildParams.getJavaToolChainSpec().isPresent()) { @@ -112,6 +124,10 @@ public static void configureCompile(Project project) { java.setSourceCompatibility(BuildParams.getMinimumRuntimeVersion()); java.setTargetCompatibility(BuildParams.getMinimumRuntimeVersion()); project.getTasks().withType(JavaCompile.class).configureEach(compileTask -> { + compileTask.getJavaCompiler().set(javaToolchains.compilerFor(spec -> { + spec.getLanguageVersion().set(JavaLanguageVersion.of(BuildParams.getMinimumRuntimeVersion().getMajorVersion())); + })); + CompileOptions compileOptions = compileTask.getOptions(); /* * -path because gradle will send in paths that don't always exist. diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 31b62c4ac700f..ed2dfb577e038 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -144,6 +144,7 @@ public void execute(Task t) { // don't track these as inputs since they contain absolute paths and break cache relocatability File gradleUserHome = project.getGradle().getGradleUserHomeDir(); nonInputProperties.systemProperty("gradle.user.home", gradleUserHome); + nonInputProperties.systemProperty("workspace.dir", Util.locateElasticsearchWorkspace(project.getGradle())); // we use 'temp' relative to CWD since this is per JVM and tests are forbidden from writing to CWD nonInputProperties.systemProperty("java.io.tmpdir", test.getWorkingDir().toPath().resolve("temp")); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java new file mode 100644 index 0000000000000..e9e75a711a8ff --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal; + +import org.gradle.api.Project; +import org.gradle.api.Task; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.file.Directory; +import org.gradle.api.provider.Provider; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.Sync; +import org.gradle.api.tasks.TaskProvider; + +import static org.elasticsearch.gradle.internal.conventions.GUtils.capitalize; +import static org.elasticsearch.gradle.util.GradleUtils.getJavaSourceSets; +import static org.gradle.api.artifacts.type.ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE; +import static org.gradle.api.artifacts.type.ArtifactTypeDefinition.DIRECTORY_TYPE; + +public class EmbeddedProviderExtension { + + private final Project project; + private final TaskProvider metaTask; + + public EmbeddedProviderExtension(Project project, TaskProvider metaTask) { + this.project = project; + this.metaTask = metaTask; + } + + void impl(String implName, Project implProject) { + String projectName = implProject.getName(); + String capitalName = capitalize(projectName); + + Configuration implConfig = project.getConfigurations().detachedConfiguration(project.getDependencies().create(implProject)); + implConfig.attributes(attrs -> { + attrs.attribute(ARTIFACT_TYPE_ATTRIBUTE, DIRECTORY_TYPE); + attrs.attribute(EmbeddedProviderPlugin.IMPL_ATTR, true); + }); + + String manifestTaskName = "generate" + capitalName + "ProviderManifest"; + Provider generatedResourcesDir = project.getLayout().getBuildDirectory().dir("generated-resources"); + var generateProviderManifest = project.getTasks().register(manifestTaskName, GenerateProviderManifest.class); + generateProviderManifest.configure(t -> { + t.getManifestFile().set(generatedResourcesDir.map(d -> d.file("LISTING.TXT"))); + t.getProviderImplClasspath().from(implConfig); + }); + + String implTaskName = "generate" + capitalName + "ProviderImpl"; + var generateProviderImpl = project.getTasks().register(implTaskName, Sync.class); + generateProviderImpl.configure(t -> { + t.into(generatedResourcesDir); + t.into("IMPL-JARS/" + implName, spec -> { + spec.from(implConfig); + spec.from(generateProviderManifest); + }); + }); + metaTask.configure(t -> { t.dependsOn(generateProviderImpl); }); + + var mainSourceSet = getJavaSourceSets(project).findByName(SourceSet.MAIN_SOURCE_SET_NAME); + mainSourceSet.getOutput().dir(generateProviderImpl); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderPlugin.java new file mode 100644 index 0000000000000..99186c6221443 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderPlugin.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal; + +import org.elasticsearch.gradle.transform.UnzipTransform; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.Task; +import org.gradle.api.attributes.Attribute; +import org.gradle.api.tasks.TaskProvider; + +import static org.gradle.api.artifacts.type.ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE; +import static org.gradle.api.artifacts.type.ArtifactTypeDefinition.DIRECTORY_TYPE; +import static org.gradle.api.artifacts.type.ArtifactTypeDefinition.JAR_TYPE; + +public class EmbeddedProviderPlugin implements Plugin { + static final Attribute IMPL_ATTR = Attribute.of("is.impl", Boolean.class); + + @Override + public void apply(Project project) { + + project.getDependencies().registerTransform(UnzipTransform.class, transformSpec -> { + transformSpec.getFrom().attribute(ARTIFACT_TYPE_ATTRIBUTE, JAR_TYPE).attribute(IMPL_ATTR, true); + transformSpec.getTo().attribute(ARTIFACT_TYPE_ATTRIBUTE, DIRECTORY_TYPE).attribute(IMPL_ATTR, true); + transformSpec.parameters(parameters -> parameters.getIncludeArtifactName().set(true)); + }); + + TaskProvider metaTask = project.getTasks().register("generateProviderImpls"); + project.getExtensions().create("embeddedProviders", EmbeddedProviderExtension.class, project, metaTask); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionModuleCheckTaskProvider.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionModuleCheckTaskProvider.java index 54bd18d3a4042..f94cc2c133acd 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionModuleCheckTaskProvider.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionModuleCheckTaskProvider.java @@ -55,6 +55,7 @@ public class InternalDistributionModuleCheckTaskProvider { "org.elasticsearch.grok", "org.elasticsearch.logging", "org.elasticsearch.lz4", + "org.elasticsearch.nativeaccess", "org.elasticsearch.plugin", "org.elasticsearch.plugin.analysis", "org.elasticsearch.pluginclassloader", diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java index 8b21826447b46..8c5d671e00fe7 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java @@ -11,12 +11,14 @@ import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Plugin; import org.gradle.api.Project; -import org.gradle.api.plugins.JavaLibraryPlugin; +import org.gradle.api.file.FileCollection; import org.gradle.api.plugins.JavaPlugin; import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.SourceSetContainer; import org.gradle.api.tasks.compile.CompileOptions; import org.gradle.api.tasks.compile.JavaCompile; +import org.gradle.api.tasks.testing.Test; import org.gradle.jvm.tasks.Jar; import org.gradle.jvm.toolchain.JavaLanguageVersion; import org.gradle.jvm.toolchain.JavaToolchainService; @@ -50,7 +52,7 @@ public class MrjarPlugin implements Plugin { @Override public void apply(Project project) { - project.getPluginManager().apply(JavaLibraryPlugin.class); + project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); var javaExtension = 
project.getExtensions().getByType(JavaPluginExtension.class); var srcDir = project.getProjectDir().toPath().resolve("src"); @@ -73,9 +75,19 @@ private void addMrjarSourceset(Project project, JavaPluginExtension javaExtensio SourceSet sourceSet = javaExtension.getSourceSets().maybeCreate(sourcesetName); GradleUtils.extendSourceSet(project, SourceSet.MAIN_SOURCE_SET_NAME, sourcesetName); - project.getTasks().withType(Jar.class).named(JavaPlugin.JAR_TASK_NAME).configure(jarTask -> { - jarTask.into("META-INF/versions/" + javaVersion, copySpec -> copySpec.from(sourceSet.getOutput())); - jarTask.manifest(manifest -> { manifest.attributes(Map.of("Multi-Release", "true")); }); + var jarTask = project.getTasks().withType(Jar.class).named(JavaPlugin.JAR_TASK_NAME); + jarTask.configure(task -> { + task.into("META-INF/versions/" + javaVersion, copySpec -> copySpec.from(sourceSet.getOutput())); + task.manifest(manifest -> { manifest.attributes(Map.of("Multi-Release", "true")); }); + }); + + project.getTasks().withType(Test.class).named(JavaPlugin.TEST_TASK_NAME).configure(testTask -> { + testTask.dependsOn(jarTask); + + SourceSetContainer sourceSets = GradleUtils.getJavaSourceSets(project); + FileCollection mainRuntime = sourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME).getRuntimeClasspath(); + FileCollection testRuntime = sourceSets.getByName(SourceSet.TEST_SOURCE_SET_NAME).getRuntimeClasspath(); + testTask.setClasspath(testRuntime.minus(mainRuntime).plus(project.files(jarTask))); }); project.getTasks().withType(JavaCompile.class).named(sourceSet.getCompileJavaTaskName()).configure(compileTask -> { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java index 84728d031c40b..1ec6f023eb565 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java @@ -25,8 +25,8 @@ import java.nio.file.Paths; import java.util.Arrays; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -75,7 +75,7 @@ public DockerAvailability getDockerAvailability() { Version version = null; boolean isVersionHighEnough = false; boolean isComposeAvailable = false; - Set supportedArchitectures = new HashSet<>(); + Set supportedArchitectures = EnumSet.noneOf(Architecture.class); // Check if the Docker binary exists final Optional dockerBinary = getDockerPath(); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolver.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolver.java index bddf95cae77d4..0270ee22ca8c5 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolver.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolver.java @@ -101,7 +101,7 @@ private AdoptiumVersionInfo toVersionInfo(JsonNode node) { private URI resolveDownloadURI(AdoptiumVersionRequest request, AdoptiumVersionInfo versionInfo) { return URI.create( "https://api.adoptium.net/v3/binary/version/jdk-" - + versionInfo.openjdkVersion + + versionInfo.semver + "/" + 
request.platform + "/" diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolverSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolverSpec.groovy index 7b8129f8dbaec..6383d577f027f 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolverSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AdoptiumJdkToolchainResolverSpec.groovy @@ -42,7 +42,7 @@ class AdoptiumJdkToolchainResolverSpec extends AbstractToolchainResolverSpec { 1, 1, "" + languageVersion.asInt() + ".1.1.1+37", - 0, "" + languageVersion.asInt() + ".1.1.1" + 0, "" + languageVersion.asInt() + ".1.1.1+37.1" ))) } @@ -52,22 +52,22 @@ class AdoptiumJdkToolchainResolverSpec extends AbstractToolchainResolverSpec { @Override def supportedRequests() { return [ - [19, ADOPTIUM, MAC_OS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37/mac/x64/jdk/hotspot/normal/eclipse?project=jdk"], - [19, ADOPTIUM, LINUX, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37/linux/x64/jdk/hotspot/normal/eclipse?project=jdk"], - [19, ADOPTIUM, WINDOWS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37/windows/x64/jdk/hotspot/normal/eclipse?project=jdk"], - [19, ADOPTIUM, MAC_OS, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37/mac/aarch64/jdk/hotspot/normal/eclipse?project=jdk"], - [19, ADOPTIUM, LINUX, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37/linux/aarch64/jdk/hotspot/normal/eclipse?project=jdk"], + [19, ADOPTIUM, MAC_OS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37.1/mac/x64/jdk/hotspot/normal/eclipse?project=jdk"], + [19, ADOPTIUM, LINUX, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37.1/linux/x64/jdk/hotspot/normal/eclipse?project=jdk"], + [19, ADOPTIUM, WINDOWS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37.1/windows/x64/jdk/hotspot/normal/eclipse?project=jdk"], + [19, ADOPTIUM, MAC_OS, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37.1/mac/aarch64/jdk/hotspot/normal/eclipse?project=jdk"], + [19, ADOPTIUM, LINUX, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-19.1.1.1+37.1/linux/aarch64/jdk/hotspot/normal/eclipse?project=jdk"], - [18, ADOPTIUM, MAC_OS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37/mac/x64/jdk/hotspot/normal/eclipse?project=jdk"], - [18, ADOPTIUM, LINUX, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37/linux/x64/jdk/hotspot/normal/eclipse?project=jdk"], - [18, ADOPTIUM, WINDOWS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37/windows/x64/jdk/hotspot/normal/eclipse?project=jdk"], - [18, ADOPTIUM, MAC_OS, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37/mac/aarch64/jdk/hotspot/normal/eclipse?project=jdk"], - [18, ADOPTIUM, LINUX, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37/linux/aarch64/jdk/hotspot/normal/eclipse?project=jdk"], - [17, ADOPTIUM, MAC_OS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37/mac/x64/jdk/hotspot/normal/eclipse?project=jdk"], - [17, ADOPTIUM, LINUX, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37/linux/x64/jdk/hotspot/normal/eclipse?project=jdk"], - [17, ADOPTIUM, WINDOWS, X86_64, 
"https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37/windows/x64/jdk/hotspot/normal/eclipse?project=jdk"], - [17, ADOPTIUM, MAC_OS, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37/mac/aarch64/jdk/hotspot/normal/eclipse?project=jdk"], - [17, ADOPTIUM, LINUX, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37/linux/aarch64/jdk/hotspot/normal/eclipse?project=jdk"] + [18, ADOPTIUM, MAC_OS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37.1/mac/x64/jdk/hotspot/normal/eclipse?project=jdk"], + [18, ADOPTIUM, LINUX, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37.1/linux/x64/jdk/hotspot/normal/eclipse?project=jdk"], + [18, ADOPTIUM, WINDOWS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37.1/windows/x64/jdk/hotspot/normal/eclipse?project=jdk"], + [18, ADOPTIUM, MAC_OS, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37.1/mac/aarch64/jdk/hotspot/normal/eclipse?project=jdk"], + [18, ADOPTIUM, LINUX, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-18.1.1.1+37.1/linux/aarch64/jdk/hotspot/normal/eclipse?project=jdk"], + [17, ADOPTIUM, MAC_OS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37.1/mac/x64/jdk/hotspot/normal/eclipse?project=jdk"], + [17, ADOPTIUM, LINUX, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37.1/linux/x64/jdk/hotspot/normal/eclipse?project=jdk"], + [17, ADOPTIUM, WINDOWS, X86_64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37.1/windows/x64/jdk/hotspot/normal/eclipse?project=jdk"], + [17, ADOPTIUM, MAC_OS, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37.1/mac/aarch64/jdk/hotspot/normal/eclipse?project=jdk"], + [17, ADOPTIUM, LINUX, AARCH64, "https://api.adoptium.net/v3/binary/version/jdk-17.1.1.1+37.1/linux/aarch64/jdk/hotspot/normal/eclipse?project=jdk"] ] } diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 6d09bd5d9fcbf..56f601fc36197 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -18,7 +18,7 @@ netty = 4.1.94.Final commons_lang3 = 3.9 google_oauth_client = 1.34.1 -antlr4 = 4.11.1 +antlr4 = 4.13.1 # when updating this version, you need to ensure compatibility with: # - distribution/tools/plugin-cli # - x-pack/plugin/security diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/OS.java b/build-tools/src/main/java/org/elasticsearch/gradle/OS.java index e00c442022369..60a534bff540b 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/OS.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/OS.java @@ -7,10 +7,10 @@ */ package org.elasticsearch.gradle; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; +import java.util.EnumMap; +import java.util.EnumSet; import java.util.Map; +import java.util.Set; import java.util.function.Supplier; public enum OS { @@ -34,7 +34,7 @@ public static OS current() { public static class Conditional { - private final Map> conditions = new HashMap<>(); + private final Map> conditions = new EnumMap<>(OS.class); public Conditional onWindows(Supplier supplier) { conditions.put(WINDOWS, supplier); @@ -58,7 +58,7 @@ public Conditional onUnix(Supplier supplier) { } public T supply() { - HashSet missingOS = new HashSet<>(Arrays.asList(OS.values())); + Set missingOS = EnumSet.allOf(OS.class); missingOS.removeAll(conditions.keySet()); if (missingOS.isEmpty() == false) { throw new 
IllegalArgumentException("No condition specified for " + missingOS); diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle deleted file mode 100644 index bcbc73f643298..0000000000000 --- a/client/rest-high-level/build.gradle +++ /dev/null @@ -1,68 +0,0 @@ -import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.info.BuildParams - -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -apply plugin: 'elasticsearch.build' -apply plugin: 'com.github.johnrengelman.shadow' -apply plugin: 'elasticsearch.rest-resources' - -group = 'org.elasticsearch.client' - -base { - archivesName = 'elasticsearch-rest-high-level-client' -} - -restResources { - //we need to copy the yaml spec so we can check naming (see RestHighlevelClientTests#testApiNamingConventions) - restApi { - include '*' - } -} - -dependencies { - api project(':modules:mapper-extras') - api project(':modules:parent-join') - api project(':modules:rank-eval') - api project(':modules:lang-mustache') - api project(':modules:aggregations') - - // Don't bundle the server or low-level rest client JARs in the shadow JAR since these get published to Maven Central - shadow project(':server') - shadow project(':client:rest') - - testImplementation project(':client:test') - testImplementation project(':test:framework') - testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" - testImplementation "junit:junit:${versions.junit}" - //this is needed to make RestHighLevelClientTests#testApiNamingConventions work from IDEs - testImplementation project(":rest-api-spec") - // Needed for serialization tests: - // (In order to serialize a server side class to a client side class or the other way around) - testImplementation(project(':x-pack:plugin:core')) { - exclude group: 'org.elasticsearch', module: 'elasticsearch-rest-high-level-client' - } - testImplementation(project(':modules:ingest-geoip')) { - exclude group: 'com.fasterxml.jackson.core', module: 'jackson-annotations' - } - testImplementation(project(':x-pack:plugin:eql')) - testImplementation(project(':x-pack:plugin:ql:test-fixtures')) -} - -tasks.named('forbiddenApisMain').configure { - // core does not depend on the httpclient for compile so we add the signatures here. We don't add them for test as they are already - // specified - addSignatureFiles 'http-signatures' - signaturesFiles += files('src/main/resources/forbidden/rest-high-level-signatures.txt') -} - -// we don't have tests now, as HLRC is in the process of being removed -tasks.named("test").configure {enabled = false } - -// not published, so no need for javadoc -tasks.named("javadoc").configure { enabled = false } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java deleted file mode 100644 index f28aabe41f4a9..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.analytics; - -import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; -import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.support.AggregationContext; -import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; -import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Map; -import java.util.Objects; - -/** - * Builds the {@code string_stats} aggregation request. - *
<p>
- * NOTE: This extends {@linkplain AbstractAggregationBuilder} for compatibility - * with {@link SearchSourceBuilder#aggregation(AggregationBuilder)} but it - * doesn't support any "server" side things like - * {@linkplain Writeable#writeTo(StreamOutput)}, - * {@linkplain AggregationBuilder#rewrite(QueryRewriteContext)}, or - * {@linkplain AbstractAggregationBuilder#build(AggregationContext, AggregatorFactory)}. - */ -public class StringStatsAggregationBuilder extends ValuesSourceAggregationBuilder { - public static final String NAME = "string_stats"; - private static final ParseField SHOW_DISTRIBUTION_FIELD = new ParseField("show_distribution"); - - private boolean showDistribution = false; - - public StringStatsAggregationBuilder(String name) { - super(name); - } - - /** - * Compute the distribution of each character. Disabled by default. - * @return this for chaining - */ - public StringStatsAggregationBuilder showDistribution(boolean showDistribution) { - this.showDistribution = showDistribution; - return this; - } - - @Override - protected ValuesSourceType defaultValueSourceType() { - return CoreValuesSourceType.KEYWORD; - } - - @Override - public String getType() { - return NAME; - } - - @Override - public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - return builder.field(StringStatsAggregationBuilder.SHOW_DISTRIBUTION_FIELD.getPreferredName(), showDistribution); - } - - @Override - protected void innerWriteTo(StreamOutput out) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public BucketCardinality bucketCardinality() { - return BucketCardinality.NONE; - } - - @Override - protected ValuesSourceAggregatorFactory innerBuild( - AggregationContext context, - ValuesSourceConfig config, - AggregatorFactory parent, - Builder subFactoriesBuilder - ) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map metadata) { - throw new UnsupportedOperationException(); - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), showDistribution); - } - - @Override - public boolean equals(Object obj) { - if (obj == null || getClass() != obj.getClass()) { - return false; - } - if (false == super.equals(obj)) { - return false; - } - StringStatsAggregationBuilder other = (StringStatsAggregationBuilder) obj; - return showDistribution == other.showDistribution; - } - - @Override - public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ZERO; - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/TopMetricsAggregationBuilder.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/TopMetricsAggregationBuilder.java deleted file mode 100644 index 335f96615d607..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/TopMetricsAggregationBuilder.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.client.analytics; - -import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; -import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; -import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.support.AggregationContext; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.sort.SortBuilder; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.Map; - -/** - * Builds the Top Metrics aggregation request. - *
<p>
- * NOTE: This extends {@linkplain AbstractAggregationBuilder} for compatibility - * with {@link SearchSourceBuilder#aggregation(AggregationBuilder)} but it - * doesn't support any "server" side things like - * {@linkplain Writeable#writeTo(StreamOutput)}, - * {@linkplain AggregationBuilder#rewrite(QueryRewriteContext)}, or - * {@linkplain AbstractAggregationBuilder#build(AggregationContext, AggregatorFactory)}. - */ -public class TopMetricsAggregationBuilder extends AbstractAggregationBuilder { - public static final String NAME = "top_metrics"; - - private final SortBuilder sort; - private final int size; - private final List metrics; - - /** - * Build the request. - * @param name the name of the metric - * @param sort the sort key used to select the top metrics - * @param size number of results to return per bucket - * @param metrics the names of the fields to select - */ - public TopMetricsAggregationBuilder(String name, SortBuilder sort, int size, String... metrics) { - super(name); - this.sort = sort; - this.size = size; - this.metrics = Arrays.asList(metrics); - } - - @Override - public String getType() { - return NAME; - } - - @Override - protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { - builder.startArray("sort"); - sort.toXContent(builder, params); - builder.endArray(); - builder.field("size", size); - builder.startArray("metrics"); - for (String metric : metrics) { - builder.startObject().field("field", metric).endObject(); - } - builder.endArray(); - } - return builder.endObject(); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public BucketCardinality bucketCardinality() { - return BucketCardinality.NONE; - } - - @Override - protected AggregatorFactory doBuild(AggregationContext context, AggregatorFactory parent, Builder subfactoriesBuilder) - throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map metadata) { - throw new UnsupportedOperationException(); - } - - @Override - public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_7_0; - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/asyncsearch/AsyncSearchResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/asyncsearch/AsyncSearchResponse.java deleted file mode 100644 index 4ef47f7471e35..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/asyncsearch/AsyncSearchResponse.java +++ /dev/null @@ -1,194 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.client.asyncsearch; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.ChunkedToXContent; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParser.Token; - -import java.io.IOException; - -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - -/** - * A response of an async search request. - */ -public class AsyncSearchResponse implements ToXContentObject { - @Nullable - private final String id; - @Nullable - private final SearchResponse searchResponse; - @Nullable - private final ElasticsearchException error; - private final boolean isRunning; - private final boolean isPartial; - - private final long startTimeMillis; - private final long expirationTimeMillis; - - /** - * Creates an {@link AsyncSearchResponse} with the arguments that are always present in the server response - */ - AsyncSearchResponse( - boolean isPartial, - boolean isRunning, - long startTimeMillis, - long expirationTimeMillis, - @Nullable String id, - @Nullable SearchResponse searchResponse, - @Nullable ElasticsearchException error - ) { - this.isPartial = isPartial; - this.isRunning = isRunning; - this.startTimeMillis = startTimeMillis; - this.expirationTimeMillis = expirationTimeMillis; - this.id = id; - this.searchResponse = searchResponse; - this.error = error; - } - - /** - * Returns the id of the async search request or null if the response is not stored in the cluster. - */ - @Nullable - public String getId() { - return id; - } - - /** - * Returns the current {@link SearchResponse} or null if not available. - * - * See {@link #isPartial()} to determine whether the response contains partial or complete - * results. - */ - public SearchResponse getSearchResponse() { - return searchResponse; - } - - /** - * Returns the failure reason or null if the query is running or has completed normally. - */ - public ElasticsearchException getFailure() { - return error; - } - - /** - * Returns true if the {@link SearchResponse} contains partial - * results computed from a subset of the total shards. - */ - public boolean isPartial() { - return isPartial; - } - - /** - * Whether the search is still running in the cluster. - * - * A value of false indicates that the response is final - * even if {@link #isPartial()} returns true. In such case, - * the partial response represents the status of the search before a - * non-recoverable failure. - */ - public boolean isRunning() { - return isRunning; - } - - /** - * When this response was created as a timestamp in milliseconds since epoch. - */ - public long getStartTime() { - return startTimeMillis; - } - - /** - * When this response will expired as a timestamp in milliseconds since epoch. 
- */ - public long getExpirationTime() { - return expirationTimeMillis; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (id != null) { - builder.field("id", id); - } - builder.field("is_partial", isPartial); - builder.field("is_running", isRunning); - builder.field("start_time_in_millis", startTimeMillis); - builder.field("expiration_time_in_millis", expirationTimeMillis); - - if (searchResponse != null) { - builder.field("response"); - ChunkedToXContent.wrapAsToXContent(searchResponse).toXContent(builder, params); - } - if (error != null) { - builder.startObject("error"); - error.toXContent(builder, params); - builder.endObject(); - } - builder.endObject(); - return builder; - } - - public static final ParseField ID_FIELD = new ParseField("id"); - public static final ParseField IS_PARTIAL_FIELD = new ParseField("is_partial"); - public static final ParseField IS_RUNNING_FIELD = new ParseField("is_running"); - public static final ParseField START_TIME_FIELD = new ParseField("start_time_in_millis"); - public static final ParseField EXPIRATION_FIELD = new ParseField("expiration_time_in_millis"); - public static final ParseField RESPONSE_FIELD = new ParseField("response"); - public static final ParseField ERROR_FIELD = new ParseField("error"); - - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "submit_async_search_response", - true, - args -> new AsyncSearchResponse( - (boolean) args[0], - (boolean) args[1], - (long) args[2], - (long) args[3], - (String) args[4], - (SearchResponse) args[5], - (ElasticsearchException) args[6] - ) - ); - static { - PARSER.declareBoolean(constructorArg(), IS_PARTIAL_FIELD); - PARSER.declareBoolean(constructorArg(), IS_RUNNING_FIELD); - PARSER.declareLong(constructorArg(), START_TIME_FIELD); - PARSER.declareLong(constructorArg(), EXPIRATION_FIELD); - PARSER.declareString(optionalConstructorArg(), ID_FIELD); - PARSER.declareObject(optionalConstructorArg(), (p, c) -> AsyncSearchResponse.parseSearchResponse(p), RESPONSE_FIELD); - PARSER.declareObject(optionalConstructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), ERROR_FIELD); - } - - private static SearchResponse parseSearchResponse(XContentParser p) throws IOException { - // we should be before the opening START_OBJECT of the response - ensureExpectedToken(Token.START_OBJECT, p.currentToken(), p); - p.nextToken(); - return SearchResponse.innerFromXContent(p); - } - - public static AsyncSearchResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - @Override - public String toString() { - return Strings.toString(this); - } -} diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt deleted file mode 100644 index c887a39da44b1..0000000000000 --- a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -# or more contributor license agreements. Licensed under the Elastic License -# 2.0 and the Server Side Public License, v 1; you may not use this file except -# in compliance with, at your election, the Elastic License 2.0 or the Server -# Side Public License, v 1. 
- -@defaultMessage Use Request#createContentType(XContentType) to be sure to pass the right MIME type -org.apache.http.entity.ContentType#create(java.lang.String) -org.apache.http.entity.ContentType#create(java.lang.String,java.lang.String) -org.apache.http.entity.ContentType#create(java.lang.String,java.nio.charset.Charset) -org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameValuePair[]) - -@defaultMessage ES's logging infrastructure uses log4j2 which we don't want to force on high level rest client users -org.elasticsearch.common.logging.DeprecationLogger -org.elasticsearch.common.logging.LogConfigurator -org.elasticsearch.common.logging.LoggerMessageFormat -org.elasticsearch.common.logging.Loggers -org.elasticsearch.common.logging.NodeNamePatternConverter -org.elasticsearch.common.logging.PrefixLogger - -@defaultMessage We can't rely on log4j2 being on the classpath so don't log deprecations! -org.elasticsearch.common.xcontent.LoggingDeprecationHandler - -@defaultMessage Use Nonblocking org.apache.http.nio.entity.NByteArrayEntity -org.apache.http.entity.ByteArrayEntity -org.apache.http.entity.StringEntity diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index 4a8b3da4777a0..850ee3fc71a22 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -19,6 +19,8 @@ final class SystemJvmOptions { static List systemJvmOptions(Settings nodeSettings, final Map sysprops) { + String distroType = sysprops.get("es.distribution.type"); + boolean isHotspot = sysprops.getOrDefault("sun.management.compiler", "").contains("HotSpot"); return Stream.of( /* * Cache ttl in seconds for positive DNS lookups noting that this overrides the JDK security property networkaddress.cache.ttl; @@ -65,10 +67,12 @@ static List systemJvmOptions(Settings nodeSettings, final Map= 21) { + return "--enable-native-access=org.elasticsearch.nativeaccess"; + } + return ""; + } } diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 13c1154a5a8be..86862769c70e3 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -46,9 +46,6 @@ ifeval::["{release-state}"!="unreleased"] :version_qualified: {bare_version} endif::[] -:javadoc-license: {rest-high-level-client-javadoc}/org/elasticsearch/protocol/xpack/license -:javadoc-watcher: {rest-high-level-client-javadoc}/org/elasticsearch/protocol/xpack/watcher - /////// Shared attribute values are pulled from elastic/docs /////// diff --git a/docs/changelog/102726.yaml b/docs/changelog/102726.yaml new file mode 100644 index 0000000000000..bc5b311481123 --- /dev/null +++ b/docs/changelog/102726.yaml @@ -0,0 +1,5 @@ +pr: 102726 +summary: Resolve Cluster API +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/102885.yaml b/docs/changelog/102885.yaml new file mode 100644 index 0000000000000..7a998c3eb1f66 --- /dev/null +++ b/docs/changelog/102885.yaml @@ -0,0 +1,5 @@ +pr: 102885 +summary: Make field limit more predictable +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/103171.yaml b/docs/changelog/103171.yaml new file mode 100644 index 0000000000000..95ad6a1ea77c2 --- /dev/null +++ b/docs/changelog/103171.yaml @@ -0,0 +1,7 @@ +pr: 103171 +summary: "Add `unmatch_mapping_type`, and support array 
of types" +area: Mapping +type: feature +issues: + - 102807 + - 102795 diff --git a/docs/changelog/103481.yaml b/docs/changelog/103481.yaml new file mode 100644 index 0000000000000..f7c7c0b6eecc9 --- /dev/null +++ b/docs/changelog/103481.yaml @@ -0,0 +1,5 @@ +pr: 103481 +summary: Redirect failed ingest node operations to a failure store when available +area: Data streams +type: feature +issues: [] diff --git a/docs/changelog/103511.yaml b/docs/changelog/103511.yaml deleted file mode 100644 index 20a48df914832..0000000000000 --- a/docs/changelog/103511.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103511 -summary: Downsampling supports `date_histogram` with tz -area: Downsampling -type: bug -issues: - - 101309 diff --git a/docs/changelog/103535.yaml b/docs/changelog/103535.yaml new file mode 100644 index 0000000000000..80cf6e1ea709a --- /dev/null +++ b/docs/changelog/103535.yaml @@ -0,0 +1,5 @@ +pr: 103535 +summary: Add replay diagnostic dir to system jvm options +area: Infra/CLI +type: enhancement +issues: [] diff --git a/docs/changelog/103651.yaml b/docs/changelog/103651.yaml new file mode 100644 index 0000000000000..1106044b31fd2 --- /dev/null +++ b/docs/changelog/103651.yaml @@ -0,0 +1,12 @@ +pr: 103651 +summary: Flag in `_field_caps` to return only fields with values in index +area: Search +type: enhancement +issues: [] +highlight: + title: Flag in `_field_caps` to return only fields with values in index + body: |- + We added support for filtering the field capabilities API output by removing + fields that don't have a value. This can be done through the newly added + `include_empty_fields` parameter, which defaults to true. + notable: true diff --git a/docs/changelog/103741.yaml b/docs/changelog/103741.yaml deleted file mode 100644 index 6771ddd329f46..0000000000000 --- a/docs/changelog/103741.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103741 -summary: Limit nesting depth in Exception XContent -area: Infra/Resiliency -type: bug -issues: [] diff --git a/docs/changelog/103817.yaml b/docs/changelog/103817.yaml deleted file mode 100644 index ff8978f1d3776..0000000000000 --- a/docs/changelog/103817.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103817 -summary: Fix deleting index during snapshot finalization -area: Snapshot/Restore -type: bug -issues: - - 101029 diff --git a/docs/changelog/103819.yaml b/docs/changelog/103819.yaml deleted file mode 100644 index ef6e717572cc5..0000000000000 --- a/docs/changelog/103819.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103819 -summary: Add retry logic for 500 and 503 errors for OpenAI -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/104087.yaml b/docs/changelog/104087.yaml deleted file mode 100644 index 614e2d0de7e58..0000000000000 --- a/docs/changelog/104087.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 104087 -summary: Deprecate machine learning on Intel macOS -area: Machine Learning -type: deprecation -issues: [] -deprecation: - title: Deprecate machine learning on Intel macOS - area: Packaging - details: The machine learning plugin will be permanently disabled on macOS x86_64 - in new minor versions released from December 2024 onwards. - impact: To continue to use machine learning functionality on macOS please switch to - an arm64 machine (Apple silicon). Alternatively, it will still be possible to run - Elasticsearch with machine learning enabled in a Docker container on macOS x86_64. 
diff --git a/docs/changelog/104145.yaml b/docs/changelog/104145.yaml deleted file mode 100644 index 41dd1f97ebe8b..0000000000000 --- a/docs/changelog/104145.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104145 -summary: Fix _alias/ returning non-matching data streams -area: Data streams -type: bug -issues: - - 96589 diff --git a/docs/changelog/104198.yaml b/docs/changelog/104198.yaml deleted file mode 100644 index 0b5b4680c2d88..0000000000000 --- a/docs/changelog/104198.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104198 -summary: "[Connector API] Fix bug in configuration validation parser" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/104227.yaml b/docs/changelog/104227.yaml new file mode 100644 index 0000000000000..64dcf844f23f2 --- /dev/null +++ b/docs/changelog/104227.yaml @@ -0,0 +1,6 @@ +pr: 104227 +summary: Avoid wrapping searchers multiple times in mget +area: CRUD +type: enhancement +issues: + - 85069 diff --git a/docs/changelog/104281.yaml b/docs/changelog/104281.yaml deleted file mode 100644 index 087e91d83ab3b..0000000000000 --- a/docs/changelog/104281.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104281 -summary: Data streams fix failure store delete -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/104288.yaml b/docs/changelog/104288.yaml deleted file mode 100644 index 67f54e37cf9dc..0000000000000 --- a/docs/changelog/104288.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104288 -summary: Don't throw error for remote shards that open PIT filtered out -area: Search -type: bug -issues: - - 102596 diff --git a/docs/changelog/104289.yaml b/docs/changelog/104289.yaml deleted file mode 100644 index 9df8f8ecd4add..0000000000000 --- a/docs/changelog/104289.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104289 -summary: Better handling of async processor failures -area: Ingest Node -type: bug -issues: - - 101921 diff --git a/docs/changelog/104314.yaml b/docs/changelog/104314.yaml deleted file mode 100644 index a17e810a2c023..0000000000000 --- a/docs/changelog/104314.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104314 -summary: "[LTR] `FieldValueExtrator` - Checking if fetched values is empty" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/104418.yaml b/docs/changelog/104418.yaml deleted file mode 100644 index d27b66cebea87..0000000000000 --- a/docs/changelog/104418.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104418 -summary: Fix `routing_path` when template has multiple `path_match` and multi-fields -area: TSDB -type: bug -issues: - - 104400 diff --git a/docs/changelog/104440.yaml b/docs/changelog/104440.yaml new file mode 100644 index 0000000000000..4242b7786f05f --- /dev/null +++ b/docs/changelog/104440.yaml @@ -0,0 +1,6 @@ +pr: 104440 +summary: Fix write index resolution when an alias is pointing to a TSDS +area: Data streams +type: bug +issues: + - 104189 diff --git a/docs/changelog/104483.yaml b/docs/changelog/104483.yaml new file mode 100644 index 0000000000000..99917b4e8e017 --- /dev/null +++ b/docs/changelog/104483.yaml @@ -0,0 +1,5 @@ +pr: 104483 +summary: Make `task_type` optional in `_inference` APIs +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/104523.yaml b/docs/changelog/104523.yaml deleted file mode 100644 index d9e7d207dc23a..0000000000000 --- a/docs/changelog/104523.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104523 -summary: "ESQL: Allow grouping by null blocks" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/104574.yaml b/docs/changelog/104574.yaml new file mode 100644 index 
0000000000000..68be002142fd9 --- /dev/null +++ b/docs/changelog/104574.yaml @@ -0,0 +1,10 @@ +pr: 104574 +summary: Deprecate `client.type` +area: Infra/Core +type: deprecation +issues: [] +deprecation: + title: Deprecate `client.type` + area: Cluster and node setting + details: The node setting `client.type` has been ignored since the node client was removed in 8.0. The setting is now deprecated and will be removed in a future release. + impact: Remove the `client.type` setting from `elasticsearch.yml` diff --git a/docs/changelog/104585.yaml b/docs/changelog/104585.yaml deleted file mode 100644 index 8c2b20fe54d0c..0000000000000 --- a/docs/changelog/104585.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104585 -summary: Ingest correctly handle upsert operations and drop processors together -area: Ingest Node -type: bug -issues: - - 36746 diff --git a/docs/changelog/104586.yaml b/docs/changelog/104586.yaml deleted file mode 100644 index db1d01c22eff6..0000000000000 --- a/docs/changelog/104586.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104586 -summary: Reduce the number of Evals `ReplaceMissingFieldWithNull` creates -area: ES|QL -type: bug -issues: - - 104583 diff --git a/docs/changelog/104591.yaml b/docs/changelog/104591.yaml deleted file mode 100644 index 0bd054385753f..0000000000000 --- a/docs/changelog/104591.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104591 -summary: Avoid execute ESQL planning on refresh thread -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/104600.yaml b/docs/changelog/104600.yaml deleted file mode 100644 index 5337116ba37bc..0000000000000 --- a/docs/changelog/104600.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104600 -summary: "[Profiling] Query in parallel on content nodes" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/104606.yaml b/docs/changelog/104606.yaml deleted file mode 100644 index f419c21e0a17d..0000000000000 --- a/docs/changelog/104606.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104606 -summary: Fix bug when `latest` transform is used together with `from` parameter -area: Transform -type: bug -issues: - - 104543 diff --git a/docs/changelog/104614.yaml b/docs/changelog/104614.yaml new file mode 100644 index 0000000000000..9b2c25a643825 --- /dev/null +++ b/docs/changelog/104614.yaml @@ -0,0 +1,6 @@ +pr: 104614 +summary: Extend `repository_integrity` health indicator for unknown and invalid repos +area: Health +type: enhancement +issues: + - 103784 diff --git a/docs/changelog/104636.yaml b/docs/changelog/104636.yaml new file mode 100644 index 0000000000000..d74682f2eba18 --- /dev/null +++ b/docs/changelog/104636.yaml @@ -0,0 +1,5 @@ +pr: 104636 +summary: Modifying request builders +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/104722.yaml b/docs/changelog/104722.yaml deleted file mode 100644 index ed9f2d41ff908..0000000000000 --- a/docs/changelog/104722.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104722 -summary: Avoid possible datafeed infinite loop with filtering aggregations -area: Machine Learning -type: bug -issues: - - 104699 diff --git a/docs/changelog/104802.yaml b/docs/changelog/104802.yaml deleted file mode 100644 index d535318043ca2..0000000000000 --- a/docs/changelog/104802.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104802 -summary: "[Connectors API] Fix bug when triggering a sync job via API" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/104808.yaml b/docs/changelog/104808.yaml deleted file mode 100644 index 7682db085c7a9..0000000000000 --- a/docs/changelog/104808.yaml +++ /dev/null 
@@ -1,5 +0,0 @@ -pr: 104808 -summary: Fix lost headers with chunked responses -area: Network -type: bug -issues: [] diff --git a/docs/changelog/104832.yaml b/docs/changelog/104832.yaml deleted file mode 100644 index 89f837b1c3475..0000000000000 --- a/docs/changelog/104832.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104832 -summary: Limit concurrent shards per node for ESQL -area: ES|QL -type: bug -issues: - - 103666 diff --git a/docs/changelog/104859.yaml b/docs/changelog/104859.yaml new file mode 100644 index 0000000000000..55e5758e31ae2 --- /dev/null +++ b/docs/changelog/104859.yaml @@ -0,0 +1,5 @@ +pr: 104859 +summary: ES - document observing with rejections +area: Infra/Core +type: enhancement +issues: [] diff --git a/docs/changelog/104872.yaml b/docs/changelog/104872.yaml new file mode 100644 index 0000000000000..ad70946be02ae --- /dev/null +++ b/docs/changelog/104872.yaml @@ -0,0 +1,5 @@ +pr: 104872 +summary: Add new int8_flat and flat vector index types +area: Vector Search +type: enhancement +issues: [] diff --git a/docs/changelog/104891.yaml b/docs/changelog/104891.yaml deleted file mode 100644 index 690f2c4b11f88..0000000000000 --- a/docs/changelog/104891.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104891 -summary: "ESQL: Fix `SearchStats#count(String)` to count values not rows" -area: ES|QL -type: bug -issues: - - 104795 diff --git a/docs/changelog/104895.yaml b/docs/changelog/104895.yaml new file mode 100644 index 0000000000000..020dcff891f03 --- /dev/null +++ b/docs/changelog/104895.yaml @@ -0,0 +1,5 @@ +pr: 104895 +summary: Aggs support for Query API Key Information API +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/104904.yaml b/docs/changelog/104904.yaml deleted file mode 100644 index 07e22feb144ed..0000000000000 --- a/docs/changelog/104904.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104904 -summary: Improve `CANNOT_REBALANCE_CAN_ALLOCATE` explanation -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/104905.yaml b/docs/changelog/104905.yaml new file mode 100644 index 0000000000000..80e06dc3b0cf5 --- /dev/null +++ b/docs/changelog/104905.yaml @@ -0,0 +1,6 @@ +pr: 104905 +summary: "Execute lazy rollover with an internal dedicated user #104732" +area: Data streams +type: bug +issues: + - 104732 diff --git a/docs/changelog/104949.yaml b/docs/changelog/104949.yaml new file mode 100644 index 0000000000000..c2682fc911f1d --- /dev/null +++ b/docs/changelog/104949.yaml @@ -0,0 +1,5 @@ +pr: 104949 +summary: Add text_embedding inference service with multilingual-e5 and custom eland models +area: Machine Learning +type: enhancement +issues: [ ] diff --git a/docs/changelog/104958.yaml b/docs/changelog/104958.yaml new file mode 100644 index 0000000000000..936342db03b45 --- /dev/null +++ b/docs/changelog/104958.yaml @@ -0,0 +1,5 @@ +pr: 104958 +summary: "ESQL: Extend STATS command to support aggregate expressions" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/105024.yaml b/docs/changelog/105024.yaml new file mode 100644 index 0000000000000..96268b78ddf5d --- /dev/null +++ b/docs/changelog/105024.yaml @@ -0,0 +1,6 @@ +pr: 105024 +summary: "[Connectors API] Fix bug with crawler configuration parsing and `sync_now`\ + \ flag" +area: Application +type: bug +issues: [] diff --git a/docs/changelog/105044.yaml b/docs/changelog/105044.yaml new file mode 100644 index 0000000000000..5a9a11f928f98 --- /dev/null +++ b/docs/changelog/105044.yaml @@ -0,0 +1,5 @@ +pr: 105044 +summary: Expose `OperationPurpose` via `CustomQueryParameter` to s3 
logs +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/105048.yaml b/docs/changelog/105048.yaml new file mode 100644 index 0000000000000..d865f447a0a93 --- /dev/null +++ b/docs/changelog/105048.yaml @@ -0,0 +1,6 @@ +pr: 105048 +summary: "ES|QL: Fix exception handling on `date_parse` with wrong date pattern" +area: ES|QL +type: bug +issues: + - 104124 diff --git a/docs/changelog/105061.yaml b/docs/changelog/105061.yaml new file mode 100644 index 0000000000000..ae8a36183e0e7 --- /dev/null +++ b/docs/changelog/105061.yaml @@ -0,0 +1,6 @@ +pr: 105061 +summary: "ESQL: Push CIDR_MATCH to Lucene if possible" +area: ES|QL +type: bug +issues: + - 105042 diff --git a/docs/changelog/105062.yaml b/docs/changelog/105062.yaml new file mode 100644 index 0000000000000..928786f62381a --- /dev/null +++ b/docs/changelog/105062.yaml @@ -0,0 +1,5 @@ +pr: 105062 +summary: Nest pass-through objects within objects +area: TSDB +type: enhancement +issues: [] diff --git a/docs/changelog/105064.yaml b/docs/changelog/105064.yaml new file mode 100644 index 0000000000000..81c62b3148f1c --- /dev/null +++ b/docs/changelog/105064.yaml @@ -0,0 +1,17 @@ +pr: 105064 +summary: "ES|QL: remove PROJECT keyword from the grammar" +area: ES|QL +type: breaking +issues: [] +breaking: + title: "ES|QL: remove PROJECT keyword from the grammar" + area: REST API + details: "Removes the PROJECT keyword (an alias for KEEP) from ES|QL grammar" + impact: "Before this change, users could use PROJECT as an alias for KEEP in ESQL queries,\ + \ (eg. 'FROM idx | PROJECT name, surname')\ + \ the parser replaced PROJECT with KEEP, emitted a warning:\ + \ 'PROJECT command is no longer supported, please use KEEP instead'\ + \ and the query was executed normally.\ + \ With this change, PROJECT command is no longer recognized by the query parser;\ + \ queries using PROJECT command now return a parsing exception." 
+ notable: false diff --git a/docs/changelog/105081.yaml b/docs/changelog/105081.yaml new file mode 100644 index 0000000000000..efa686bd7b4a4 --- /dev/null +++ b/docs/changelog/105081.yaml @@ -0,0 +1,6 @@ +pr: 105081 +summary: For empty mappings use a `LocalRelation` +area: ES|QL +type: bug +issues: + - 104809 diff --git a/docs/changelog/105088.yaml b/docs/changelog/105088.yaml new file mode 100644 index 0000000000000..8b5d1fa7f9e02 --- /dev/null +++ b/docs/changelog/105088.yaml @@ -0,0 +1,5 @@ +pr: 105088 +summary: "ESQL: Speed up reading many nulls" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/105089.yaml b/docs/changelog/105089.yaml new file mode 100644 index 0000000000000..6f43c58af8a41 --- /dev/null +++ b/docs/changelog/105089.yaml @@ -0,0 +1,6 @@ +pr: 105089 +summary: Return results in order +area: Transform +type: bug +issues: + - 104847 diff --git a/docs/changelog/105096.yaml b/docs/changelog/105096.yaml new file mode 100644 index 0000000000000..bfc72a6277bb1 --- /dev/null +++ b/docs/changelog/105096.yaml @@ -0,0 +1,5 @@ +pr: 105096 +summary: Harden index mapping parameter check in enrich runner +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/105103.yaml b/docs/changelog/105103.yaml new file mode 100644 index 0000000000000..599d2e3666e4b --- /dev/null +++ b/docs/changelog/105103.yaml @@ -0,0 +1,5 @@ +pr: 105103 +summary: Do not record s3 http request time when it is not available +area: Snapshot/Restore +type: bug +issues: [] diff --git a/docs/changelog/105105.yaml b/docs/changelog/105105.yaml new file mode 100644 index 0000000000000..848a9637d1388 --- /dev/null +++ b/docs/changelog/105105.yaml @@ -0,0 +1,5 @@ +pr: 105105 +summary: Add s3 `HeadObject` request to request stats +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/105131.yaml b/docs/changelog/105131.yaml new file mode 100644 index 0000000000000..36993527da583 --- /dev/null +++ b/docs/changelog/105131.yaml @@ -0,0 +1,5 @@ +pr: 105131 +summary: "[Connector API] Support filtering by name, index name in list action" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/105150.yaml b/docs/changelog/105150.yaml new file mode 100644 index 0000000000000..d9fc3d337f952 --- /dev/null +++ b/docs/changelog/105150.yaml @@ -0,0 +1,5 @@ +pr: 105150 +summary: Remove `SearchException` usages without a proper status code +area: Search +type: bug +issues: [] diff --git a/docs/changelog/105153.yaml b/docs/changelog/105153.yaml new file mode 100644 index 0000000000000..6c6b1f995df4b --- /dev/null +++ b/docs/changelog/105153.yaml @@ -0,0 +1,6 @@ +pr: 105153 +summary: Field-caps should read fields from up-to-dated shards +area: "Search" +type: bug +issues: + - 104809 diff --git a/docs/changelog/105163.yaml b/docs/changelog/105163.yaml new file mode 100644 index 0000000000000..f28bf4de14792 --- /dev/null +++ b/docs/changelog/105163.yaml @@ -0,0 +1,5 @@ +pr: 105163 +summary: Add stable `ThreadPool` constructor to `LogstashInternalBridge` +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/105164.yaml b/docs/changelog/105164.yaml new file mode 100644 index 0000000000000..7affb0911bc6d --- /dev/null +++ b/docs/changelog/105164.yaml @@ -0,0 +1,6 @@ +pr: 105164 +summary: Remove duplicate checkpoint audits +area: Transform +type: bug +issues: + - 105106 diff --git a/docs/changelog/105178.yaml b/docs/changelog/105178.yaml new file mode 100644 index 0000000000000..e8fc9cfd6898f --- /dev/null +++ b/docs/changelog/105178.yaml @@ 
-0,0 +1,5 @@ +pr: 105178 +summary: "[Connector API] Support filtering connectors by service type and a query" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/105180.yaml b/docs/changelog/105180.yaml new file mode 100644 index 0000000000000..ac7ed20f151b7 --- /dev/null +++ b/docs/changelog/105180.yaml @@ -0,0 +1,5 @@ +pr: 105180 +summary: Use new `ignore_dynamic_beyond_limit` in logs and metric data streams +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/105183.yaml b/docs/changelog/105183.yaml new file mode 100644 index 0000000000000..04ec159cf02d0 --- /dev/null +++ b/docs/changelog/105183.yaml @@ -0,0 +1,7 @@ +pr: 105183 +summary: Fix handling surrogate pairs in the XLM Roberta tokenizer +area: Machine Learning +type: bug +issues: + - 104626 + - 104981 diff --git a/docs/changelog/105192.yaml b/docs/changelog/105192.yaml new file mode 100644 index 0000000000000..b15d58ef40fe7 --- /dev/null +++ b/docs/changelog/105192.yaml @@ -0,0 +1,6 @@ +pr: 105192 +summary: Allow transforms to use PIT with remote clusters again +area: Transform +type: enhancement +issues: + - 104518 diff --git a/docs/changelog/105207.yaml b/docs/changelog/105207.yaml new file mode 100644 index 0000000000000..00d227248abfb --- /dev/null +++ b/docs/changelog/105207.yaml @@ -0,0 +1,6 @@ +pr: 105207 +summary: Introduce an `AggregatorReducer` to reduce the footprint of aggregations + in the coordinating node +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/105213.yaml b/docs/changelog/105213.yaml new file mode 100644 index 0000000000000..40595a8166ef2 --- /dev/null +++ b/docs/changelog/105213.yaml @@ -0,0 +1,5 @@ +pr: 105213 +summary: Inference service should reject tasks during shutdown +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/105221.yaml b/docs/changelog/105221.yaml new file mode 100644 index 0000000000000..2ef64ef110d95 --- /dev/null +++ b/docs/changelog/105221.yaml @@ -0,0 +1,14 @@ +pr: 105221 +summary: "ESQL: Grammar - FROM METADATA no longer requires []" +area: ES|QL +type: breaking +issues: [] +breaking: + title: "ESQL: Grammar - FROM METADATA no longer requires []" + area: REST API + details: "Remove [ ] for METADATA option inside FROM command statements" + impact: "Previously to return metadata fields, one had to use square brackets:\ + \ (eg. 'FROM index [METADATA _index]').\ + \ This is no longer needed: the [ ] are dropped and do not have to be specified,\ + \ thus simplifying the command above to:'FROM index METADATA _index'." 
+ notable: false diff --git a/docs/changelog/105228.yaml b/docs/changelog/105228.yaml new file mode 100644 index 0000000000000..7526a3caa81d9 --- /dev/null +++ b/docs/changelog/105228.yaml @@ -0,0 +1,6 @@ +pr: 105228 +summary: Downsampling better handle if source index isn't allocated and fix bug in + retrieving last processed tsid +area: Downsampling +type: bug +issues: [] diff --git a/docs/changelog/105232.yaml b/docs/changelog/105232.yaml new file mode 100644 index 0000000000000..a2ad7ad9451e9 --- /dev/null +++ b/docs/changelog/105232.yaml @@ -0,0 +1,6 @@ +pr: 105232 +summary: Execute SAML authentication on the generic threadpool +area: Authentication +type: bug +issues: + - 104962 diff --git a/docs/changelog/105234.yaml b/docs/changelog/105234.yaml new file mode 100644 index 0000000000000..eac54b948d4f6 --- /dev/null +++ b/docs/changelog/105234.yaml @@ -0,0 +1,6 @@ +pr: 105234 +summary: Do not log warning when triggering an `ABORTING` transform +area: Transform +type: bug +issues: + - 105233 diff --git a/docs/changelog/105245.yaml b/docs/changelog/105245.yaml new file mode 100644 index 0000000000000..f6093f2c7435e --- /dev/null +++ b/docs/changelog/105245.yaml @@ -0,0 +1,6 @@ +pr: 105245 +summary: Finalize all snapshots completed by shard snapshot updates +area: Snapshot/Restore +type: bug +issues: + - 104939 diff --git a/docs/changelog/105249.yaml b/docs/changelog/105249.yaml new file mode 100644 index 0000000000000..979253e452008 --- /dev/null +++ b/docs/changelog/105249.yaml @@ -0,0 +1,5 @@ +pr: 105249 +summary: "[Connector API] Support updating configuration values only" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/105258.yaml b/docs/changelog/105258.yaml new file mode 100644 index 0000000000000..e31e6ec0de749 --- /dev/null +++ b/docs/changelog/105258.yaml @@ -0,0 +1,5 @@ +pr: 105258 +summary: Close `currentChunkedWrite` on client cancel +area: Network +type: bug +issues: [] diff --git a/docs/changelog/105259.yaml b/docs/changelog/105259.yaml new file mode 100644 index 0000000000000..a360bc8bc1672 --- /dev/null +++ b/docs/changelog/105259.yaml @@ -0,0 +1,5 @@ +pr: 105259 +summary: Lower G1 minimum full GC interval +area: Infra/Circuit Breakers +type: enhancement +issues: [] diff --git a/docs/changelog/105269.yaml b/docs/changelog/105269.yaml new file mode 100644 index 0000000000000..acf05b05ecfc4 --- /dev/null +++ b/docs/changelog/105269.yaml @@ -0,0 +1,5 @@ +pr: 105269 +summary: Reserve bytes before serializing page +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/105273.yaml b/docs/changelog/105273.yaml new file mode 100644 index 0000000000000..83db9eac2a14a --- /dev/null +++ b/docs/changelog/105273.yaml @@ -0,0 +1,6 @@ +pr: 105273 +summary: "x-pack/plugin/core: make automatic rollovers lazy" +area: Data streams +type: enhancement +issues: + - 104083 diff --git a/docs/changelog/105289.yaml b/docs/changelog/105289.yaml new file mode 100644 index 0000000000000..a51778a93beb8 --- /dev/null +++ b/docs/changelog/105289.yaml @@ -0,0 +1,5 @@ +pr: 105289 +summary: "[Connector API] Change required privileges to indices:data/read(write)" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/105293.yaml b/docs/changelog/105293.yaml new file mode 100644 index 0000000000000..33eb3884a7e53 --- /dev/null +++ b/docs/changelog/105293.yaml @@ -0,0 +1,6 @@ +pr: 105293 +summary: Fix leaked HTTP response sent after close +area: Network +type: bug +issues: + - 104651 diff --git a/docs/changelog/105299.yaml 
b/docs/changelog/105299.yaml new file mode 100644 index 0000000000000..b1f9b3ac4a2aa --- /dev/null +++ b/docs/changelog/105299.yaml @@ -0,0 +1,6 @@ +pr: 105299 +summary: Conditionally send the dimensions field as part of the openai requests +area: Machine Learning +type: enhancement +issues: + - 105005 diff --git a/docs/changelog/105306.yaml b/docs/changelog/105306.yaml new file mode 100644 index 0000000000000..7b75c370901ab --- /dev/null +++ b/docs/changelog/105306.yaml @@ -0,0 +1,5 @@ +pr: 105306 +summary: Fix race in HTTP response shutdown handling +area: Network +type: bug +issues: [] diff --git a/docs/changelog/105334.yaml b/docs/changelog/105334.yaml new file mode 100644 index 0000000000000..498fdf4113b3c --- /dev/null +++ b/docs/changelog/105334.yaml @@ -0,0 +1,6 @@ +pr: 105334 +summary: Upgrade ANTLR4 to 4.13.1 +area: Query Languages +type: upgrade +issues: + - 102953 diff --git a/docs/changelog/105346.yaml b/docs/changelog/105346.yaml new file mode 100644 index 0000000000000..7c6eab93f6c10 --- /dev/null +++ b/docs/changelog/105346.yaml @@ -0,0 +1,5 @@ +pr: 105346 +summary: Allow GET inference models by a user with read only permission +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/105403.yaml b/docs/changelog/105403.yaml new file mode 100644 index 0000000000000..f855c0e8ed94f --- /dev/null +++ b/docs/changelog/105403.yaml @@ -0,0 +1,6 @@ +pr: 105403 +summary: "ESQL: make `cidr_match` foldable" +area: ES|QL +type: bug +issues: + - 105376 diff --git a/docs/changelog/99747.yaml b/docs/changelog/99747.yaml new file mode 100644 index 0000000000000..94aefbf25d8e5 --- /dev/null +++ b/docs/changelog/99747.yaml @@ -0,0 +1,5 @@ +pr: 99747 +summary: TSDB dimensions encoding +area: TSDB +type: enhancement +issues: [] diff --git a/docs/internal/DistributedArchitectureGuide.md b/docs/internal/DistributedArchitectureGuide.md new file mode 100644 index 0000000000000..ea5942ff71cc8 --- /dev/null +++ b/docs/internal/DistributedArchitectureGuide.md @@ -0,0 +1,281 @@ +# Distributed Area Team Internals + +(Summary, brief discussion of our features) + +# Networking + +### ThreadPool + +(We have many thread pools, what and why) + +### ActionListener + +`ActionListener`s are a means of injecting logic into lower layers of the code. They encapsulate a block of code that takes a response +value -- the `onResponse()` method -- and then that block of code (the `ActionListener`) is passed into a function that will eventually +execute the code (call `onResponse()`) when a response value is available. `ActionListener`s are used to pass code down to act on a result, +rather than lower layers returning a result back up to be acted upon by the caller. One of three things can happen to a listener: it can be +executed in the same thread -- e.g. `ActionListener.run()` --; it can be passed off to another thread to be executed; or it can be added to +a list someplace, to eventually be executed by some service. `ActionListener`s also define `onFailure()` logic, in case an error is +encountered before a result can be formed. + +This pattern is often used in the transport action layer with the use of the +[ChannelActionListener](https://github.com/elastic/elasticsearch/blob/8.12/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java) +class, which wraps a `TransportChannel` produced by the transport layer. `TransportChannel` implementations can hold a reference to a Netty +channel with which to pass the response back to the network caller.
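+
+A minimal, self-contained sketch of the callback shape described above (hypothetical `Listener` type and method names, not the real `ActionListener` API):
+
+```java
+// Simplified stand-in for the listener pattern: one success path, one failure path.
+interface Listener<T> {
+    void onResponse(T response);
+    void onFailure(Exception e);
+}
+
+class ListenerExample {
+    // The caller hands its follow-up logic down; the lower layer decides when to invoke it.
+    static void fetchCount(Listener<Integer> listener) {
+        try {
+            int result = 42; // pretend this came from a shard or a remote node
+            listener.onResponse(result);
+        } catch (Exception e) {
+            listener.onFailure(e);
+        }
+    }
+
+    public static void main(String[] args) {
+        fetchCount(new Listener<>() {
+            @Override
+            public void onResponse(Integer count) {
+                System.out.println("got " + count);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                e.printStackTrace();
+            }
+        });
+    }
+}
+```
+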
Netty has a many-to-one association of network callers to channels, so +a call taking a long time generally won't hog resources: it's cheap. A transport action can take hours to respond and that's alright, +barring caller timeouts. + +(TODO: add useful starter references and explanations for a range of Listener classes. Reference the Netty section.) + +### REST Layer + +(including how REST and Transport layers are bound together through the ActionModule) + +### Transport Layer + +### Chunk Encoding + +#### XContent + +### Performance + +### Netty + +(long running actions should be forked off of the Netty thread. Keep short operations to avoid forking costs) + +### Work Queues + +# Cluster Coordination + +(Sketch of important classes? Might inform more sections to add for details.) + +(A NodeB can coordinate a search across several other nodes, when NodeB itself does not have the data, and then return a result to the caller. Explain this coordinating role) + +### Node Roles + +### Master Nodes + +### Master Elections + +(Quorum, terms, any eligibility limitations) + +### Cluster Formation / Membership + +(Explain joining, and how it happens every time a new master is elected) + +#### Discovery + +### Master Transport Actions + +### Cluster State + +#### Master Service + +#### Cluster State Publication + +(Majority consensus to apply, what happens if a master-eligible node falls behind / is incommunicado.) + +#### Cluster State Application + +(Go over the two kinds of listeners -- ClusterStateApplier and ClusterStateListener?) + +#### Persistence + +(Sketch ephemeral vs persisted cluster state.) + +(what's the format for persisted metadata) + +# Replication + +(More Topics: ReplicationTracker concepts / highlights.) + +### What is a Shard + +### Primary Shard Selection + +(How a primary shard is chosen) + +#### Versioning + +(terms and such) + +### How Data Replicates + +(How an index write replicates across shards -- TransportReplicationAction?) + +### Consistency Guarantees + +(What guarantees do we give the user about persistence and readability?) + +# Locking + +(rarely use locks) + +### ShardLock + +### Translog / Engine Locking + +### Lucene Locking + +# Engine + +(What does Engine mean in the distrib layer? Distinguish Engine vs Directory vs Lucene) + +(High level explanation of how translog ties in with Lucene) + +(contrast Lucene vs ES flush / refresh / fsync) + +### Refresh for Read + +(internal vs external reader manager refreshes? flush vs refresh) + +### Reference Counting + +### Store + +(Data lives beyond a high level IndexShard instance. Continue to exist until all references to the Store go away, then Lucene data is removed) + +### Translog + +(Explain checkpointing and generations, when happens on Lucene flush / fsync) + +(Concurrency control for flushing) + +(VersionMap) + +#### Translog Truncation + +#### Direct Translog Read + +### Index Version + +### Lucene + +(copy a sketch of the files Lucene can have here and explain) + +(Explain about SearchIndexInput -- IndexWriter, IndexReader -- and the shared blob cache) + +(Lucene uses Directory, ES extends/overrides the Directory class to implement different forms of file storage. +Lucene contains a map of where all the data is located in files and offsets, and fetches it from various files. +ES doesn't just treat Lucene as a storage engine at the bottom (the end) of the stack. Rather, ES has other information that +works in parallel with the storage engine.)
+ +#### Segment Merges + +# Recovery + +(All shards go through a 'recovery' process. Describe high level. createShard goes through this code.) + +(How is the translog involved in recovery?) + +### Create a Shard + +### Local Recovery + +### Peer Recovery + +### Snapshot Recovery + +### Recovery Across Server Restart + +(partial shard recoveries survive server restart? `reestablishRecovery`? How does that work.) + +### How a Recovery Method is Chosen + +# Data Tiers + +(Frozen, warm, hot, etc.) + +# Allocation + +(AllocationService runs on the master node) + +(Discuss different deciders that limit allocation. Sketch / list the different deciders that we have.) + +### APIs for Balancing Operations + +(Significant internal APIs for balancing a cluster) + +### Heuristics for Allocation + +### Cluster Reroute Command + +(How does this command behave with the desired auto balancer.) + +# Autoscaling + +(Reactive and proactive autoscaling. Explain that we surface recommendations, how control plane uses it.) + +(Sketch / list the different deciders that we have, and then also how we use information from each to make a recommendation.) + +# Snapshot / Restore + +(We've got some good package level documentation that should be linked here in the intro) + +(copy a sketch of the file system here, with explanation -- good reference) + +### Snapshot Repository + +### Creation of a Snapshot + +(Include an overview of the coordination between data and master nodes, which writes what and when) + +(Concurrency control: generation numbers, pending generation number, etc.) + +(partial snapshots) + +### Deletion of a Snapshot + +### Restoring a Snapshot + +### Detecting Multiple Writers to a Single Repository + +# Task Management / Tracking + +(How we identify operations/tasks in the system and report upon them. How we group operations via parent task ID.) + +### What Tasks Are Tracked + +### Tracking A Task Across Threads + +### Tracking A Task Across Nodes + +### Kill / Cancel A Task + +### Persistent Tasks + +# Cross Cluster Replication (CCR) + +(Brief explanation of the use case for CCR) + +(Explain how this works at a high level, and details of any significant components / ideas.) + +### Cross Cluster Search + +# Indexing / CRUD + +(Explain that the Distributed team is responsible for the write path, while the Search team owns the read path.) + +(Generating document IDs. Same across shard replicas, \_id field) + +(Sequence number: different than ID) + +### Reindex + +### Locking + +(what limits write concurrency, and how do we minimize) + +### Soft Deletes + +### Refresh + +(explain visibility of writes, and reference the Lucene section for more details (whatever makes more sense explained there)) + +# Server Startup + +# Server Shutdown + +### Closing a Shard + +(this can also happen during shard reallocation, right? This might be a standalone topic, or need another section about it in allocation?...) diff --git a/docs/internal/GeneralArchitectureGuide.md b/docs/internal/GeneralArchitectureGuide.md new file mode 100644 index 0000000000000..f865277d07f8f --- /dev/null +++ b/docs/internal/GeneralArchitectureGuide.md @@ -0,0 +1,25 @@ +# General Architecture + +## Transport Actions + +## Serializations + +## Settings + +## Deprecations + +## Plugins + +(what warrants a plugin?) + +(what plugins do we have?) + +## Testing + +(Overview of our testing frameworks. Discuss base test classes.) 
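+
+A hypothetical sketch of a test built on the usual randomized base class (assuming the test framework, i.e. `ESTestCase` and its helpers, is on the classpath):
+
+```java
+import org.elasticsearch.test.ESTestCase;
+
+public class ExampleTests extends ESTestCase {
+
+    // The base class seeds the randomized context, so a failure is reproducible from the printed seed.
+    public void testRepeatedStringLength() {
+        String name = randomAlphaOfLength(10);
+        int copies = randomIntBetween(1, 5);
+        assertEquals(10 * copies, name.repeat(copies).length());
+    }
+}
+```
+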
+ +### Unit Testing + +### REST Testing + +### Integration Testing diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index 192d6e795893b..a055c278b41d9 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -246,7 +246,7 @@ on. `list_executed_pipelines`:: (Optional, Boolean) If `true`, the response will include the ingest pipelines that -were executed for each `index` or ``create`. +were executed for each `index` or `create`. Defaults to `false`. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=pipeline] diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index f3b3dd824fb22..94bd38cd0ec28 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -32,7 +32,7 @@ include::processing-commands/limit.asciidoc[tag=limitation] * `long` * `null` * `text` -* `unsigned_long` +* experimental:[] `unsigned_long` * `version` * Spatial types ** `geo_point` @@ -73,6 +73,16 @@ unsupported type is not explicitly used in a query, it is returned with `null` values, with the exception of nested fields. Nested fields are not returned at all. +[discrete] +[[esql-_source-availability]] +=== _source availability + +{esql} does not support configurations where the +<> is <>. + +experimental:[] {esql}'s support for <> +is currently experimental. + [discrete] [[esql-limitations-full-text-search]] === Full-text search is not supported diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc index e1e27be12a36f..d7fa25a5a8d4f 100644 --- a/docs/reference/esql/esql-query-api.asciidoc +++ b/docs/reference/esql/esql-query-api.asciidoc @@ -67,6 +67,10 @@ precedence. `false`. The API only supports this parameter for CBOR, JSON, SMILE, and YAML responses. See <>. +`locale`:: +(Optional, string) Returns results (especially dates) formatted per the conventions of the locale. +For syntax, refer to <>. + `params`:: (Optional, array) Values for parameters in the `query`. For syntax, refer to <>. diff --git a/docs/reference/esql/esql-rest.asciidoc b/docs/reference/esql/esql-rest.asciidoc index d66ceb2eb4f1e..fc06cfea904af 100644 --- a/docs/reference/esql/esql-rest.asciidoc +++ b/docs/reference/esql/esql-rest.asciidoc @@ -204,6 +204,33 @@ Which returns: } ---- +[discrete] +[[esql-locale-param]] +==== Returning localized results + +Use the `locale` parameter in the request body to return results (especially dates) formatted per the conventions of the locale. +If `locale` is not specified, defaults to `en-US` (English). +Refer to https://www.oracle.com/java/technologies/javase/jdk17-suported-locales.html[JDK Supported Locales]. + +Syntax: the `locale` parameter accepts language tags in the (case-insensitive) format `xy` and `xy-XY`. 
+ +For example, to return a month name in French: + +[source,console] +---- +POST /_query +{ + "locale": "fr-FR", + "query": """ + ROW birth_date_string = "2023-01-15T00:00:00.000Z" + | EVAL birth_date = date_parse(birth_date_string) + | EVAL month_of_birth = DATE_FORMAT("MMMM",birth_date) + | LIMIT 5 + """ +} +---- +// TEST[setup:library] + [discrete] [[esql-rest-params]] ==== Passing parameters to a query diff --git a/docs/reference/esql/functions/auto_bucket.asciidoc b/docs/reference/esql/functions/auto_bucket.asciidoc index aedfdaa7c0e12..651ac168aa83a 100644 --- a/docs/reference/esql/functions/auto_bucket.asciidoc +++ b/docs/reference/esql/functions/auto_bucket.asciidoc @@ -2,6 +2,8 @@ [[esql-auto_bucket]] === `AUTO_BUCKET` +experimental::[] + *Syntax* [source,esql] diff --git a/docs/reference/esql/functions/date-time-functions.asciidoc b/docs/reference/esql/functions/date-time-functions.asciidoc index f90bc007f744e..e9d6628c63894 100644 --- a/docs/reference/esql/functions/date-time-functions.asciidoc +++ b/docs/reference/esql/functions/date-time-functions.asciidoc @@ -8,7 +8,7 @@ {esql} supports these date-time functions: // tag::date_list[] -* <> +* experimental:[] <> * <> * <> * <> diff --git a/docs/reference/esql/functions/log.asciidoc b/docs/reference/esql/functions/log.asciidoc new file mode 100644 index 0000000000000..79ea72898bc2f --- /dev/null +++ b/docs/reference/esql/functions/log.asciidoc @@ -0,0 +1,48 @@ +[discrete] +[[esql-log]] +=== `LOG` + +*Syntax* + +[source,esql] +---- +LOG([base,] value) +---- + +*Parameters* + +`base`:: +Numeric expression. If `null`, the function returns `null`. The base is an optional input parameter. If a base is not provided, this function returns the natural logarithm (base e) of a value. + +`value`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +Returns the logarithm of a value to a base. The input can be any numeric value; the return value is always a double. + +Logs of zero, negative numbers, infinities, and a base of one return `null` as well as a warning.
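+
+A rough, hypothetical illustration of the underlying math only (plain Java, not the {esql} implementation): the function applies the change-of-base rule and yields `null` for the invalid inputs listed above.
+
+[source,java]
+----
+class LogIllustration {
+    // null for zero, negative values, infinities, or a base of one; a double otherwise
+    static Double log(double base, double value) {
+        if (value <= 0 || base <= 0 || base == 1 || Double.isInfinite(value) || Double.isInfinite(base)) {
+            return null; // the real function also emits a warning
+        }
+        return Math.log(value) / Math.log(base); // change of base: log_b(x) = ln(x) / ln(b)
+    }
+
+    public static void main(String[] args) {
+        System.out.println(log(2, 8));   // ~3.0
+        System.out.println(log(10, -1)); // null
+    }
+}
+----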
+ +*Supported types* + +include::types/log.asciidoc[] + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/math.csv-spec[tag=log] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/math.csv-spec[tag=log-result] +|=== + +[source.merge.styled,esql] +---- +include::{esql-specs}/math.csv-spec[tag=logUnary] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/math.csv-spec[tag=logUnary-result] +|=== diff --git a/docs/reference/esql/functions/math-functions.asciidoc b/docs/reference/esql/functions/math-functions.asciidoc index 21131ae9074d7..0ddf7412db2a1 100644 --- a/docs/reference/esql/functions/math-functions.asciidoc +++ b/docs/reference/esql/functions/math-functions.asciidoc @@ -18,6 +18,7 @@ * <> * <> * <> +* <> * <> * <> * <> @@ -40,6 +41,7 @@ include::cos.asciidoc[] include::cosh.asciidoc[] include::e.asciidoc[] include::floor.asciidoc[] +include::log.asciidoc[] include::log10.asciidoc[] include::pi.asciidoc[] include::pow.asciidoc[] diff --git a/docs/reference/esql/functions/signature/log.svg b/docs/reference/esql/functions/signature/log.svg new file mode 100644 index 0000000000000..39a9a7e8dc52e --- /dev/null +++ b/docs/reference/esql/functions/signature/log.svg @@ -0,0 +1 @@ +LOG(base,value) \ No newline at end of file diff --git a/docs/reference/esql/functions/to_unsigned_long.asciidoc b/docs/reference/esql/functions/to_unsigned_long.asciidoc index a4a6cfd54ed6f..f6b8d8cf7a973 100644 --- a/docs/reference/esql/functions/to_unsigned_long.asciidoc +++ b/docs/reference/esql/functions/to_unsigned_long.asciidoc @@ -2,6 +2,8 @@ [[esql-to_unsigned_long]] === `TO_UNSIGNED_LONG` +experimental::[] + *Aliases* `TO_ULONG`, `TO_UL` diff --git a/docs/reference/esql/functions/type-conversion-functions.asciidoc b/docs/reference/esql/functions/type-conversion-functions.asciidoc index 611e1f7fddfb4..eb19f1968ebde 100644 --- a/docs/reference/esql/functions/type-conversion-functions.asciidoc +++ b/docs/reference/esql/functions/type-conversion-functions.asciidoc @@ -21,7 +21,7 @@ * <> * <> * <> -* <> +* experimental:[] <> * <> // end::type_list[] diff --git a/docs/reference/esql/functions/types/log.asciidoc b/docs/reference/esql/functions/types/log.asciidoc new file mode 100644 index 0000000000000..d72ea848c349f --- /dev/null +++ b/docs/reference/esql/functions/types/log.asciidoc @@ -0,0 +1,20 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +base | value | result +double | double | double +double | integer | double +double | long | double +double | unsigned_long | double +integer | double | double +integer | integer | double +integer | long | double +integer | unsigned_long | double +long | double | double +long | integer | double +long | long | double +long | unsigned_long | double +unsigned_long | double | double +unsigned_long | integer | double +unsigned_long | long | double +unsigned_long | unsigned_long | double +|=== diff --git a/docs/reference/esql/metadata-fields.asciidoc b/docs/reference/esql/metadata-fields.asciidoc index eb08ee085de38..f06c9cad26f12 100644 --- a/docs/reference/esql/metadata-fields.asciidoc +++ b/docs/reference/esql/metadata-fields.asciidoc @@ -22,15 +22,15 @@ to be provided with a dedicated directive: [source,esql] ---- -FROM index [METADATA _index, _id] +FROM index METADATA _index, _id ---- Metadata fields are only available if the source of the data is an index. Consequently, `FROM` is the only source commands that supports the `METADATA` directive. 
-Once enabled, the fields are then available to subsequent processing commands, just -like the other index fields: +Once enabled, these fields will be available to subsequent processing commands, just +like other index fields: [source.merge.styled,esql] ---- @@ -41,9 +41,9 @@ include::{esql-specs}/metadata-IT_tests_only.csv-spec[tag=multipleIndices] include::{esql-specs}/metadata-IT_tests_only.csv-spec[tag=multipleIndices-result] |=== -Also, similar to the index fields, once an aggregation is performed, a +Similar to index fields, once an aggregation is performed, a metadata field will no longer be accessible to subsequent commands, unless -used as grouping field: +used as a grouping field: [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/processing-commands/drop.asciidoc b/docs/reference/esql/processing-commands/drop.asciidoc index 4787c5f137314..8f03141d5e05a 100644 --- a/docs/reference/esql/processing-commands/drop.asciidoc +++ b/docs/reference/esql/processing-commands/drop.asciidoc @@ -22,7 +22,7 @@ The `DROP` processing command removes one or more columns. [source,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=dropheight] +include::{esql-specs}/drop.csv-spec[tag=height] ---- Rather than specify each column by name, you can use wildcards to drop all @@ -30,5 +30,5 @@ columns with a name that matches a pattern: [source,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=dropheightwithwildcard] +include::{esql-specs}/drop.csv-spec[tag=heightWithWildcard] ---- diff --git a/docs/reference/esql/processing-commands/keep.asciidoc b/docs/reference/esql/processing-commands/keep.asciidoc index 7515583b1bfd1..57f32a68aec4c 100644 --- a/docs/reference/esql/processing-commands/keep.asciidoc +++ b/docs/reference/esql/processing-commands/keep.asciidoc @@ -10,6 +10,7 @@ KEEP columns ---- *Parameters* + `columns`:: A comma-separated list of columns to keep. Supports wildcards. @@ -18,6 +19,17 @@ A comma-separated list of columns to keep. Supports wildcards. The `KEEP` processing command enables you to specify what columns are returned and the order in which they are returned. +Precedence rules are applied when a field name matches multiple expressions. +Fields are added in the order they appear. If one field matches multiple expressions, the following precedence rules apply (from highest to lowest priority): + +1. Complete field name (no wildcards) +2. Partial wildcard expressions (for example: `fieldNam*`) +3. Wildcard only (`*`) + +If a field matches two expressions with the same precedence, the right-most expression wins. + +Refer to the examples for illustrations of these precedence rules. + *Examples* The columns are returned in the specified order: @@ -38,12 +50,58 @@ columns with a name that matches a pattern: ---- include::{esql-specs}/docs.csv-spec[tag=keepWildcard] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=keep-wildcard-result] +|=== The asterisk wildcard (`*`) by itself translates to all columns that do not -match the other arguments. This query will first return all columns with a name +match the other arguments. 
+ +This query will first return all columns with a name that starts with `h`, followed by all other columns: [source,esql] ---- include::{esql-specs}/docs.csv-spec[tag=keepDoubleWildcard] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=keep-double-wildcard-result] +|=== + +The following examples show how precedence rules work when a field name matches multiple expressions. + +Complete field name has precedence over wildcard expressions: + +[source,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=keepCompleteName] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=keep-complete-name-result] +|=== + +Wildcard expressions have the same priority, but last one wins (despite being less specific): + +[source,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=keepWildcardPrecedence] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=keep-wildcard-precedence-result] +|=== + +A simple wildcard expression `*` has the lowest precedence. +Output order is determined by the other arguments: + +[source,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=keepWildcardLowest] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=keep-wildcard-lowest-result] +|=== diff --git a/docs/reference/esql/processing-commands/limit.asciidoc b/docs/reference/esql/processing-commands/limit.asciidoc index 5f659fc493a75..4ccf3024a4c1e 100644 --- a/docs/reference/esql/processing-commands/limit.asciidoc +++ b/docs/reference/esql/processing-commands/limit.asciidoc @@ -43,5 +43,5 @@ settings: [source,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=limit] +include::{esql-specs}/limit.csv-spec[tag=basic] ---- diff --git a/docs/reference/esql/source-commands/from.asciidoc b/docs/reference/esql/source-commands/from.asciidoc index 6f54a42ddad35..5263a17b48df9 100644 --- a/docs/reference/esql/source-commands/from.asciidoc +++ b/docs/reference/esql/source-commands/from.asciidoc @@ -66,9 +66,9 @@ or aliases: FROM employees-00001,other-employees-* ---- -Use the `METADATA` directive to enable <>: +Use the optional `METADATA` directive to enable <>: [source,esql] ---- -FROM employees [METADATA _id] +FROM employees METADATA _id ---- diff --git a/docs/reference/esql/source-commands/show.asciidoc b/docs/reference/esql/source-commands/show.asciidoc index ea8c83ceb772a..1913c60660b93 100644 --- a/docs/reference/esql/source-commands/show.asciidoc +++ b/docs/reference/esql/source-commands/show.asciidoc @@ -12,7 +12,7 @@ SHOW item *Parameters* `item`:: -Can be `INFO` or `FUNCTIONS`. +Can be `INFO` or experimental:[] `FUNCTIONS`. *Description* @@ -20,7 +20,7 @@ The `SHOW` source command returns information about the deployment and its capabilities: * Use `SHOW INFO` to return the deployment's version, build date and hash. -* Use `SHOW FUNCTIONS` to return a list of all supported functions and a +* Use experimental:[] `SHOW FUNCTIONS` to return a list of all supported functions and a synopsis of each function. *Examples* diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index 10858a8ab0744..eaef54a1effb1 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -20,6 +20,7 @@ index settings, aliases, mappings, and index templates. 
* <> * <> * <> +* <> * <> [discrete] @@ -130,6 +131,7 @@ include::indices/dangling-indices-list.asciidoc[] include::indices/open-close.asciidoc[] include::indices/refresh.asciidoc[] include::indices/resolve.asciidoc[] +include::indices/resolve-cluster.asciidoc[] include::indices/rollover-index.asciidoc[] include::indices/shrink-index.asciidoc[] include::indices/simulate-index.asciidoc[] diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index 1f0a79258bd37..25d39a17af306 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -81,7 +81,7 @@ Defaults to `open`. If `true`, the request forces a flush even if there are no changes to commit to the index. -Defaults to `true`. +Defaults to `false`. You can use this parameter to increment the generation number of the transaction log. diff --git a/docs/reference/indices/resolve-cluster.asciidoc b/docs/reference/indices/resolve-cluster.asciidoc new file mode 100644 index 0000000000000..8fa53bfc27056 --- /dev/null +++ b/docs/reference/indices/resolve-cluster.asciidoc @@ -0,0 +1,269 @@ +[[indices-resolve-cluster-api]] +=== Resolve cluster API +++++ +Resolve cluster +++++ + +Resolves the specified index expressions to return information about +each cluster, including the local cluster, if included. + +This endpoint is useful before doing a <> in +order to determine which remote clusters should be included in a search. + +You use the same index expression with this endpoint as you would for cross-cluster +search. Index and <> are also supported +with this endpoint. + +For each cluster in the index expression, information is returned about: + +1. whether the querying ("local") cluster is currently connected to each remote cluster + in the index expression scope +2. whether each remote cluster is configured with `skip_unavailable` as `true` or `false` +3. whether there are any indices, aliases or data streams on that cluster that match + the index expression +4. whether the search is likely to have errors returned when you do the {ccs} (including any + authorization errors if your user does not have permission to query the index) +5. cluster version information, including the Elasticsearch server version + +//// +[source,console] +-------------------------------- +PUT _cluster/settings +{ + "persistent": { + "cluster": { + "remote": { + "cluster_one": { + "seeds": [ + "35.238.149.1:9300" + ], + "skip_unavailable": true + }, + "cluster_two": { + "seeds": [ + "35.238.149.2:9300" + ], + "skip_unavailable": false + } + } + } + } +} +-------------------------------- +// TEST[setup:host] +// TEST[s/35.238.149.\d+:930\d+/\${transport_host}/] +//// + +[source,console] +---- +GET /_resolve/cluster/my-index-*,cluster*:my-index-* +---- +// TEST[continued] + +This will return information about the local cluster and all remotely configured +clusters that start with the alias `cluster*`. Each cluster will return information +about whether it has any indices, aliases or data streams that match `my-index-*`. + +[[resolve-cluster-api-request]] +==== {api-request-title} + +`GET /_resolve/cluster/` + +[[resolve-cluster-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the +`view_index_metadata`, `read`, or `manage` <> for the target data stream, index, or alias. 
+ +[[resolve-cluster-api-path-params]] +==== {api-path-parms-title} + +``:: ++ +-- +(Required, string) Comma-separated name(s) or index pattern(s) of the +indices, aliases, and data streams to resolve, using <>. +Resources on <> can be specified using the +`:` syntax. +-- + +[[resolve-cluster-api-query-params]] +==== {api-query-parms-title} + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `open`. + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] ++ +Defaults to `false`. + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] ++ +Defaults to `true`. + +`ignore_throttled`:: +(Optional, Boolean) If `true`, concrete, expanded or aliased indices are +ignored when frozen. Defaults to `false`. ++ +deprecated:[7.16.0] + + +[discrete] +[[usecases-for-resolve-cluster]] +=== Advantages of using this endpoint before a {ccs} + +You may want to exclude a cluster or index from a search when: + +1. A remote cluster is not currently connected and is configured with `skip_unavailable`=`false`. +Executing a {ccs} under those conditions will cause +<>. + +2. A cluster has no matching indices, aliases or data streams for the index expression +(or your user does not have permissions to search them). For example, suppose your +index expression is `logs*,remote1:logs*` and the `remote1` cluster has no indices, aliases or data +streams that match `logs*`. In that case, that cluster will return no results from that cluster if +you include it in a {ccs}. + +3. The index expression (combined with any query parameters you specify) will likely cause an exception +to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response +will be present. (This is also where security/permission errors will be shown.) + +4. A remote cluster is an older version that does not support the feature you want to +use in your search. + + +[[resolve-cluster-api-example]] +==== {api-examples-title} + +[source,console] +---- +GET /_resolve/cluster/my-index*,clust*:my-index* +---- +// TEST[continued] +// TEST[setup:my_index] + +The API returns the following response: + +[source,console-result] +---- +{ + "(local)": { <1> + "connected": true, + "skip_unavailable": false, + "matching_indices": true, + "version": { + "number": "8.13.0", + "build_flavor": "default", + "minimum_wire_compatibility_version": "7.17.0", + "minimum_index_compatibility_version": "7.0.0" + } + }, + "cluster_one": { + "connected": true, <2> + "skip_unavailable": true, <3> + "matching_indices": true, <4> + "version": { + "number": "8.13.0", <5> + "build_flavor": "default", + "minimum_wire_compatibility_version": "7.17.0", + "minimum_index_compatibility_version": "7.0.0" + } + }, + "cluster_two": { + "connected": true, + "skip_unavailable": false, + "matching_indices": true, + "version": { + "number": "8.13.0", + "build_flavor": "default", + "minimum_wire_compatibility_version": "7.17.0", + "minimum_index_compatibility_version": "7.0.0" + } + } +} +---- +// TESTRESPONSE[s/"number": "8.13.0"/"number": "$body.$_path"/] +// TESTRESPONSE[s/"minimum_wire_compatibility_version": "7.17.0"/"minimum_wire_compatibility_version": "$body.$_path"/] +// TESTRESPONSE[s/"minimum_index_compatibility_version": "7.0.0"/"minimum_index_compatibility_version": "$body.$_path"/] + +<1> Each cluster has its own response section. The cluster you sent the request to is labelled as "(local)". 
+<2> The querying cluster attempts to make a request to each remote cluster. If successful, `connected`=`true`. +<3> The `skip_unavailable` setting for each remote cluster, as configured on the local cluster. +<4> Indicates whether any index, alias or data stream matches the index expression specified for that cluster. +<5> The Elasticsearch server version. + + +[discrete] +[[resolve-cluster-api-error-example]] +==== Identifying potential problems with your {ccs} + +The following request shows several examples of how modifying your query can +prevent search failures. + +[source,console] +-------------------------------------------------- +GET /_resolve/cluster/not-present,clust*:my-index*,oldcluster:*?ignore_unavailable=false +-------------------------------------------------- +// TEST[continued] +// TEST[s/,oldcluster:*//] + +[source,console-result] +-------------------------------------------------- +{ + "(local)": { + "connected": true, + "skip_unavailable": false, + "error": "no such index [not_present]" <1> + }, + "cluster_one": { + "connected": true, + "skip_unavailable": true, + "matching_indices": false, <2> + "version": { + "number": "8.13.0", + "build_flavor": "default", + "minimum_wire_compatibility_version": "7.17.0", + "minimum_index_compatibility_version": "7.0.0" + } + }, + "cluster_two": { + "connected": false, <3> + "skip_unavailable": false, + "matching_indices": true, + "version": { + "number": "8.13.0", + "build_flavor": "default", + "minimum_wire_compatibility_version": "7.17.0", + "minimum_index_compatibility_version": "7.0.0" + } + }, + "oldcluster": { <4> + "connected": true, + "skip_unavailable": false, + "matching_indices": true + } +} +-------------------------------------------------- +// TEST[skip: too many illustrative error variations to reproduce] + +<1> The local cluster has no index called `not_present`. Searching against it +using the specified `ignore_unavailable=false` param will return a "no such +index" error. Other types of errors can show up here as well, such as security +permission errors when the user does not have authorization to search the +specified index. +<2> The `cluster_one` remote cluster has no indices that match the pattern +`my-index*`. There may be no indices that match the pattern or the index +could be closed. (You can check this by using the +<> API.) +<3> The `cluster_two` remote cluster is not connected (the attempt to connect +failed). Since this cluster is marked as `skip_unavailable=false`, you should +probably exclude this cluster from the search by adding `-cluster_two:*` to the +search index expression. +<4> The `oldcluster` remote cluster shows that it has matching indices, but no +version information is included. This indicates that the cluster version predates +the introduction of the `_resolve/cluster` API in 8.13.0., so you may want to +exclude it from your {ccs}. (Note: the endpoint was able to tell there were +matching indices because it fell back to using the <> API.) diff --git a/docs/reference/inference/delete-inference.asciidoc b/docs/reference/inference/delete-inference.asciidoc index 692a96212f5ca..850b4ef1b10b0 100644 --- a/docs/reference/inference/delete-inference.asciidoc +++ b/docs/reference/inference/delete-inference.asciidoc @@ -6,9 +6,9 @@ experimental[] Deletes an {infer} model deployment. -IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER, -OpenAI, or Hugging Face, in your cluster. This is not the same feature that you -can use on an ML node with custom {ml} models. 
If you want to train and use your +IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER, +OpenAI, or Hugging Face, in your cluster. This is not the same feature that you +can use on an ML node with custom {ml} models. If you want to train and use your own model, use the <>. @@ -16,6 +16,7 @@ own model, use the <>. [[delete-inference-api-request]] ==== {api-request-title} +`DELETE /_inference/` `DELETE /_inference//` [discrete] @@ -34,7 +35,7 @@ own model, use the <>. The unique identifier of the {infer} model to delete. :: -(Required, string) +(Optional, string) The type of {infer} task that the model performs. @@ -42,7 +43,7 @@ The type of {infer} task that the model performs. [[delete-inference-api-example]] ==== {api-examples-title} -The following API call deletes the `my-elser-model` {infer} model that can +The following API call deletes the `my-elser-model` {infer} model that can perform `sparse_embedding` tasks. @@ -61,4 +62,4 @@ The API returns the following response: "acknowledged": true } ------------------------------------------------------------ -// NOTCONSOLE \ No newline at end of file +// NOTCONSOLE diff --git a/docs/reference/inference/get-inference.asciidoc b/docs/reference/inference/get-inference.asciidoc index 45f4cb67e7674..176909bc5458f 100644 --- a/docs/reference/inference/get-inference.asciidoc +++ b/docs/reference/inference/get-inference.asciidoc @@ -6,9 +6,9 @@ experimental[] Retrieves {infer} model information. -IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER, -OpenAI, or Hugging Face, in your cluster. This is not the same feature that you -can use on an ML node with custom {ml} models. If you want to train and use your +IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER, +OpenAI, or Hugging Face, in your cluster. This is not the same feature that you +can use on an ML node with custom {ml} models. If you want to train and use your own model, use the <>. @@ -18,6 +18,8 @@ own model, use the <>. `GET /_inference/_all` +`GET /_inference/` + `GET /_inference//_all` `GET /_inference//` diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc index 9ef633160f162..4fb6ea5a4fb6d 100644 --- a/docs/reference/inference/post-inference.asciidoc +++ b/docs/reference/inference/post-inference.asciidoc @@ -6,9 +6,9 @@ experimental[] Performs an inference task on an input text by using an {infer} model. -IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER, -OpenAI, or Hugging Face, in your cluster. This is not the same feature that you -can use on an ML node with custom {ml} models. If you want to train and use your +IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER, +OpenAI, or Hugging Face, in your cluster. This is not the same feature that you +can use on an ML node with custom {ml} models. If you want to train and use your own model, use the <>. @@ -16,6 +16,7 @@ own model, use the <>. [[post-inference-api-request]] ==== {api-request-title} +`POST /_inference/` `POST /_inference//` @@ -46,7 +47,7 @@ The unique identifier of the {infer} model. ``:: -(Required, string) +(Optional, string) The type of {infer} task that the model performs. 
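Because the task type is now optional in the path, a request can also be issued with the model ID alone. A minimal sketch, assuming an existing model named `my-elser-model` (the name is illustrative, not part of this change):

[source,console]
----
POST /_inference/my-elser-model
{
  "input": "The quick brown fox jumps over the lazy dog"
}
----
// TEST[skip:illustrative example only]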
diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 1d097c91bbedf..dc0f5615bf0eb 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -35,9 +35,10 @@ perform a specific {infer} task. The following services are available through the {infer} API: +* Cohere * ELSER -* OpenAI * Hugging Face +* OpenAI [discrete] @@ -64,16 +65,47 @@ The type of the {infer} task that the model will perform. Available task types: (Required, string) The type of service supported for the specified task type. Available services: +* `cohere`: specify the `text_embedding` task type to use the Cohere service. * `elser`: specify the `sparse_embedding` task type to use the ELSER service. -* `openai`: specify the `text_embedding` task type to use the OpenAI service. * `hugging_face`: specify the `text_embedding` task type to use the Hugging Face service. +* `openai`: specify the `text_embedding` task type to use the OpenAI service. `service_settings`:: (Required, object) Settings used to install the {infer} model. These settings are specific to the `service` you specified. + +.`service_settings` for `cohere` +[%collapsible%closed] +===== +`api_key`::: +(Required, string) +A valid API key of your Cohere account. You can find your Cohere API keys or you +can create a new one +https://dashboard.cohere.com/api-keys[on the API keys settings page]. + +IMPORTANT: You need to provide the API key only once, during the {infer} model +creation. The <> does not retrieve your API key. After +creating the {infer} model, you cannot change the associated API key. If you +want to use a different API key, delete the {infer} model and recreate it with +the same name and the updated API key. + +`embedding_type`:: +(Optional, string) +Specifies the types of embeddings you want to get back. Defaults to `float`. +Valid values are: + * `float`: use it for the default float embeddings. + * `int8`: use it for signed int8 embeddings. + +`model_id`:: +(Optional, string) +The name of the model to use for the {infer} task. To review the available +models, refer to the +https://docs.cohere.com/reference/embed[Cohere docs]. Defaults to +`embed-english-v2.0`. +===== ++ .`service_settings` for `elser` [%collapsible%closed] ===== @@ -86,14 +118,14 @@ The number of model allocations to create. The number of threads to use by each model allocation. ===== + -.`service_settings` for `openai` +.`service_settings` for `hugging_face` [%collapsible%closed] ===== `api_key`::: (Required, string) -A valid API key of your OpenAI account. You can find your OpenAI API keys in -your OpenAI account under the -https://platform.openai.com/api-keys[API keys section]. +A valid access token of your Hugging Face account. You can find your Hugging +Face access tokens or you can create a new one +https://huggingface.co/settings/tokens[on the settings page]. IMPORTANT: You need to provide the API key only once, during the {infer} model creation. The <> does not retrieve your API key. After @@ -101,26 +133,19 @@ creating the {infer} model, you cannot change the associated API key. If you want to use a different API key, delete the {infer} model and recreate it with the same name and the updated API key. -`organization_id`::: -(Optional, string) -The unique identifier of your organization. You can find the Organization ID in -your OpenAI account under -https://platform.openai.com/account/organization[**Settings** > **Organizations**]. 
- `url`::: -(Optional, string) -The URL endpoint to use for the requests. Can be changed for testing purposes. -Defaults to `https://api.openai.com/v1/embeddings`. +(Required, string) +The URL endpoint to use for the requests. ===== + -.`service_settings` for `hugging_face` +.`service_settings` for `openai` [%collapsible%closed] ===== `api_key`::: (Required, string) -A valid access token of your Hugging Face account. You can find your Hugging -Face access tokens or you can create a new one -https://huggingface.co/settings/tokens[on the settings page]. +A valid API key of your OpenAI account. You can find your OpenAI API keys in +your OpenAI account under the +https://platform.openai.com/api-keys[API keys section]. IMPORTANT: You need to provide the API key only once, during the {infer} model creation. The <> does not retrieve your API key. After @@ -128,9 +153,16 @@ creating the {infer} model, you cannot change the associated API key. If you want to use a different API key, delete the {infer} model and recreate it with the same name and the updated API key. +`organization_id`::: +(Optional, string) +The unique identifier of your organization. You can find the Organization ID in +your OpenAI account under +https://platform.openai.com/account/organization[**Settings** > **Organizations**]. + `url`::: -(Required, string) -The URL endpoint to use for the requests. +(Optional, string) +The URL endpoint to use for the requests. Can be changed for testing purposes. +Defaults to `https://api.openai.com/v1/embeddings`. ===== `task_settings`:: @@ -141,11 +173,33 @@ Settings to configure the {infer} task. These settings are specific to the .`task_settings` for `text_embedding` [%collapsible%closed] ===== +`input_type`::: +(Optional, string) +For `cohere` service only. Specifies the type of input passed to the model. +Valid values are: + * `classification`: use it for embeddings passed through a text classifier. + * `clustering`: use it for the embeddings run through a clustering algorithm. + * `ingest`: use it for storing document embeddings in a vector database. + * `search`: use it for storing embeddings of search queries run against a + vector database to find relevant documents. + `model`::: (Optional, string) -The name of the model to use for the {infer} task. Refer to the +For `openai` service only. The name of the model to use for the {infer} task. Refer +to the https://platform.openai.com/docs/guides/embeddings/what-are-embeddings[OpenAI documentation] for the list of available text embedding models. + +`truncate`::: +(Optional, string) +For `cohere` service only. Specifies how the API handles inputs longer than the +maximum token length. Defaults to `END`. Valid values are: + * `NONE`: when the input exceeds the maximum input token length an error is + returned. + * `START`: when the input exceeds the maximum input token length the start of + the input is discarded. + * `END`: when the input exceeds the maximum input token length the end of + the input is discarded. ===== @@ -156,6 +210,30 @@ for the list of available text embedding models. This section contains example API calls for every service type. +[discrete] +[[inference-example-cohere]] +===== Cohere service + +The following example shows how to create an {infer} model called +`cohere-embeddings` to perform a `text_embedding` task type.
+ +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/cohere-embeddings +{ + "service": "cohere", + "service_settings": { + "api_key": "", + "model": "embed-english-light-v3.0", + "embedding_type": "int8" + }, + "task_settings": { + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + + [discrete] [[inference-example-elser]] ===== ELSER service @@ -196,29 +274,6 @@ Example response: // NOTCONSOLE -[discrete] -[[inference-example-openai]] -===== OpenAI service - -The following example shows how to create an {infer} model called -`openai_embeddings` to perform a `text_embedding` task type. - -[source,console] ------------------------------------------------------------- -PUT _inference/text_embedding/openai_embeddings -{ - "service": "openai", - "service_settings": { - "api_key": "" - }, - "task_settings": { - "model": "text-embedding-ada-002" - } -} ------------------------------------------------------------- -// TEST[skip:TBD] - - [discrete] [[inference-example-hugging-face]] ===== Hugging Face service @@ -247,4 +302,27 @@ https://ui.endpoints.huggingface.co/[the Hugging Face endpoint page] to get an endpoint URL. Select the model you want to use on the new endpoint creation page - for example `intfloat/e5-small-v2` - then select the `Sentence Embeddings` task under the Advanced configuration section. Create the endpoint. Copy the URL -after the endpoint initialization has been finished. \ No newline at end of file +after the endpoint initialization has been finished. + + +[discrete] +[[inference-example-openai]] +===== OpenAI service + +The following example shows how to create an {infer} model called +`openai_embeddings` to perform a `text_embedding` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/openai_embeddings +{ + "service": "openai", + "service_settings": { + "api_key": "" + }, + "task_settings": { + "model": "text-embedding-ada-002" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] \ No newline at end of file diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index 6f2cec356edb4..af89c265db2ca 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -7,8 +7,8 @@ dynamic mapping by setting the dynamic parameter to `true` or `runtime`. You can then use dynamic templates to define custom mappings that can be applied to dynamically added fields based on the matching condition: -* <> operates on the data type that -{es} detects +* <> +operate on the data type that {es} detects * <> use a pattern to match on the field name * <> operate on the full @@ -116,10 +116,13 @@ See <> for how to use dynamic templates to map `string` fields as either indexed fields or runtime fields. [[match-mapping-type]] -==== `match_mapping_type` +==== `match_mapping_type` and `unmatch_mapping_type` -The `match_mapping_type` is the data type detected by the JSON parser. Because -JSON doesn't distinguish a `long` from an `integer` or a `double` from +The `match_mapping_type` parameter matches fields by the data type detected by +the JSON parser, while `unmatch_mapping_type` excludes fields based on the data +type. 
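As a quick sketch of the list form of `unmatch_mapping_type` (the index and template names are illustrative, not part of this change), the following template maps every detected data type except `object` and `boolean` as `keyword`:

[source,console]
----
PUT my-index-000002
{
  "mappings": {
    "dynamic_templates": [
      {
        "non_boolean_keywords": {
          "match_mapping_type": "*",
          "unmatch_mapping_type": [ "object", "boolean" ],
          "mapping": {
            "type": "keyword"
          }
        }
      }
    ]
  }
}
----
// TEST[skip:illustrative example only]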
+ +Because JSON doesn't distinguish a `long` from an `integer` or a `double` from a `float`, any parsed floating point number is considered a `double` JSON data type, while any parsed `integer` number is considered a `long`. @@ -132,7 +135,10 @@ which is why `"dynamic":"runtime"` uses `double`. include::field-mapping.asciidoc[tag=dynamic-field-mapping-types-tag] -Use a wildcard (`*`) to match all data types. +You can specify either a single data type or a list of data types for either +the `match_mapping_type` or `unmatch_mapping_type` parameters. You can also +use a wildcard (`*`) for the `match_mapping_type` parameter to match all +data types. For example, if we wanted to map all integer fields as `integer` instead of `long`, and all `string` fields as both `text` and `keyword`, we @@ -144,6 +150,16 @@ PUT my-index-000001 { "mappings": { "dynamic_templates": [ + { + "numeric_counts": { + "match_mapping_type": ["long", "double"], + "match": "count", + "mapping": { + "type": "{dynamic_type}", + "index": false + } + } + }, { "integers": { "match_mapping_type": "long", @@ -165,6 +181,15 @@ PUT my-index-000001 } } } + }, + { + "non_objects_keyword": { + "match_mapping_type": "*", + "unmatch_mapping_type": "object", + "mapping": { + "type": "keyword" + } + } } ] } @@ -173,12 +198,16 @@ PUT my-index-000001 PUT my-index-000001/_doc/1 { "my_integer": 5, <1> - "my_string": "Some string" <2> + "my_string": "Some string", <2> + "my_boolean": "false", <3> + "field": {"count": 4} <4> } -------------------------------------------------- <1> The `my_integer` field is mapped as an `integer`. <2> The `my_string` field is mapped as a `text`, with a `keyword` <>. +<3> The `my_boolean` field is mapped as a `keyword`. +<4> The `field.count` field is mapped as a `long`. [[match-unmatch]] ==== `match` and `unmatch` diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index a2ab44a173a62..d600bc5566ace 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -238,21 +238,31 @@ expense of slower indexing speed. ==== `type`::: (Required, string) -The type of kNN algorithm to use. Can be either `hnsw` or `int8_hnsw`. - +The type of kNN algorithm to use. Can be either any of: ++ +-- +* `hnsw` - The default storage type. This utilizes the https://arxiv.org/abs/1603.09320[HNSW algorithm] for scalable + approximate kNN search. This supports all `element_type` values. +* `int8_hnsw` - This utilizes the https://arxiv.org/abs/1603.09320[HNSW algorithm] in addition to automatically scalar +quantization for scalable approximate kNN search with `element_type` of `float`. This can reduce the memory footprint +by 4x at the cost of some accuracy. See <>. +* `flat` - This utilizes a brute-force search algorithm for exact kNN search. This supports all `element_type` values. +* `int8_flat` - This utilizes a brute-force search algorithm in addition to automatically scalar quantization. Only supports +`element_type` of `float`. +-- `m`::: (Optional, integer) The number of neighbors each node will be connected to in the HNSW graph. -Defaults to `16`. +Defaults to `16`. Only applicable to `hnsw` and `int8_hnsw` index types. `ef_construction`::: (Optional, integer) The number of candidates to track while assembling the list of nearest -neighbors for each new node. Defaults to `100`. +neighbors for each new node. Defaults to `100`. Only applicable to `hnsw` and `int8_hnsw` index types. 
`confidence_interval`::: (Optional, float) -Only applicable to `int8_hnsw` index types. The confidence interval to use when quantizing the vectors, +Only applicable to `int8_hnsw` and `int8_flat` index types. The confidence interval to use when quantizing the vectors, can be any value between and including `0.90` and `1.0`. This value restricts the values used when calculating the quantization thresholds. For example, a value of `0.95` will only use the middle 95% of the values when calculating the quantization thresholds (e.g. the highest and lowest 2.5% of values will be ignored). diff --git a/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc b/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc index a8fb195e7728d..73ec6966ec049 100644 --- a/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc @@ -179,7 +179,7 @@ keystore: ./bin/elasticsearch-keystore add cluster.remote.ALIAS.credentials ---- + -Replace `ALIAS` with the alias you will use to connect to the remote cluster +Replace `ALIAS` with the same name that you will use to create the remote cluster entry later. When prompted, enter the encoded cross-cluster API key created on the remote cluster earlier. @@ -192,4 +192,4 @@ remote cluster earlier. include::remote-clusters-connect.asciidoc[] :!trust-mechanism: -include::{es-repo-dir}/security/authentication/remote-clusters-privileges-api-key.asciidoc[leveloffset=+1] \ No newline at end of file +include::{es-repo-dir}/security/authentication/remote-clusters-privileges-api-key.asciidoc[leveloffset=+1] diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc index 5a7aa43155c66..1e425c77d1264 100644 --- a/docs/reference/modules/cluster/shards_allocation.asciidoc +++ b/docs/reference/modules/cluster/shards_allocation.asciidoc @@ -22,37 +22,55 @@ one of the active allocation ids in the cluster state. -- +[[cluster-routing-allocation-same-shard-host]] +`cluster.routing.allocation.same_shard.host`:: + (<>) + If `true`, forbids multiple copies of a shard from being allocated to + distinct nodes on the same host, i.e. which have the same network + address. Defaults to `false`, meaning that copies of a shard may + sometimes be allocated to nodes on the same host. This setting is only + relevant if you run multiple nodes on each host. + `cluster.routing.allocation.node_concurrent_incoming_recoveries`:: (<>) - How many concurrent incoming shard recoveries are allowed to happen on a node. Incoming recoveries are the recoveries - where the target shard (most likely the replica unless a shard is relocating) is allocated on the node. Defaults to `2`. + How many concurrent incoming shard recoveries are allowed to happen on a + node. Incoming recoveries are the recoveries where the target shard (most + likely the replica unless a shard is relocating) is allocated on the node. + Defaults to `2`. Increasing this setting may cause shard movements to have + a performance impact on other activity in your cluster, but may not make + shard movements complete noticeably sooner. We do not recommend adjusting + this setting from its default of `2`. `cluster.routing.allocation.node_concurrent_outgoing_recoveries`:: (<>) - How many concurrent outgoing shard recoveries are allowed to happen on a node. 
Outgoing recoveries are the recoveries - where the source shard (most likely the primary unless a shard is relocating) is allocated on the node. Defaults to `2`. + How many concurrent outgoing shard recoveries are allowed to happen on a + node. Outgoing recoveries are the recoveries where the source shard (most + likely the primary unless a shard is relocating) is allocated on the node. + Defaults to `2`. Increasing this setting may cause shard movements to have + a performance impact on other activity in your cluster, but may not make + shard movements complete noticeably sooner. We do not recommend adjusting + this setting from its default of `2`. `cluster.routing.allocation.node_concurrent_recoveries`:: (<>) - A shortcut to set both `cluster.routing.allocation.node_concurrent_incoming_recoveries` and - `cluster.routing.allocation.node_concurrent_outgoing_recoveries`. Defaults to 2. - + A shortcut to set both + `cluster.routing.allocation.node_concurrent_incoming_recoveries` and + `cluster.routing.allocation.node_concurrent_outgoing_recoveries`. The + value of this setting takes effect only when the more specific setting is + not configured. Defaults to `2`. Increasing this setting may cause shard + movements to have a performance impact on other activity in your cluster, + but may not make shard movements complete noticeably sooner. We do not + recommend adjusting this setting from its default of `2`. `cluster.routing.allocation.node_initial_primaries_recoveries`:: - (<>) - While the recovery of replicas happens over the network, the recovery of - an unassigned primary after node restart uses data from the local disk. - These should be fast so more initial primary recoveries can happen in - parallel on the same node. Defaults to `4`. - -[[cluster-routing-allocation-same-shard-host]] -`cluster.routing.allocation.same_shard.host`:: - (<>) - If `true`, forbids multiple copies of a shard from being allocated to - distinct nodes on the same host, i.e. which have the same network - address. Defaults to `false`, meaning that copies of a shard may - sometimes be allocated to nodes on the same host. This setting is only - relevant if you run multiple nodes on each host. + (<>) + While the recovery of replicas happens over the network, the recovery of + an unassigned primary after node restart uses data from the local disk. + These should be fast so more initial primary recoveries can happen in + parallel on each node. Defaults to `4`. Increasing this setting may cause + shard recoveries to have a performance impact on other activity in your + cluster, but may not make shard recoveries complete noticeably sooner. We + do not recommend adjusting this setting from its default of `4`. [[shards-rebalancing-settings]] ==== Shard rebalancing settings @@ -73,38 +91,44 @@ balancer works independently within each tier. You can use the following settings to control the rebalancing of shards across the cluster: -`cluster.routing.rebalance.enable`:: +`cluster.routing.allocation.allow_rebalance`:: + -- (<>) -Enable or disable rebalancing for specific kinds of shards: +Specify when shard rebalancing is allowed: -* `all` - (default) Allows shard balancing for all kinds of shards. -* `primaries` - Allows shard balancing only for primary shards. -* `replicas` - Allows shard balancing only for replica shards. -* `none` - No shard balancing of any kind are allowed for any indices. + +* `always` - Always allow rebalancing. +* `indices_primaries_active` - Only when all primaries in the cluster are allocated. 
+* `indices_all_active` - (default) Only when all shards (primaries and replicas) in the cluster are allocated. -- -`cluster.routing.allocation.allow_rebalance`:: +`cluster.routing.rebalance.enable`:: + -- (<>) -Specify when shard rebalancing is allowed: +Enable or disable rebalancing for specific kinds of shards: +* `all` - (default) Allows shard balancing for all kinds of shards. +* `primaries` - Allows shard balancing only for primary shards. +* `replicas` - Allows shard balancing only for replica shards. +* `none` - No shard balancing of any kind are allowed for any indices. -* `always` - Always allow rebalancing. -* `indices_primaries_active` - Only when all primaries in the cluster are allocated. -* `indices_all_active` - (default) Only when all shards (primaries and replicas) in the cluster are allocated. +Rebalancing is important to ensure the cluster returns to a healthy and fully +resilient state after a disruption. If you adjust this setting, remember to set +it back to `all` as soon as possible. -- `cluster.routing.allocation.cluster_concurrent_rebalance`:: (<>) Defines the number of concurrent shard rebalances are allowed across the whole cluster. Defaults to `2`. Note that this setting only controls the number of -concurrent shard relocations due to imbalances in the cluster. This setting does -not limit shard relocations due to +concurrent shard relocations due to imbalances in the cluster. This setting +does not limit shard relocations due to <> or -<>. +<>. Increasing this setting may cause the +cluster to use additional resources moving shards between nodes, so we +generally do not recommend adjusting this setting from its default of `2`. `cluster.routing.allocation.type`:: + @@ -149,6 +173,12 @@ data stream have an estimated write load of zero. The following settings control how {es} combines these values into an overall measure of each node's weight. +`cluster.routing.allocation.balance.threshold`:: +(float, <>) +The minimum improvement in weight which triggers a rebalancing shard movement. +Defaults to `1.0f`. Raising this value will cause {es} to stop rebalancing +shards sooner, leaving the cluster in a more unbalanced state. + `cluster.routing.allocation.balance.shard`:: (float, <>) Defines the weight factor for the total number of shards allocated to each node. @@ -177,19 +207,25 @@ estimated number of indexing threads needed by the shard. Defaults to `10.0f`. Raising this value increases the tendency of {es} to equalize the total write load across nodes ahead of the other balancing variables. -`cluster.routing.allocation.balance.threshold`:: -(float, <>) -The minimum improvement in weight which triggers a rebalancing shard movement. -Defaults to `1.0f`. Raising this value will cause {es} to stop rebalancing -shards sooner, leaving the cluster in a more unbalanced state. - [NOTE] ==== -* It is not recommended to adjust the values of the heuristics settings. The -default values are generally good, and although different values may improve -the current balance, it is possible that they create problems in the future -if the cluster or workload changes. +* If you have a large cluster, it may be unnecessary to keep it in +a perfectly balanced state at all times. It is less resource-intensive for the +cluster to operate in a somewhat unbalanced state rather than to perform all +the shard movements needed to achieve the perfect balance. If so, increase the +value of `cluster.routing.allocation.balance.threshold` to define the +acceptable imbalance between nodes. 
For instance, if you have an average of 500 +shards per node and can accept a difference of 5% (25 typical shards) between +nodes, set `cluster.routing.allocation.balance.threshold` to `25`. + +* We do not recommend adjusting the values of the heuristic weight factor +settings. The default values work well in all reasonable clusters. Although +different values may improve the current balance in some ways, it is possible +that they will create unexpected problems in the future or prevent it from +gracefully handling an unexpected disruption. + * Regardless of the result of the balancing algorithm, rebalancing might not be allowed due to allocation rules such as forced awareness and allocation -filtering. +filtering. Use the <> API to explain the current +allocation of shards. ==== diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc index 9b115b4f1111f..4c24cd0dbadb7 100644 --- a/docs/reference/modules/http.asciidoc +++ b/docs/reference/modules/http.asciidoc @@ -13,7 +13,10 @@ address, a hostname, or a <>. Use this setting only if you require different configurations for the transport and HTTP interfaces. + -Defaults to the address given by `network.host`. +Defaults to the address given by `network.host`. However, note that +<> will add +`http.host: 0.0.0.0` to your `elasticsearch.yml` configuration file, which +overrides this default. `http.bind_host`:: (<>, string) diff --git a/docs/reference/modules/indices/recovery.asciidoc b/docs/reference/modules/indices/recovery.asciidoc index 02b70c69876ff..261c3d3fc3f24 100644 --- a/docs/reference/modules/indices/recovery.asciidoc +++ b/docs/reference/modules/indices/recovery.asciidoc @@ -38,8 +38,9 @@ This limit applies to each node separately. If multiple nodes in a cluster perform recoveries at the same time, the cluster's total recovery traffic may exceed this limit. + -If this limit is too high, ongoing recoveries may consume an excess of bandwidth -and other resources, which can destabilize the cluster. +If this limit is too high, ongoing recoveries may consume an excess of +bandwidth and other resources, which can have a performance impact on your +cluster and in extreme cases may destabilize it. + This is a dynamic setting, which means you can set it in each node's `elasticsearch.yml` config file and you can update it dynamically using the diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index 367c0d3025eee..d5392a204299e 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -43,7 +43,9 @@ Sets the address of this node for both HTTP and transport traffic. The node will bind to this address and will also use it as its publish address. Accepts an IP address, a hostname, or a <>. + -Defaults to `_local_`. +Defaults to `_local_`. However, note that <> will add `http.host: 0.0.0.0` to your `elasticsearch.yml` +configuration file, which overrides this default for HTTP traffic. `http.port`:: (<>, integer) diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 1aebf005a64e3..669402c94e9bb 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -7,6 +7,7 @@ This section summarizes the changes in each release. * <> +* <> * <> * <> * <> @@ -60,6 +61,7 @@ This section summarizes the changes in each release. 
-- include::release-notes/8.13.0.asciidoc[] +include::release-notes/8.12.1.asciidoc[] include::release-notes/8.12.0.asciidoc[] include::release-notes/8.11.4.asciidoc[] include::release-notes/8.11.3.asciidoc[] diff --git a/docs/reference/release-notes/8.12.1.asciidoc b/docs/reference/release-notes/8.12.1.asciidoc new file mode 100644 index 0000000000000..9aa9a11b3bf02 --- /dev/null +++ b/docs/reference/release-notes/8.12.1.asciidoc @@ -0,0 +1,73 @@ +[[release-notes-8.12.1]] +== {es} version 8.12.1 + +Also see <>. + +[[bug-8.12.1]] +[float] +=== Bug fixes + +Allocation:: +* Improve `CANNOT_REBALANCE_CAN_ALLOCATE` explanation {es-pull}104904[#104904] + +Application:: +* [Connector API] Fix bug in configuration validation parser {es-pull}104198[#104198] +* [Connector API] Fix bug when triggering a sync job via API {es-pull}104802[#104802] +* [Profiling] Query in parallel on content nodes {es-pull}104600[#104600] + +Data streams:: +* Data streams fix failure store delete {es-pull}104281[#104281] +* Fix _alias/ returning non-matching data streams {es-pull}104145[#104145] (issue: {es-issue}96589[#96589]) + +Downsampling:: +* Downsampling supports `date_histogram` with tz {es-pull}103511[#103511] (issue: {es-issue}101309[#101309]) + +ES|QL:: +* Avoid execute ESQL planning on refresh thread {es-pull}104591[#104591] +* ESQL: Allow grouping by null blocks {es-pull}104523[#104523] +* ESQL: Fix `SearchStats#count(String)` to count values not rows {es-pull}104891[#104891] (issue: {es-issue}104795[#104795]) +* Limit concurrent shards per node for ESQL {es-pull}104832[#104832] (issue: {es-issue}103666[#103666]) +* Reduce the number of Evals `ReplaceMissingFieldWithNull` creates {es-pull}104586[#104586] (issue: {es-issue}104583[#104583]) + +Infra/Resiliency:: +* Limit nesting depth in Exception XContent {es-pull}103741[#103741] + +Ingest Node:: +* Better handling of async processor failures {es-pull}104289[#104289] (issue: {es-issue}101921[#101921]) +* Ingest correctly handle upsert operations and drop processors together {es-pull}104585[#104585] (issue: {es-issue}36746[#36746]) + +Machine Learning:: +* Add retry logic for 500 and 503 errors for OpenAI {es-pull}103819[#103819] +* Avoid possible datafeed infinite loop with filtering aggregations {es-pull}104722[#104722] (issue: {es-issue}104699[#104699]) +* [LTR] `FieldValueExtrator` - Checking if fetched values is empty {es-pull}104314[#104314] + +Network:: +* Fix lost headers with chunked responses {es-pull}104808[#104808] + +Search:: +* Don't throw error for remote shards that open PIT filtered out {es-pull}104288[#104288] (issue: {es-issue}102596[#102596]) + +Snapshot/Restore:: +* Fix deleting index during snapshot finalization {es-pull}103817[#103817] (issue: {es-issue}101029[#101029]) + +TSDB:: +* Fix `routing_path` when template has multiple `path_match` and multi-fields {es-pull}104418[#104418] (issue: {es-issue}104400[#104400]) + +Transform:: +* Fix bug when `latest` transform is used together with `from` parameter {es-pull}104606[#104606] (issue: {es-issue}104543[#104543]) + +[[deprecation-8.12.1]] +[float] +=== Deprecations + +Machine Learning:: +* Deprecate machine learning on Intel macOS {es-pull}104087[#104087] + +[[upgrade-8.12.1]] +[float] +=== Upgrades + +Search:: +* [8.12.1] Upgrade to Lucene 9.9.2 {es-pull}104761[#104761] (issue: {es-issue}104617[#104617]) + + diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index f5252ae6a884f..0452eca8fbfc9 100644 --- 
a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -27,13 +27,15 @@ Other versions: endif::[] -// The notable-highlights tag marks entries that -// should be featured in the Stack Installation and Upgrade Guide: // tag::notable-highlights[] -// [discrete] -// === Heading -// -// Description. + +[discrete] +[[ga_release_of_synonyms_api]] +=== GA Release of Synonyms API +Removes the beta label for the Synonyms API to make it GA. + +{es-pull}103223[#103223] + // end::notable-highlights[] diff --git a/docs/reference/rest-api/security/query-api-key.asciidoc b/docs/reference/rest-api/security/query-api-key.asciidoc index 394464dc21456..88fef9a21ff88 100644 --- a/docs/reference/rest-api/security/query-api-key.asciidoc +++ b/docs/reference/rest-api/security/query-api-key.asciidoc @@ -6,6 +6,117 @@ Query API key information ++++ +//// +[source,console] +---- +POST /_security/user/king +{ + "password" : "security-test-password", + "roles": [] +} +POST /_security/user/june +{ + "password" : "security-test-password", + "roles": [] +} +POST /_security/api_key/grant +{ + "grant_type": "password", + "username" : "king", + "password" : "security-test-password", + "api_key" : { + "name": "king-key-no-expire" + } +} +DELETE /_security/api_key +{ + "name" : "king-key-no-expire" +} +POST /_security/api_key/grant +{ + "grant_type": "password", + "username" : "king", + "password" : "security-test-password", + "api_key" : { + "name": "king-key-10", + "expiration": "10d" + } +} +POST /_security/api_key/grant +{ + "grant_type": "password", + "username" : "king", + "password" : "security-test-password", + "api_key" : { + "name": "king-key-100", + "expiration": "100d" + } +} +POST /_security/api_key/grant +{ + "grant_type": "password", + "username" : "june", + "password" : "security-test-password", + "api_key" : { + "name": "june-key-no-expire" + } +} +POST /_security/api_key/grant +{ + "grant_type": "password", + "username" : "june", + "password" : "security-test-password", + "api_key" : { + "name": "june-key-10", + "expiration": "10d" + } +} +POST /_security/api_key/grant +{ + "grant_type": "password", + "username" : "june", + "password" : "security-test-password", + "api_key" : { + "name": "june-key-100", + "expiration": "100d" + } +} +DELETE /_security/api_key +{ + "name" : "june-key-100" +} +---- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE /_security/user/king +DELETE /_security/user/june +DELETE /_security/api_key +{ + "name" : "king-key-no-expire" +} +DELETE /_security/api_key +{ + "name" : "king-key-10" +} +DELETE /_security/api_key +{ + "name" : "king-key-100" +} +DELETE /_security/api_key +{ + "name" : "june-key-no-expire" +} +DELETE /_security/api_key +{ + "name" : "june-key-10" +} +-------------------------------------------------- +// TEARDOWN + +//// + Retrieves information for API keys with <> in a <> fashion. @@ -49,7 +160,8 @@ its <> and the owner user's You can specify the following parameters in the request body: `query`:: -(Optional, string) A <> to filter which API keys to return. +(Optional, object) A <> to filter which API keys to return. +If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including <>, <>, <>, <>, @@ -84,7 +196,7 @@ Name of the API key. Creation time of the API key in milliseconds. `expiration`:: -Expiration time of the API key in milliseconds. +Expiration time of the API key in milliseconds. 
This is `null` if the key was not configured to expire. `invalidated`:: Indicates whether the API key is invalidated. If `true`, the key is invalidated. @@ -111,6 +223,16 @@ simply mentioning `metadata` (not followed by any dot and sub-field name). NOTE: You cannot query the role descriptors of an API key. ==== +`aggs`:: +(Optional, object) Any <> to run over the corpus of returned API keys. +Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. +This supports only a subset of aggregation types, namely: <>, +<>, <>, +<>, <>, +<>, <>, +<>, and <>. +Additionally, aggregations only run over the same subset of fields that `query` works with. + include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=from] + By default, you cannot page through more than 10,000 hits using the `from` and @@ -119,6 +241,8 @@ By default, you cannot page through more than 10,000 hits using the `from` and `size`:: (Optional, integer) The number of hits to return. Must not be negative and defaults to `10`. +The `size` parameter can be set to `0`, in which case no API key matches are returned, +only the aggregation results. + By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the @@ -455,11 +579,25 @@ The response contains a list of matched API keys along with their sort values: <1> The first sort value is creation time, which is displayed in `date_time` <> as defined in the request <2> The second sort value is the API key name -You can use the following request to retrieve all valid API keys, i.e. not invalidated and not expired: -[source,js] +[[security-api-query-api-key-with-aggs-example]] +==== Aggregations Example + +For example, given 2 users "june" and "king", each owning 3 API keys: + +* one that never expires (invalidated for user "king") +* one that expires in 10 days +* and one that expires in 100 day (invalidated for user "june") + +the following request returns the names of valid (not expired and not invalidated) API keys +that expire soon (in 30 days time), grouped by owner username. + +===== Request + +[source,console] ---- -GET /_security/_query/api_key +POST /_security/_query/api_key { + "size": 0, "query": { "bool": { "must": { @@ -469,28 +607,166 @@ GET /_security/_query/api_key }, "should": [ <2> { - "range": { - "expiration": { - "gte": "now" + "range": { "expiration": { "gte": "now" } } + }, + { + "bool": { "must_not": { "exists": { "field": "expiration" } } } + } + ], + "minimum_should_match": 1 + } + }, + "aggs": { + "keys_by_username": { + "composite": { + "sources": [ { "usernames": { "terms": { "field": "username" } } } ] <3> + }, + "aggs": { + "expires_soon": { + "filter": { + "range": { "expiration": { "lte": "now+30d/d" } } <4> + }, + "aggs": { + "key_names": { "terms": { "field": "name" } } + } + } + } + } + } +} +---- + +<1> Matching API keys must not be invalidated +<2> Matching API keys must be either not expired or not have an expiration date +<3> Aggregate all matching keys (i.e. 
all valid keys) by their owner username +<4> Further aggregate the per-username valid keys into a soon-to-expire bucket + +===== Response + +[source,console-result] +---- +{ + "total" : 4, <1> + "count" : 0, + "api_keys" : [ ], + "aggregations" : { + "keys_by_username" : { + "after_key" : { + "usernames" : "king" + }, + "buckets" : [ + { + "key" : { + "usernames" : "june" + }, + "doc_count" : 2, <2> + "expires_soon" : { + "doc_count" : 1, + "key_names" : { + "doc_count_error_upper_bound" : 0, + "sum_other_doc_count" : 0, + "buckets" : [ + { + "key" : "june-key-10", + "doc_count" : 1 + } + ] } } }, { - "bool": { - "must_not": { - "exists": { - "field": "expiration" - } + "key" : { + "usernames" : "king" + }, + "doc_count" : 2, + "expires_soon" : { + "doc_count" : 1, <3> + "key_names" : { + "doc_count_error_upper_bound" : 0, + "sum_other_doc_count" : 0, + "buckets" : [ <4> + { + "key" : "king-key-10", + "doc_count" : 1 + } + ] } } } - ], - "minimum_should_match": 1 + ] } } } ---- -// NOTCONSOLE -<1> Matching API keys must not be invalidated -<2> Matching API keys must be either not expired or does not have an expiration date +<1> Total number of valid API keys (2 for each user) +<2> Number of valid API keys for user "june" +<3> Number of valid API keys expiring soon for user "king" +<4> The names of soon-to-expire API keys for user "king" + +To retrieve the invalidated (but not yet deleted) API keys, +grouped by owner username and API key name, issue the following request: + +===== Request + +[source,console] +---- +POST /_security/_query/api_key +{ + "size": 0, + "query": { + "bool": { + "filter": { + "term": { + "invalidated": true + } + } + } + }, + "aggs": { + "invalidated_keys": { + "composite": { + "sources": [ + { "username": { "terms": { "field": "username" } } }, + { "key_name": { "terms": { "field": "name" } } } + ] + } + } + } +} +---- + +===== Response + +[source,console-result] +---- +{ + "total" : 2, + "count" : 0, + "api_keys" : [ ], + "aggregations" : { + "invalidated_keys" : { + "after_key" : { + "username" : "king", + "key_name" : "king-key-no-expire" + }, + "buckets" : [ + { + "key" : { + "username" : "june", + "key_name" : "june-key-100" + }, + "doc_count" : 1 + }, + { + "key" : { + "username" : "king", + "key_name" : "king-key-no-expire" + }, + "doc_count" : 1 + } + ] + } + } +} +---- diff --git a/docs/reference/rest-api/security/query-user.asciidoc b/docs/reference/rest-api/security/query-user.asciidoc index 08ead0f389ee9..92c293d1fe593 100644 --- a/docs/reference/rest-api/security/query-user.asciidoc +++ b/docs/reference/rest-api/security/query-user.asciidoc @@ -61,9 +61,15 @@ The email of the user. `enabled`:: Specifies whether the user is enabled. - ==== +[[security-api-query-user-query-params]] +==== {api-query-parms-title} + +`with_profile_uid`:: +(Optional, boolean) Determines whether to retrieve the <> `uid`, +if exists, for the users. Defaults to `false`. 
+ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=from] + By default, you cannot page through more than 10,000 hits using the `from` and @@ -218,6 +224,46 @@ A successful call returns a JSON structure for a user: -------------------------------------------------- // NOTCONSOLE +To retrieve the user `profile_uid` as part of the response: + +[source,console] +-------------------------------------------------- +GET /_security/_query/user?with_profile_uid=true +{ + "query": { + "prefix": { + "roles": "other" + } + } +} +-------------------------------------------------- +// TEST[setup:jacknich_user] + +[source,console-result] +-------------------------------------------------- +{ + "total": 1, + "count": 1, + "users": [ + { + "username": "jacknich", + "roles": [ + "admin", + "other_role1" + ], + "full_name": "Jack Nicholson", + "email": "jacknich@example.com", + "metadata": { + "intelligence": 7 + }, + "enabled": true, + "profile_uid": "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0" + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + Use a `bool` query to issue complex logical conditions and use `from`, `size`, `sort` to help paginate the result: diff --git a/docs/reference/search/field-caps.asciidoc b/docs/reference/search/field-caps.asciidoc index 486da4c158652..5fe924d38e028 100644 --- a/docs/reference/search/field-caps.asciidoc +++ b/docs/reference/search/field-caps.asciidoc @@ -77,6 +77,10 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailab (Optional, Boolean) If `true`, unmapped fields that are mapped in one index but not in another are included in the response. Fields that don't have any mapping are never included. Defaults to `false`. +`include_empty_fields`:: + (Optional, Boolean) If `false`, fields that never had a value in any shards are not included in the response. Fields that are not empty are always included. This flag does not consider deletions and updates. If a field was non-empty and all the documents containing that field were deleted or the field was removed by updates, it will still be returned even if the flag is `false`. + Defaults to `true`. + `filters`:: (Optional, string) Comma-separated list of filters to apply to the response. + diff --git a/docs/reference/settings/ml-settings.asciidoc b/docs/reference/settings/ml-settings.asciidoc index 2ac248d5ea8e7..1077a63b00249 100644 --- a/docs/reference/settings/ml-settings.asciidoc +++ b/docs/reference/settings/ml-settings.asciidoc @@ -10,9 +10,12 @@ // tag::ml-settings-description-tag[] You do not need to configure any settings to use {ml}. It is enabled by default. -IMPORTANT: {ml-cap} uses SSE4.2 instructions, so it works only on machines whose -CPUs {wikipedia}/SSE4#Supporting_CPUs[support] SSE4.2. If you run {es} on older -hardware, you must disable {ml} (by setting `xpack.ml.enabled` to `false`). +IMPORTANT: {ml-cap} uses SSE4.2 instructions on x86_64 machines, so it works only +on x86_64 machines whose CPUs {wikipedia}/SSE4#Supporting_CPUs[support] SSE4.2. +(This limitation does not apply to aarch64 machines.) If you run {es} on older +x86_64 hardware, you must disable {ml} (by setting `xpack.ml.enabled` to `false`). +In this situation you should not attempt to use {ml} functionality in your cluster +at all. // end::ml-settings-description-tag[] @@ -46,7 +49,18 @@ that you use the default value for this setting on all nodes. + If set to `false`, the {ml} APIs are disabled on the node. 
For example, the node cannot open jobs, start {dfeeds}, receive transport (internal) communication -requests, or requests from clients (including {kib}) related to {ml} APIs. +requests, or requests from clients (including {kib}) related to {ml} APIs. If +`xpack.ml.enabled` is not set uniformly across all nodes in your cluster then you +are likely to experience problems with {ml} functionality not fully working. ++ +You must not use any {ml} functionality from ingest pipelines if `xpack.ml.enabled` +is `false` on any node. Before setting `xpack.ml.enabled` to `false` on a node, +consider whether you really meant to just exclude `ml` from the `node.roles`. +Excluding `ml` from the <> will stop the node from +running {ml} jobs and NLP models, but it will still be aware that {ml} functionality +exists. Setting `xpack.ml.enabled` to `false` should be reserved for situations +where you cannot use {ml} functionality at all in your cluster due to hardware +limitations as described <>. `xpack.ml.inference_model.cache_size`:: (<>) The maximum inference cache size allowed. diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc index 858902bb72ef2..49501c46b8ba9 100644 --- a/docs/reference/setup/install.asciidoc +++ b/docs/reference/setup/install.asciidoc @@ -16,8 +16,8 @@ To set up Elasticsearch in {ecloud}, sign up for a {ess-trial}[free {ecloud} tri If you want to install and manage {es} yourself, you can: -* Run {es} on any Linux, MacOS, or Windows machine. -* Run {es} in a <>. +* Run {es} using a <>. +* Run {es} in a <>. * Set up and manage {es}, {kib}, {agent}, and the rest of the Elastic Stack on Kubernetes with {eck-ref}[{eck}]. TIP: To try out Elasticsearch on your own machine, we recommend using Docker and running both Elasticsearch and Kibana. For more information, see <>. @@ -57,10 +57,18 @@ Elasticsearch website or from our RPM repository. + <> +TIP: For a step-by-step example of setting up the {stack} on your own premises, try out our tutorial: {stack-ref}/installing-stack-demo-self.html[Installing a self-managed Elastic Stack]. + +[discrete] +[[elasticsearch-docker-images]] +=== Elasticsearch container images + +You can also run {es} inside a container image. + +[horizontal] `docker`:: -Images are available for running Elasticsearch as Docker containers. They may be -downloaded from the Elastic Docker Registry. +Docker container images may be downloaded from the Elastic Docker Registry. + {ref}/docker.html[Install {es} with Docker] diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc index 8dfbca8c63210..a30c8c313b263 100644 --- a/docs/reference/setup/install/rpm.asciidoc +++ b/docs/reference/setup/install/rpm.asciidoc @@ -19,6 +19,8 @@ NOTE: Elasticsearch includes a bundled version of https://openjdk.java.net[OpenJ from the JDK maintainers (GPLv2+CE). To use your own version of Java, see the <> +TIP: For a step-by-step example of setting up the {stack} on your own premises, try out our tutorial: {stack-ref}/installing-stack-demo-self.html[Installing a self-managed Elastic Stack]. 
+ [[rpm-key]] ==== Import the Elasticsearch GPG Key diff --git a/docs/reference/tab-widgets/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc b/docs/reference/tab-widgets/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc index ceb282a3966f5..a3910675b1632 100644 --- a/docs/reference/tab-widgets/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc +++ b/docs/reference/tab-widgets/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc @@ -89,7 +89,7 @@ https://www.elastic.co/guide/en/cloud-enterprise/current/ece-manage-repositories if you are using such a deployment. One common failure scenario is repository corruption. This occurs most often when multiple instances of {es} write to -the same repository location. There is a <> to fix this problem. +the same repository location. There is a <> to fix this problem. In the event that snapshots are failing for other reasons check the logs on the elected master node during the snapshot execution period for more information. @@ -163,7 +163,7 @@ Snapshots can fail for a variety reasons. If the failures are due to configurati documentation for the repository that the automated snapshots are using. One common failure scenario is repository corruption. This occurs most often when multiple instances of {es} write to -the same repository location. There is a <> to fix this problem. +the same repository location. There is a <> to fix this problem. In the event that snapshots are failing for other reasons check the logs on the elected master node during the snapshot execution period for more information. diff --git a/docs/reference/troubleshooting.asciidoc b/docs/reference/troubleshooting.asciidoc index de1f9e6c7a608..64df699d33638 100644 --- a/docs/reference/troubleshooting.asciidoc +++ b/docs/reference/troubleshooting.asciidoc @@ -43,7 +43,7 @@ fix problems that an {es} deployment might encounter. [[troubleshooting-snapshot]] === Snapshot and restore * <> -* <> +* <> * <> [discrete] diff --git a/docs/reference/troubleshooting/snapshot/add-repository.asciidoc b/docs/reference/troubleshooting/snapshot/add-repository.asciidoc index dc2ce5a4bc252..0de4667bd9688 100644 --- a/docs/reference/troubleshooting/snapshot/add-repository.asciidoc +++ b/docs/reference/troubleshooting/snapshot/add-repository.asciidoc @@ -1,8 +1,15 @@ [[add-repository]] -== Multiple deployments writing to the same snapshot repository +== Troubleshooting broken repositories -Multiple {es} deployments are writing to the same snapshot repository. {es} doesn't -support this configuration and only one cluster is allowed to write to the same +There are several situations where the <> might report an issue +regarding the integrity of snapshot repositories in the cluster. This page explains +the recommended actions for diagnosing corrupted, unknown, and invalid repositories. + +[[diagnosing-corrupted-repositories]] +=== Diagnosing corrupted repositories + +Multiple {es} deployments are writing to the same snapshot repository. {es} doesn't +support this configuration and only one cluster is allowed to write to the same repository. See <> for potential side-effects of corruption of the repository contents, which may not be resolved by the following guide. 
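The diagnosis steps in the rest of this guide start from the health report's list of affected resources. As a minimal sketch of how that report might be retrieved (an illustration only, assuming a cluster recent enough to expose the `_health_report` API and its `repository_integrity` indicator, with a node reachable on `localhost:9200`):

[source,sh]
--------------------------------------------------
# Sketch: fetch only the repository_integrity indicator from the health report.
# Assumes a locally reachable node and a release that includes the health report API.
curl -X GET "http://localhost:9200/_health_report/repository_integrity?pretty"
--------------------------------------------------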
@@ -11,3 +18,29 @@ other deployments, and re-add (recreate) the repository in the current deploymen include::{es-repo-dir}/tab-widgets/troubleshooting/snapshot/corrupt-repository-widget.asciidoc[] + +[[diagnosing-unknown-repositories]] +=== Diagnosing unknown repositories + +When a snapshot repository is marked as "unknown", it means that an {es} node is +unable to instantiate the repository due to an unknown repository type. This is +usually caused by a missing plugin on the node. Make sure each node in the cluster +has the required plugins by following these steps: + +1. Retrieve the affected nodes from the affected resources section of the health report. +2. Use the <> to retrieve the plugins installed on each node. +3. Cross reference this with a node that works correctly to find out which plugins are missing +and install the missing plugins. + + +[[diagnosing-invalid-repositories]] +=== Diagnosing invalid repositories + +When an {es} node faces an unexpected exception when trying to instantiate a snapshot +repository, it will mark the repository as "invalid" and write a warning to the log file. +Use the following steps to diagnose the underlying cause of this issue: + +1. Retrieve the affected nodes from the affected resources section of the health report. +2. Refer to the logs of the affected node(s) and search for the repository name. +You should be able to find logs that contain the relevant exception. +3. Try to resolve the errors reported. diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 295cb08847f83..eb1df32798e58 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -756,6 +756,11 @@ + + + + + @@ -1725,6 +1730,11 @@ + + + + + @@ -1735,6 +1745,11 @@ + + + + + diff --git a/libs/build.gradle b/libs/build.gradle index 1ed3962fa01e8..a88618aea2fcc 100644 --- a/libs/build.gradle +++ b/libs/build.gradle @@ -28,6 +28,7 @@ configure(subprojects - project('elasticsearch-log4j')) { && false == depProject.path.equals(':libs:elasticsearch-core') && false == depProject.path.equals(':libs:elasticsearch-plugin-api') && false == depProject.path.equals(':libs:elasticsearch-logging') + && false == depProject.path.equals(':libs:elasticsearch-native') && depProject.path.startsWith(':libs') && depProject.name.startsWith('elasticsearch-')) { throw new InvalidUserDataException("projects in :libs " diff --git a/libs/core/build.gradle b/libs/core/build.gradle index 1e20f4ae22949..a1d77cd02277e 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -12,6 +12,7 @@ apply plugin: 'elasticsearch.mrjar' dependencies { // This dependency is used only by :libs:core for null-checking interop with other tools compileOnly "com.google.code.findbugs:jsr305:3.0.2" + compileOnly project(':libs:elasticsearch-logging') testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testImplementation "junit:junit:${versions.junit}" diff --git a/libs/core/src/main/java/module-info.java b/libs/core/src/main/java/module-info.java index 4818078c19458..21a0a9b2c646e 100644 --- a/libs/core/src/main/java/module-info.java +++ b/libs/core/src/main/java/module-info.java @@ -6,10 +6,15 @@ * Side Public License, v 1.
*/ +import org.elasticsearch.jdk.ModuleQualifiedExportsService; + module org.elasticsearch.base { requires static jsr305; + requires org.elasticsearch.logging; exports org.elasticsearch.core; exports org.elasticsearch.jdk; - exports org.elasticsearch.core.internal.provider to org.elasticsearch.xcontent; + exports org.elasticsearch.core.internal.provider to org.elasticsearch.xcontent, org.elasticsearch.nativeaccess; + + uses ModuleQualifiedExportsService; } diff --git a/libs/core/src/main/java/org/elasticsearch/core/CharArrays.java b/libs/core/src/main/java/org/elasticsearch/core/CharArrays.java index b2e74d5c79c48..eaa3615275346 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/CharArrays.java +++ b/libs/core/src/main/java/org/elasticsearch/core/CharArrays.java @@ -21,13 +21,17 @@ public final class CharArrays { private CharArrays() {} + public static char[] utf8BytesToChars(byte[] utf8Bytes) { + return utf8BytesToChars(utf8Bytes, 0, utf8Bytes.length); + } + /** * Decodes the provided byte[] to a UTF-8 char[]. This is done while avoiding * conversions to String. The provided byte[] is not modified by this method, so * the caller needs to take care of clearing the value if it is sensitive. */ - public static char[] utf8BytesToChars(byte[] utf8Bytes) { - final ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes); + public static char[] utf8BytesToChars(byte[] utf8Bytes, int offset, int len) { + final ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes, offset, len); final CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer); final char[] chars; if (charBuffer.hasArray()) { diff --git a/libs/core/src/main/java/org/elasticsearch/core/SimpleRefCounted.java b/libs/core/src/main/java/org/elasticsearch/core/SimpleRefCounted.java new file mode 100644 index 0000000000000..367133641d464 --- /dev/null +++ b/libs/core/src/main/java/org/elasticsearch/core/SimpleRefCounted.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.core; + +/** + * {@link RefCounted} which does nothing when all references are released. It is the responsibility of the caller + * to run whatever release logic should be executed when {@link AbstractRefCounted#decRef()} returns true. 
+ */ +public class SimpleRefCounted extends AbstractRefCounted { + @Override + protected void closeInternal() {} +} diff --git a/libs/core/src/main/java/org/elasticsearch/core/internal/provider/ProviderLocator.java b/libs/core/src/main/java/org/elasticsearch/core/internal/provider/ProviderLocator.java index a8bd88723bbef..b0e3df81ed28c 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/internal/provider/ProviderLocator.java +++ b/libs/core/src/main/java/org/elasticsearch/core/internal/provider/ProviderLocator.java @@ -8,6 +8,8 @@ package org.elasticsearch.core.internal.provider; +import org.elasticsearch.jdk.ModuleQualifiedExportsService; + import java.io.IOException; import java.io.UncheckedIOException; import java.lang.module.Configuration; @@ -22,6 +24,8 @@ import java.util.Set; import java.util.function.Supplier; +import static org.elasticsearch.jdk.ModuleQualifiedExportsService.exposeQualifiedExportsAndOpens; + /** * A provider locator that finds the implementation of the specified provider. * @@ -120,6 +124,10 @@ private T loadAsModule(EmbeddedImplClassLoader loader) throws IOException { ModuleLayer parentLayer = ModuleLayer.boot(); Configuration cf = parentLayer.configuration().resolve(ModuleFinder.of(), moduleFinder, Set.of(providerModuleName)); ModuleLayer layer = parentLayer.defineModules(cf, nm -> loader); // all modules in one loader + // check each module for boot modules that have qualified exports/opens to it + for (Module m : layer.modules()) { + exposeQualifiedExportsAndOpens(m, ModuleQualifiedExportsService.getBootServices()); + } ServiceLoader sl = ServiceLoader.load(layer, providerType); return sl.findFirst().orElseThrow(newIllegalStateException(providerName)); } diff --git a/libs/core/src/main/java/org/elasticsearch/jdk/ModuleQualifiedExportsService.java b/libs/core/src/main/java/org/elasticsearch/jdk/ModuleQualifiedExportsService.java index 0858493ea003d..9e255b109685f 100644 --- a/libs/core/src/main/java/org/elasticsearch/jdk/ModuleQualifiedExportsService.java +++ b/libs/core/src/main/java/org/elasticsearch/jdk/ModuleQualifiedExportsService.java @@ -8,6 +8,9 @@ package org.elasticsearch.jdk; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; + import java.lang.module.ModuleDescriptor.Exports; import java.lang.module.ModuleDescriptor.Opens; import java.util.ArrayList; @@ -15,6 +18,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.ServiceLoader; import java.util.Set; import java.util.function.Function; import java.util.function.Predicate; @@ -24,14 +28,68 @@ /** * An object that provides a callback for qualified exports and opens. * - * Because Elasticsearch constructs plugin module layers dynamically, qualified - * exports are silently dropped in the boot layer. Modules that have qualified - * exports should implement this service so that when a qualified export - * module is loaded, the exporting or opening module can be informed and - * export or open dynamically to the newly loaded module. + * Because Elasticsearch sometimes constructs module layers dynamically + * (eg for plugins), qualified exports are silently dropped if a target + * module does not yet exist. Modules that have qualified exports to + * other dynamically created modules should implement this service so + * that when a qualified export module is loaded, the exporting or + * opening module can be informed and export or open dynamically to + * the newly loaded module. 
*/ public abstract class ModuleQualifiedExportsService { + private static final Logger logger = LogManager.getLogger(ModuleQualifiedExportsService.class); + + // holds instances of ModuleQualfiedExportsService that exist in the boot layer + private static class Holder { + private static final Map> exportsServices; + + static { + Map> qualifiedExports = new HashMap<>(); + var loader = ServiceLoader.load(ModuleQualifiedExportsService.class, ModuleQualifiedExportsService.class.getClassLoader()); + for (var exportsService : loader) { + addExportsService(qualifiedExports, exportsService, exportsService.getClass().getModule().getName()); + } + exportsServices = Map.copyOf(qualifiedExports); + } + } + + /** + * A utility method to add an export service to the given map of exports services. + * + * The map is inverted, keyed by the target module name to which an exports/opens applies. + * + * @param qualifiedExports A map of modules to which qualfied exports need to be applied + * @param exportsService The exports service to add to the map + * @param moduleName The name of the module that is doing the exporting + */ + public static void addExportsService( + Map> qualifiedExports, + ModuleQualifiedExportsService exportsService, + String moduleName + ) { + for (String targetName : exportsService.getTargets()) { + logger.debug("Registered qualified export from module " + moduleName + " to " + targetName); + qualifiedExports.computeIfAbsent(targetName, k -> new ArrayList<>()).add(exportsService); + } + } + + /** + * Adds qualified exports and opens declared in other upstream modules to the target module. + * This is required since qualified statements targeting yet-to-be-created modules, i.e. plugins, + * are silently dropped when the boot layer is created. + */ + public static void exposeQualifiedExportsAndOpens(Module target, Map> qualifiedExports) { + qualifiedExports.getOrDefault(target.getName(), List.of()).forEach(exportService -> exportService.addExportsAndOpens(target)); + } + + /** + * Returns a mapping of ModuleQualifiedExportsServices that exist in the boot layer. + */ + public static Map> getBootServices() { + return Holder.exportsServices; + } + protected final Module module; private final Map> qualifiedExports; private final Map> qualifiedOpens; diff --git a/libs/native/build.gradle b/libs/native/build.gradle new file mode 100644 index 0000000000000..83a169ce7c2d1 --- /dev/null +++ b/libs/native/build.gradle @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +import org.elasticsearch.gradle.transform.UnzipTransform +import org.elasticsearch.gradle.internal.GenerateProviderManifest +import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask +import org.gradle.api.internal.artifacts.ArtifactAttributes + +import java.util.stream.Collectors + +apply plugin: 'elasticsearch.publish' +apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.mrjar' +apply plugin: 'elasticsearch.embedded-providers' + +embeddedProviders { + impl 'native-access-jna', project(':libs:elasticsearch-native:jna') +} + +dependencies { + api project(':libs:elasticsearch-core') + api project(':libs:elasticsearch-logging') + testImplementation(project(":test:framework")) { + exclude group: 'org.elasticsearch', module: 'elasticsearch-native' + } +} + +tasks.withType(CheckForbiddenApisTask).configureEach { + replaceSignatureFiles 'jdk-signatures' +} + +tasks.named('forbiddenApisMain21').configure { + ignoreMissingClasses = true +} diff --git a/libs/native/jna/build.gradle b/libs/native/jna/build.gradle new file mode 100644 index 0000000000000..555f17152c418 --- /dev/null +++ b/libs/native/jna/build.gradle @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +apply plugin: 'elasticsearch.java' + +base { + archivesName = "native-access-jna" +} + +dependencies { + compileOnly project(':libs:elasticsearch-core') + compileOnly project(':libs:elasticsearch-native') + // TODO: this will become an implementation dep onces jna is removed from server + compileOnly "net.java.dev.jna:jna:${versions.jna}" + + testImplementation(project(":test:framework")) { + exclude group: 'org.elasticsearch', module: 'elasticsearch-native' + } +} + +tasks.named('forbiddenApisMain').configure { + replaceSignatureFiles 'jdk-signatures' +} + +// not published, so no need for javadoc +tasks.named("javadoc").configure { enabled = false } diff --git a/libs/native/jna/licences/jna-LICENSE.txt b/libs/native/jna/licences/jna-LICENSE.txt new file mode 100644 index 0000000000000..f433b1a53f5b8 --- /dev/null +++ b/libs/native/jna/licences/jna-LICENSE.txt @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/libs/native/jna/licences/jna-NOTICE.txt b/libs/native/jna/licences/jna-NOTICE.txt new file mode 100644 index 0000000000000..8d1c8b69c3fce --- /dev/null +++ b/libs/native/jna/licences/jna-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/libs/native/jna/src/main/java/module-info.java b/libs/native/jna/src/main/java/module-info.java new file mode 100644 index 0000000000000..5c777170d2b56 --- /dev/null +++ b/libs/native/jna/src/main/java/module-info.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +import org.elasticsearch.nativeaccess.jna.JnaNativeLibraryProvider; +import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; + +module org.elasticsearch.nativeaccess.jna { + requires org.elasticsearch.base; + requires org.elasticsearch.nativeaccess; + requires org.elasticsearch.logging; + requires com.sun.jna; + + provides NativeLibraryProvider with JnaNativeLibraryProvider; +} diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java new file mode 100644 index 0000000000000..a513e89b6a3b3 --- /dev/null +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jna; + +import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; + +import java.util.Map; + +public class JnaNativeLibraryProvider extends NativeLibraryProvider { + public JnaNativeLibraryProvider() { + super("jna", Map.of(PosixCLibrary.class, JnaPosixCLibrary::new)); + } +} diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java new file mode 100644 index 0000000000000..bec9e75bdc2ce --- /dev/null +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess.jna; + +import com.sun.jna.Library; +import com.sun.jna.Native; + +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; + +class JnaPosixCLibrary implements PosixCLibrary { + + private interface NativeFunctions extends Library { + int geteuid(); + } + + private final NativeFunctions functions; + + JnaPosixCLibrary() { + this.functions = Native.load("c", NativeFunctions.class); + } + + @Override + public int geteuid() { + return functions.geteuid(); + } +} diff --git a/libs/native/jna/src/main/resources/META-INF/services/org.elasticsearch.nativeaccess.lib.NativeLibraryProvider b/libs/native/jna/src/main/resources/META-INF/services/org.elasticsearch.nativeaccess.lib.NativeLibraryProvider new file mode 100644 index 0000000000000..eb791137d7154 --- /dev/null +++ b/libs/native/jna/src/main/resources/META-INF/services/org.elasticsearch.nativeaccess.lib.NativeLibraryProvider @@ -0,0 +1 @@ +org.elasticsearch.nativeaccess.jna.JnaNativeLibraryProvider \ No newline at end of file diff --git a/libs/native/src/main/java/module-info.java b/libs/native/src/main/java/module-info.java new file mode 100644 index 0000000000000..dbbbebf5fd393 --- /dev/null +++ b/libs/native/src/main/java/module-info.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +import org.elasticsearch.jdk.ModuleQualifiedExportsService; +import org.elasticsearch.nativeaccess.exports.NativeAccessModuleExportsService; +import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; + +module org.elasticsearch.nativeaccess { + requires org.elasticsearch.base; + requires org.elasticsearch.logging; + + exports org.elasticsearch.nativeaccess to org.elasticsearch.server; + // allows jna to implement a library provider, and ProviderLocator to load it + exports org.elasticsearch.nativeaccess.lib to org.elasticsearch.nativeaccess.jna, org.elasticsearch.base; + + uses NativeLibraryProvider; + + // allows qualified exports from this module to modules not in the boot layer, ie jna + exports org.elasticsearch.nativeaccess.exports to org.elasticsearch.base; + + provides ModuleQualifiedExportsService with NativeAccessModuleExportsService; + +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java new file mode 100644 index 0000000000000..5f69101696884 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess; + +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; + +abstract class AbstractNativeAccess implements NativeAccess { + + protected static final Logger logger = LogManager.getLogger(NativeAccess.class); + + private final String name; + + protected AbstractNativeAccess(String name) { + this.name = name; + } + + String getName() { + return name; + } +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java new file mode 100644 index 0000000000000..f990dbdf2d9de --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess; + +import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; + +class LinuxNativeAccess extends PosixNativeAccess { + LinuxNativeAccess(NativeLibraryProvider libraryProvider) { + super("Linux", libraryProvider); + } +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java new file mode 100644 index 0000000000000..9f29ac7668a47 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess; + +import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; + +class MacNativeAccess extends PosixNativeAccess { + + MacNativeAccess(NativeLibraryProvider libraryProvider) { + super("MacOS", libraryProvider); + } +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java new file mode 100644 index 0000000000000..5091c75041786 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess; + +/** + * Provides access to native functionality needed by Elastisearch. + */ +public interface NativeAccess { + + /** + * Get the one and only instance of {@link NativeAccess} which is specific to the running platform and JVM. + */ + static NativeAccess instance() { + return NativeAccessHolder.INSTANCE; + } + + /** + * Determine whether this JVM is running as the root user. 
+ * + * @return true if running as root, or false if unsure + */ + boolean definitelyRunningAsRoot(); +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccessHolder.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccessHolder.java new file mode 100644 index 0000000000000..6abbe02c47865 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccessHolder.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess; + +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; + +class NativeAccessHolder { + + protected static final Logger logger = LogManager.getLogger(NativeAccess.class); + + static final NativeAccess INSTANCE; + + static { + var libProvider = NativeLibraryProvider.instance(); + var os = System.getProperty("os.name"); + + AbstractNativeAccess inst = null; + try { + if (os.startsWith("Linux")) { + inst = new LinuxNativeAccess(libProvider); + } else if (os.startsWith("Mac OS")) { + inst = new MacNativeAccess(libProvider); + } else if (os.startsWith("Windows")) { + inst = new WindowsNativeAccess(libProvider); + } else { + logger.warn("Unsupported OS [" + os + "]. Native methods will be disabled."); + } + } catch (LinkageError e) { + logger.warn("Unable to load native provider. Native methods will be disabled.", e); + } + if (inst == null) { + inst = new NoopNativeAccess(); + } else { + logger.info("Using [" + libProvider.getName() + "] native provider and native methods for [" + inst.getName() + "]"); + } + INSTANCE = inst; + } +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java new file mode 100644 index 0000000000000..2bc06f21c9775 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess; + +class NoopNativeAccess extends AbstractNativeAccess { + + NoopNativeAccess() { + super("noop"); + } + + @Override + public boolean definitelyRunningAsRoot() { + logger.warn("Cannot check if running as root because native access is not available"); + return false; + } +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java new file mode 100644 index 0000000000000..050f9e89a0678 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess; + +import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; + +abstract class PosixNativeAccess extends AbstractNativeAccess { + + protected final PosixCLibrary libc; + + PosixNativeAccess(String name, NativeLibraryProvider libraryProvider) { + super(name); + this.libc = libraryProvider.getLibrary(PosixCLibrary.class); + } + + @Override + public boolean definitelyRunningAsRoot() { + return libc.geteuid() == 0; + } +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java new file mode 100644 index 0000000000000..86d3952e1504c --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess; + +import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; + +class WindowsNativeAccess extends AbstractNativeAccess { + + WindowsNativeAccess(NativeLibraryProvider libraryProvider) { + super("Windows"); + } + + @Override + public boolean definitelyRunningAsRoot() { + return false; // don't know + } +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/exports/NativeAccessModuleExportsService.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/exports/NativeAccessModuleExportsService.java new file mode 100644 index 0000000000000..f640b60f8ac76 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/exports/NativeAccessModuleExportsService.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.exports; + +import org.elasticsearch.jdk.ModuleQualifiedExportsService; + +public class NativeAccessModuleExportsService extends ModuleQualifiedExportsService { + @Override + protected void addExports(String pkg, Module target) { + module.addExports(pkg, target); + } + + @Override + protected void addOpens(String pkg, Module target) { + module.addOpens(pkg, target); + } +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java new file mode 100644 index 0000000000000..39a4137aeb0f2 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.lib; + +/** A marker interface for libraries that can be loaded by {@link org.elasticsearch.nativeaccess.lib.NativeLibraryProvider} */ +public sealed interface NativeLibrary permits PosixCLibrary {} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibraryProvider.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibraryProvider.java new file mode 100644 index 0000000000000..737d5aecd0d0a --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibraryProvider.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.lib; + +import org.elasticsearch.core.internal.provider.ProviderLocator; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.util.Map; +import java.util.Set; +import java.util.function.Supplier; + +/** + * Allows loading native library mappings. + */ +public abstract class NativeLibraryProvider { + + private final String name; + private final Map, Supplier> libraries; + + protected NativeLibraryProvider(String name, Map, Supplier> libraries) { + this.name = name; + this.libraries = libraries; + + // ensure impls actually provide all necessary libraries + for (Class libClass : NativeLibrary.class.getPermittedSubclasses()) { + if (libraries.containsKey(libClass) == false) { + throw new IllegalStateException(getClass().getSimpleName() + " missing implementation for " + libClass.getSimpleName()); + } + } + } + + /** + * Get the one and only instance of {@link NativeLibraryProvider} that is specific to the running JDK version. + */ + public static NativeLibraryProvider instance() { + return Holder.INSTANCE; + } + + /** Returns a human-understandable name for this provider */ + public String getName() { + return name; + } + + /** + * Construct an instance of the given library class. 
+ * @param cls The library class to create + * @return An instance of the class + */ + public T getLibrary(Class cls) { + Supplier libraryCtor = libraries.get(cls); + Object library = libraryCtor.get(); + assert library != null; + assert cls.isAssignableFrom(library.getClass()); + return cls.cast(library); + } + + private static NativeLibraryProvider loadProvider() { + final int runtimeVersion = Runtime.version().feature(); + if (runtimeVersion >= 21) { + return loadJdkImpl(runtimeVersion); + } + return loadJnaImpl(); + } + + private static NativeLibraryProvider loadJdkImpl(int runtimeVersion) { + try { + var lookup = MethodHandles.lookup(); + var clazz = lookup.findClass("org.elasticsearch.nativeaccess.jdk.JdkNativeLibraryProvider"); + var constructor = lookup.findConstructor(clazz, MethodType.methodType(void.class)); + try { + return (NativeLibraryProvider) constructor.invoke(); + } catch (Throwable t) { + throw new AssertionError(t); + } + } catch (NoSuchMethodException | IllegalAccessException e) { + throw new LinkageError("NativeLibraryProvider for Java " + runtimeVersion + " has a bad constructor", e); + } catch (ClassNotFoundException cnfe) { + throw new LinkageError("NativeLibraryProvider is missing for Java " + runtimeVersion, cnfe); + } + } + + private static NativeLibraryProvider loadJnaImpl() { + return new ProviderLocator<>("native-access-jna", NativeLibraryProvider.class, "org.elasticsearch.nativeaccess.jna", Set.of()) + .get(); + } + + private static final class Holder { + private Holder() {} + + static final NativeLibraryProvider INSTANCE = loadProvider(); + } +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java new file mode 100644 index 0000000000000..ecc28c682027a --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.lib; + +/** + * Provides access to methods in libc.so available on POSIX systems. + */ +public non-sealed interface PosixCLibrary extends NativeLibrary { + + /** + * Gets the effective userid of the current process. + * + * @return the effective user id + * @see geteuid + */ + int geteuid(); +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java new file mode 100644 index 0000000000000..48364bce57fdb --- /dev/null +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess.jdk; + +import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; + +import java.util.Map; + +public class JdkNativeLibraryProvider extends NativeLibraryProvider { + + public JdkNativeLibraryProvider() { + super("jdk", Map.of(PosixCLibrary.class, JdkPosixCLibrary::new)); + } +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java new file mode 100644 index 0000000000000..45993d6b20e0a --- /dev/null +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jdk; + +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; + +import java.lang.foreign.FunctionDescriptor; +import java.lang.invoke.MethodHandle; + +import static java.lang.foreign.ValueLayout.JAVA_INT; +import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; + +class JdkPosixCLibrary implements PosixCLibrary { + + private static final Logger logger = LogManager.getLogger(JdkPosixCLibrary.class); + + private static final MethodHandle geteuid$mh = downcallHandle("geteuid", FunctionDescriptor.of(JAVA_INT)); + + @Override + public int geteuid() { + try { + return (int) geteuid$mh.invokeExact(); + } catch (Throwable t) { + throw new AssertionError(t); + } + } +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/LinkerHelper.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/LinkerHelper.java new file mode 100644 index 0000000000000..c0224efb0ae9e --- /dev/null +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/LinkerHelper.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jdk; + +import java.lang.foreign.Arena; +import java.lang.foreign.FunctionDescriptor; +import java.lang.foreign.Linker; +import java.lang.foreign.MemorySegment; +import java.lang.foreign.SymbolLookup; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; + +/** + * Utility methods for calling into the native linker. + */ +class LinkerHelper { + private static final Linker LINKER = Linker.nativeLinker(); + private static final SymbolLookup SYMBOL_LOOKUP; + private static final MethodHandles.Lookup MH_LOOKUP = MethodHandles.publicLookup(); + + static { + // We first check the loader lookup, which contains libs loaded by System.load and System.loadLibrary. 
+ // If the symbol isn't found there, we fall back to the default lookup, which is "common libraries" for + // the platform, typically eg libc + SymbolLookup loaderLookup = SymbolLookup.loaderLookup(); + SYMBOL_LOOKUP = (name) -> loaderLookup.find(name).or(() -> LINKER.defaultLookup().find(name)); + } + + static MemorySegment functionAddress(String function) { + return SYMBOL_LOOKUP.find(function).orElseThrow(() -> new LinkageError("Native function " + function + " could not be found")); + } + + static MethodHandle downcallHandle(String function, FunctionDescriptor functionDescriptor, Linker.Option... options) { + return LINKER.downcallHandle(functionAddress(function), functionDescriptor, options); + } + + static MethodHandle upcallHandle(Class clazz, String methodName, FunctionDescriptor functionDescriptor) { + try { + return MH_LOOKUP.findVirtual(clazz, methodName, functionDescriptor.toMethodType()); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + static MemorySegment upcallStub(MethodHandle mh, T instance, FunctionDescriptor functionDescriptor, Arena arena) { + try { + mh = mh.bindTo(instance); + return LINKER.upcallStub(mh, functionDescriptor, arena); + } catch (Throwable t) { + throw new AssertionError(t); + } + } +} diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/AbstractPosixPreallocator.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/AbstractPosixPreallocator.java new file mode 100644 index 0000000000000..e841b38c0059e --- /dev/null +++ b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/AbstractPosixPreallocator.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.preallocate; + +import com.sun.jna.FunctionMapper; +import com.sun.jna.Library; +import com.sun.jna.Native; +import com.sun.jna.NativeLong; +import com.sun.jna.Platform; +import com.sun.jna.Structure; + +import java.io.IOException; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Locale; +import java.util.Map; + +abstract class AbstractPosixPreallocator implements Preallocator { + + /** + * Constants relating to posix libc. + * + * @param SIZEOF_STAT The size of the stat64 structure, ie sizeof(stat64_t), found by importing sys/stat.h + * @param STAT_ST_SIZE_OFFSET The offsite into stat64 at which st_size exists, ie offsetof(stat64_t, st_size), + * found by importing sys/stat.h + * @param O_CREAT The file mode for creating a file upon opening, found by importing fcntl.h + */ + protected record PosixConstants(int SIZEOF_STAT, int STAT_ST_SIZE_OFFSET, int O_CREAT) {} + + private static final int O_WRONLY = 1; + + static final class Stat64 extends Structure implements Structure.ByReference { + public byte[] _ignore1; + public NativeLong st_size = new NativeLong(0); + public byte[] _ignore2; + + Stat64(int sizeof, int stSizeOffset) { + this._ignore1 = new byte[stSizeOffset]; + this._ignore2 = new byte[sizeof - stSizeOffset - 8]; + } + } + + private interface NativeFunctions extends Library { + String strerror(int errno); + + int open(String filename, int flags, Object... 
mode); + + int close(int fd); + } + + private interface FStat64Function extends Library { + int fstat64(int fd, Stat64 stat); + } + + public static final boolean NATIVES_AVAILABLE; + private static final NativeFunctions functions; + private static final FStat64Function fstat64; + + static { + functions = AccessController.doPrivileged((PrivilegedAction) () -> { + try { + return Native.load(Platform.C_LIBRARY_NAME, NativeFunctions.class); + } catch (final UnsatisfiedLinkError e) { + return null; + } + }); + fstat64 = AccessController.doPrivileged((PrivilegedAction) () -> { + try { + return Native.load(Platform.C_LIBRARY_NAME, FStat64Function.class); + } catch (final UnsatisfiedLinkError e) { + try { + // on Linux fstat64 isn't available as a symbol, but instead uses a special __ name + var options = Map.of(Library.OPTION_FUNCTION_MAPPER, (FunctionMapper) (lib, method) -> "__fxstat64"); + return Native.load(Platform.C_LIBRARY_NAME, FStat64Function.class, options); + } catch (UnsatisfiedLinkError e2) { + return null; + } + } + }); + NATIVES_AVAILABLE = functions != null && fstat64 != null; + } + + private class PosixNativeFileHandle implements NativeFileHandle { + + private final int fd; + + PosixNativeFileHandle(int fd) { + this.fd = fd; + } + + @Override + public int fd() { + return fd; + } + + @Override + public long getSize() throws IOException { + var stat = new Stat64(constants.SIZEOF_STAT, constants.STAT_ST_SIZE_OFFSET); + if (fstat64.fstat64(fd, stat) == -1) { + throw newIOException("Could not get size of file"); + } + return stat.st_size.longValue(); + } + + @Override + public void close() throws IOException { + if (functions.close(fd) != 0) { + throw newIOException("Could not close file"); + } + } + } + + protected final PosixConstants constants; + + AbstractPosixPreallocator(PosixConstants constants) { + this.constants = constants; + } + + @Override + public boolean useNative() { + return false; + } + + @Override + public NativeFileHandle open(String path) throws IOException { + int fd = functions.open(path, O_WRONLY, constants.O_CREAT); + if (fd < 0) { + throw newIOException(String.format(Locale.ROOT, "Could not open file [%s] for preallocation", path)); + } + return new PosixNativeFileHandle(fd); + } + + @Override + public String error(int errno) { + return functions.strerror(errno); + } + + private static IOException newIOException(String prefix) { + int errno = Native.getLastError(); + return new IOException(String.format(Locale.ROOT, "%s(errno=%d): %s", prefix, errno, functions.strerror(errno))); + } +} diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/LinuxPreallocator.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/LinuxPreallocator.java index 6477c053f1efa..25ad4a26fd03e 100644 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/LinuxPreallocator.java +++ b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/LinuxPreallocator.java @@ -13,11 +13,15 @@ import java.security.AccessController; import java.security.PrivilegedAction; -final class LinuxPreallocator implements Preallocator { +final class LinuxPreallocator extends AbstractPosixPreallocator { + + LinuxPreallocator() { + super(new PosixConstants(144, 48, 64)); + } @Override public boolean useNative() { - return Natives.NATIVES_AVAILABLE; + return Natives.NATIVES_AVAILABLE && super.useNative(); } @Override @@ -26,11 +30,6 @@ public int preallocate(final int fd, final long currentSize, final long fileSize return rc == 0 ? 
0 : Native.getLastError(); } - @Override - public String error(int errno) { - return Natives.strerror(errno); - } - private static class Natives { public static final boolean NATIVES_AVAILABLE; @@ -47,9 +46,6 @@ private static class Natives { } static native int fallocate(int fd, int mode, long offset, long length); - - static native String strerror(int errno); - } } diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/MacOsPreallocator.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/MacOsPreallocator.java index 9e5445320ce69..149cf80527bd0 100644 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/MacOsPreallocator.java +++ b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/MacOsPreallocator.java @@ -17,11 +17,15 @@ import java.util.Arrays; import java.util.List; -final class MacOsPreallocator implements Preallocator { +final class MacOsPreallocator extends AbstractPosixPreallocator { + + MacOsPreallocator() { + super(new PosixConstants(144, 96, 512)); + } @Override public boolean useNative() { - return Natives.NATIVES_AVAILABLE; + return Natives.NATIVES_AVAILABLE && super.useNative(); } @Override @@ -47,11 +51,6 @@ public int preallocate(final int fd, final long currentSize /* unused */ , final return 0; } - @Override - public String error(final int errno) { - return Natives.strerror(errno); - } - private static class Natives { static boolean NATIVES_AVAILABLE; @@ -99,9 +98,6 @@ protected List getFieldOrder() { static native int fcntl(int fd, int cmd, Fcntl.FStore fst); static native int ftruncate(int fd, NativeLong length); - - static native String strerror(int errno); - } } diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/NoNativePreallocator.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/NoNativePreallocator.java index 8e510ba16e55d..447b178ba41d9 100644 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/NoNativePreallocator.java +++ b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/NoNativePreallocator.java @@ -7,6 +7,8 @@ */ package org.elasticsearch.preallocate; +import java.io.IOException; + final class NoNativePreallocator implements Preallocator { @Override @@ -14,6 +16,11 @@ public boolean useNative() { return false; } + @Override + public NativeFileHandle open(String path) throws IOException { + throw new UnsupportedOperationException(); + } + @Override public int preallocate(final int fd, final long currentSize, final long fileSize) { throw new UnsupportedOperationException(); diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocate.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocate.java index 301a21f94004a..8f7214e0877ba 100644 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocate.java +++ b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocate.java @@ -10,6 +10,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.preallocate.Preallocator.NativeFileHandle; import java.io.FileOutputStream; import java.io.IOException; @@ -17,7 +18,6 @@ import java.lang.reflect.Field; import java.nio.file.Files; import java.nio.file.Path; -import java.security.AccessController; import java.security.PrivilegedExceptionAction; public class Preallocate { @@ -42,21 +42,16 @@ public static void preallocate(final Path cacheFile, final long fileSize) 
throws } } - @SuppressForbidden(reason = "need access to fd on FileOutputStream") + @SuppressForbidden(reason = "need access to toFile for RandomAccessFile") private static void preallocate(final Path cacheFile, final long fileSize, final Preallocator prealloactor) throws IOException { boolean success = false; try { if (prealloactor.useNative()) { - try (FileOutputStream fileChannel = new FileOutputStream(cacheFile.toFile())) { - long currentSize = fileChannel.getChannel().size(); + try (NativeFileHandle openFile = prealloactor.open(cacheFile.toAbsolutePath().toString())) { + long currentSize = openFile.getSize(); if (currentSize < fileSize) { logger.info("pre-allocating cache file [{}] ({} bytes) using native methods", cacheFile, fileSize); - final Field field = AccessController.doPrivileged(new FileDescriptorFieldAction(fileChannel)); - final int errno = prealloactor.preallocate( - (int) field.get(fileChannel.getFD()), - currentSize, - fileSize - currentSize - ); + final int errno = prealloactor.preallocate(openFile.fd(), currentSize, fileSize - currentSize); if (errno == 0) { success = true; logger.debug("pre-allocated cache file [{}] using native methods", cacheFile); diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocator.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocator.java index 653d121a39f30..b70b3ff03f4bd 100644 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocator.java +++ b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocator.java @@ -7,11 +7,22 @@ */ package org.elasticsearch.preallocate; +import java.io.IOException; + /** * Represents platform native methods for pre-allocating files. */ interface Preallocator { + /** A handle for an open file */ + interface NativeFileHandle extends AutoCloseable { + /** A valid native file descriptor */ + int fd(); + + /** Retrieves the current size of the file */ + long getSize() throws IOException; + } + /** * Returns if native methods for pre-allocating files are available. * @@ -19,6 +30,14 @@ interface Preallocator { */ boolean useNative(); + /** + * Open a file for preallocation. + * + * @param path The absolute path to the file to be opened + * @return a handle to the open file that may be used for preallocate + */ + NativeFileHandle open(String path) throws IOException; + /** * Pre-allocate a file of given current size to the specified size using the given file descriptor. * diff --git a/libs/x-content/build.gradle b/libs/x-content/build.gradle index 5c9dd49c007b8..15a79364559a2 100644 --- a/libs/x-content/build.gradle +++ b/libs/x-content/build.gradle @@ -6,44 +6,17 @@ * Side Public License, v 1. 
*/ - -import org.elasticsearch.gradle.transform.UnzipTransform -import org.elasticsearch.gradle.internal.GenerateProviderManifest -import org.gradle.api.internal.artifacts.ArtifactAttributes - -import java.util.stream.Collectors - apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.publish' +apply plugin: 'elasticsearch.embedded-providers' -def isImplAttr = Attribute.of("is.impl", Boolean) - -configurations { - providerImpl { - attributes.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE) - attributes.attribute(isImplAttr, true) - } +embeddedProviders { + impl 'x-content', project(':libs:elasticsearch-x-content:impl') } dependencies { - registerTransform( - UnzipTransform.class, transformSpec -> { - transformSpec.getFrom() - .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.JAR_TYPE) - .attribute(isImplAttr, true) - transformSpec.getTo() - .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE) - .attribute(isImplAttr, true) - transformSpec.parameters(parameters -> { - parameters.includeArtifactName.set(true) - }) - - }) - api project(':libs:elasticsearch-core') - providerImpl project(':libs:elasticsearch-x-content:impl') - testImplementation(project(":test:framework")) { exclude group: 'org.elasticsearch', module: 'elasticsearch-x-content' } @@ -66,18 +39,3 @@ tasks.named("thirdPartyAudit").configure { tasks.named("dependencyLicenses").configure { mapping from: /jackson-.*/, to: 'jackson' } - -Directory generatedResourcesDir = layout.buildDirectory.dir('generated-resources').get() -def generateProviderManifest = tasks.register("generateProviderManifest", GenerateProviderManifest.class) { - manifestFile = generatedResourcesDir.file("LISTING.TXT") - getProviderImplClasspath().from(configurations.providerImpl) -} - -def generateProviderImpl = tasks.register("generateProviderImpl", Sync) { - destinationDir = generatedResourcesDir.dir("impl").getAsFile() - into("IMPL-JARS/x-content") { - from(configurations.providerImpl) - from(generateProviderManifest) - } -} -sourceSets.main.output.dir(generateProviderImpl) diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/NamedXContentRegistry.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/NamedXContentRegistry.java index 021ea4c09776e..02caac775c5e3 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/NamedXContentRegistry.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/NamedXContentRegistry.java @@ -12,6 +12,7 @@ import org.elasticsearch.core.RestApiVersion; import java.io.IOException; +import java.util.EnumMap; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -95,7 +96,7 @@ private static Map, Map>> createRegi return emptyMap(); } - Map, Map>> newRegistry = new HashMap<>(); + Map, Map>> newRegistry = new EnumMap<>(RestApiVersion.class); for (Entry entry : entries) { for (String name : entry.name.getAllNamesIncludedDeprecated()) { if (RestApiVersion.minimumSupported().matches(entry.restApiCompatibility)) { diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java index d63c61eea876c..41512af0f79d4 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java @@ -976,7 +976,7 @@ public XContentBuilder value(Map 
map) throws IOException { return map(map); } - private XContentBuilder value(ToXContent value, ToXContent.Params params) throws IOException { + public XContentBuilder value(ToXContent value, ToXContent.Params params) throws IOException { if (value == null) { return nullValue(); } diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java index b1db2f8a7d3a1..97c75689fe5dc 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java @@ -70,7 +70,7 @@ public void setUp() throws Exception { afterIndex = randomAlphaOfLength(12).toLowerCase(Locale.ROOT); startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(START_TIME); endTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(END_TIME); - numTimeSeries = 5_000; + numTimeSeries = 500; // NOTE: we need to use few dimensions to be able to index documents in an index created before introducing TSID hashing numDimensions = randomIntBetween(10, 20); @@ -275,20 +275,14 @@ public String toString() { @Override public Iterator iterator() { - return new TimeSeriesIterator(this.dataset.entrySet()); + return new TimeSeriesIterator(this.dataset.entrySet().iterator()); } public int size() { return this.dataset.size(); } - static class TimeSeriesIterator implements Iterator { - - private final Iterator> it; - - TimeSeriesIterator(final Set> entries) { - this.it = entries.iterator(); - } + record TimeSeriesIterator(Iterator> it) implements Iterator { @Override public boolean hasNext() { diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java index c17cc004e25b5..8802ffd41571d 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Maps; import org.elasticsearch.search.aggregations.AggregationReduceContext; +import org.elasticsearch.search.aggregations.AggregatorReducer; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; @@ -175,27 +176,32 @@ public InternalBucket getBucketByKey(String key) { } @Override - public InternalAggregation reduce(List aggregations, AggregationReduceContext reduceContext) { + protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { Map> bucketsMap = new HashMap<>(); - for (InternalAggregation aggregation : aggregations) { - InternalAdjacencyMatrix filters = (InternalAdjacencyMatrix) aggregation; - for (InternalBucket bucket : filters.buckets) { - List sameRangeList = bucketsMap.computeIfAbsent(bucket.key, k -> new ArrayList<>(aggregations.size())); - sameRangeList.add(bucket); + return new AggregatorReducer() { + @Override + public void 
accept(InternalAggregation aggregation) { + InternalAdjacencyMatrix filters = (InternalAdjacencyMatrix) aggregation; + for (InternalBucket bucket : filters.buckets) { + List sameRangeList = bucketsMap.computeIfAbsent(bucket.key, k -> new ArrayList<>(size)); + sameRangeList.add(bucket); + } } - } - ArrayList reducedBuckets = new ArrayList<>(bucketsMap.size()); - for (List sameRangeList : bucketsMap.values()) { - InternalBucket reducedBucket = reduceBucket(sameRangeList, reduceContext); - if (reducedBucket.docCount >= 1) { - reducedBuckets.add(reducedBucket); + @Override + public InternalAggregation get() { + List reducedBuckets = new ArrayList<>(bucketsMap.size()); + for (List sameRangeList : bucketsMap.values()) { + InternalBucket reducedBucket = reduceBucket(sameRangeList, reduceContext); + if (reducedBucket.docCount >= 1) { + reducedBuckets.add(reducedBucket); + } + } + reduceContext.consumeBucketsAndMaybeBreak(reducedBuckets.size()); + reducedBuckets.sort(Comparator.comparing(InternalBucket::getKey)); + return new InternalAdjacencyMatrix(name, reducedBuckets, getMetadata()); } - } - reduceContext.consumeBucketsAndMaybeBreak(reducedBuckets.size()); - reducedBuckets.sort(Comparator.comparing(InternalBucket::getKey)); - - return new InternalAdjacencyMatrix(name, reducedBuckets, getMetadata()); + }; } @Override @@ -203,20 +209,18 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) { return new InternalAdjacencyMatrix(name, buckets.stream().map(b -> b.finalizeSampling(samplingContext)).toList(), getMetadata()); } - @Override - protected InternalBucket reduceBucket(List buckets, AggregationReduceContext context) { - assert buckets.size() > 0; + private InternalBucket reduceBucket(List buckets, AggregationReduceContext context) { + assert buckets.isEmpty() == false; InternalBucket reduced = null; - List aggregationsList = new ArrayList<>(buckets.size()); for (InternalBucket bucket : buckets) { if (reduced == null) { reduced = new InternalBucket(bucket.key, bucket.docCount, bucket.aggregations); } else { reduced.docCount += bucket.docCount; } - aggregationsList.add(bucket.aggregations); } - reduced.aggregations = InternalAggregations.reduce(aggregationsList, context); + final List aggregations = new BucketAggregationList<>(buckets); + reduced.aggregations = InternalAggregations.reduce(aggregations, context); return reduced; } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index de36a9721fe38..f0dfad88c87b4 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationReduceContext; +import org.elasticsearch.search.aggregations.AggregatorReducer; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; @@ -292,15 +293,14 @@ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) * rounding returned across all the shards so the resolution of the 
buckets * is the same and they can be reduced together. */ - private BucketReduceResult reduceBuckets(List aggregations, AggregationReduceContext reduceContext) { + private BucketReduceResult reduceBuckets(List aggregations, AggregationReduceContext reduceContext) { // First we need to find the highest level rounding used across all the // shards int reduceRoundingIdx = 0; long min = Long.MAX_VALUE; long max = Long.MIN_VALUE; - for (InternalAggregation aggregation : aggregations) { - InternalAutoDateHistogram agg = ((InternalAutoDateHistogram) aggregation); + for (InternalAutoDateHistogram agg : aggregations) { reduceRoundingIdx = Math.max(agg.bucketInfo.roundingIdx, reduceRoundingIdx); if (false == agg.buckets.isEmpty()) { min = Math.min(min, agg.buckets.get(0).key); @@ -315,8 +315,7 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent(histogram.buckets.iterator())); } @@ -408,16 +407,14 @@ private List mergeBuckets( return reducedBuckets; } - @Override - protected Bucket reduceBucket(List buckets, AggregationReduceContext context) { - assert buckets.size() > 0; - List aggregations = new ArrayList<>(buckets.size()); + private Bucket reduceBucket(List buckets, AggregationReduceContext context) { + assert buckets.isEmpty() == false; long docCount = 0; for (Bucket bucket : buckets) { docCount += bucket.docCount; - aggregations.add(bucket.getAggregations()); } - InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); + final List aggregations = new BucketAggregationList<>(buckets); + final InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); return new InternalAutoDateHistogram.Bucket(buckets.get(0).key, docCount, format, aggs); } @@ -509,35 +506,47 @@ static int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, Rou } @Override - public InternalAggregation reduce(List aggregations, AggregationReduceContext reduceContext) { - BucketReduceResult reducedBucketsResult = reduceBuckets(aggregations, reduceContext); + protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { + return new AggregatorReducer() { + final List aggregations = new ArrayList<>(size); - if (reduceContext.isFinalReduce()) { - // adding empty buckets if needed - reducedBucketsResult = addEmptyBuckets(reducedBucketsResult, reduceContext); + @Override + public void accept(InternalAggregation aggregation) { + aggregations.add((InternalAutoDateHistogram) aggregation); + } - // Adding empty buckets may have tipped us over the target so merge the buckets again if needed - reducedBucketsResult = mergeBucketsIfNeeded(reducedBucketsResult, reduceContext); + @Override + public InternalAggregation get() { + BucketReduceResult reducedBucketsResult = reduceBuckets(aggregations, reduceContext); - // Now finally see if we need to merge consecutive buckets together to make a coarser interval at the same rounding - reducedBucketsResult = maybeMergeConsecutiveBuckets(reducedBucketsResult, reduceContext); - } - reduceContext.consumeBucketsAndMaybeBreak(reducedBucketsResult.buckets.size()); - BucketInfo bucketInfo = new BucketInfo( - this.bucketInfo.roundingInfos, - reducedBucketsResult.roundingIdx, - this.bucketInfo.emptySubAggregations - ); + if (reduceContext.isFinalReduce()) { + // adding empty buckets if needed + reducedBucketsResult = addEmptyBuckets(reducedBucketsResult, reduceContext); - return new InternalAutoDateHistogram( - getName(), - reducedBucketsResult.buckets, - targetBuckets, - bucketInfo, - format, - 
getMetadata(), - reducedBucketsResult.innerInterval - ); + // Adding empty buckets may have tipped us over the target so merge the buckets again if needed + reducedBucketsResult = mergeBucketsIfNeeded(reducedBucketsResult, reduceContext); + + // Now finally see if we need to merge consecutive buckets together to make a coarser interval at the same rounding + reducedBucketsResult = maybeMergeConsecutiveBuckets(reducedBucketsResult, reduceContext); + } + reduceContext.consumeBucketsAndMaybeBreak(reducedBucketsResult.buckets.size()); + BucketInfo bucketInfo = new BucketInfo( + getBucketInfo().roundingInfos, + reducedBucketsResult.roundingIdx, + getBucketInfo().emptySubAggregations + ); + + return new InternalAutoDateHistogram( + getName(), + reducedBucketsResult.buckets, + targetBuckets, + bucketInfo, + format, + getMetadata(), + reducedBucketsResult.innerInterval + ); + } + }; } @Override diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java index 67a7773fd01bb..c20d3f3ba612a 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.search.aggregations.AggregationReduceContext; +import org.elasticsearch.search.aggregations.AggregatorReducer; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; @@ -177,69 +178,70 @@ protected void doWriteTo(StreamOutput out) throws IOException { } @Override - public InternalAggregation reduce(List aggregations, AggregationReduceContext reduceContext) { - // TODO: optimize single result case either by having a if check here and return aggregations.get(0) or - // by overwriting the mustReduceOnSingleInternalAgg() method - final int initialCapacity = aggregations.stream() - .map(value -> (InternalTimeSeries) value) - .mapToInt(value -> value.getBuckets().size()) - .max() - .getAsInt(); - - final PriorityQueue> pq = new PriorityQueue<>(aggregations.size()) { + protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { + final PriorityQueue> pq = new PriorityQueue<>(size) { @Override protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) { return a.current().key.compareTo(b.current().key) < 0; } }; - for (InternalAggregation aggregation : aggregations) { - InternalTimeSeries timeSeries = (InternalTimeSeries) aggregation; - if (timeSeries.buckets.isEmpty() == false) { - IteratorAndCurrent iterator = new IteratorAndCurrent<>(timeSeries.buckets.iterator()); - pq.add(iterator); + return new AggregatorReducer() { + int initialCapacity = 0; + + @Override + public void accept(InternalAggregation aggregation) { + InternalTimeSeries timeSeries = (InternalTimeSeries) aggregation; + if (timeSeries.buckets.isEmpty() == false) { + initialCapacity = Math.max(initialCapacity, timeSeries.buckets.size()); + IteratorAndCurrent iterator = new IteratorAndCurrent<>(timeSeries.buckets.iterator()); + pq.add(iterator); + } } - } - InternalTimeSeries reduced = new 
InternalTimeSeries(name, new ArrayList<>(initialCapacity), keyed, getMetadata()); - Integer size = reduceContext.builder() instanceof TimeSeriesAggregationBuilder - ? ((TimeSeriesAggregationBuilder) reduceContext.builder()).getSize() - : null; // tests may use a fake builder - List bucketsWithSameKey = new ArrayList<>(aggregations.size()); - BytesRef prevTsid = null; - while (pq.size() > 0) { - reduceContext.consumeBucketsAndMaybeBreak(1); - bucketsWithSameKey.clear(); + @Override + public InternalAggregation get() { + InternalTimeSeries reduced = new InternalTimeSeries(name, new ArrayList<>(initialCapacity), keyed, getMetadata()); + List bucketsWithSameKey = new ArrayList<>(size); // TODO: not sure about this size? + Integer size = reduceContext.builder() instanceof TimeSeriesAggregationBuilder + ? ((TimeSeriesAggregationBuilder) reduceContext.builder()).getSize() + : null; // tests may use a fake builder + BytesRef prevTsid = null; + while (pq.size() > 0) { + reduceContext.consumeBucketsAndMaybeBreak(1); + bucketsWithSameKey.clear(); + + while (bucketsWithSameKey.isEmpty() || bucketsWithSameKey.get(0).key.equals(pq.top().current().key)) { + IteratorAndCurrent iterator = pq.top(); + bucketsWithSameKey.add(iterator.current()); + if (iterator.hasNext()) { + iterator.next(); + pq.updateTop(); + } else { + pq.pop(); + if (pq.size() == 0) { + break; + } + } + } - while (bucketsWithSameKey.isEmpty() || bucketsWithSameKey.get(0).key.equals(pq.top().current().key)) { - IteratorAndCurrent iterator = pq.top(); - bucketsWithSameKey.add(iterator.current()); - if (iterator.hasNext()) { - iterator.next(); - pq.updateTop(); - } else { - pq.pop(); - if (pq.size() == 0) { + InternalBucket reducedBucket; + if (bucketsWithSameKey.size() == 1) { + reducedBucket = bucketsWithSameKey.get(0); + reducedBucket.aggregations = InternalAggregations.reduce(List.of(reducedBucket.aggregations), reduceContext); + } else { + reducedBucket = reduceBucket(bucketsWithSameKey, reduceContext); + } + BytesRef tsid = reducedBucket.key; + assert prevTsid == null || tsid.compareTo(prevTsid) > 0; + reduced.buckets.add(reducedBucket); + if (size != null && reduced.buckets.size() >= size) { break; } + prevTsid = tsid; } + return reduced; } - - InternalBucket reducedBucket; - if (bucketsWithSameKey.size() == 1) { - reducedBucket = bucketsWithSameKey.get(0); - reducedBucket.aggregations = InternalAggregations.reduce(List.of(reducedBucket.aggregations), reduceContext); - } else { - reducedBucket = reduceBucket(bucketsWithSameKey, reduceContext); - } - BytesRef tsid = reducedBucket.key; - assert prevTsid == null || tsid.compareTo(prevTsid) > 0; - reduced.buckets.add(reducedBucket); - if (size != null && reduced.buckets.size() >= size) { - break; - } - prevTsid = tsid; - } - return reduced; + }; } @Override @@ -252,19 +254,17 @@ public InternalBucket createBucket(InternalAggregations aggregations, InternalBu return new InternalBucket(prototype.key, prototype.docCount, aggregations, prototype.keyed); } - @Override - protected InternalBucket reduceBucket(List buckets, AggregationReduceContext context) { + private InternalBucket reduceBucket(List buckets, AggregationReduceContext context) { InternalTimeSeries.InternalBucket reduced = null; - List aggregationsList = new ArrayList<>(buckets.size()); for (InternalTimeSeries.InternalBucket bucket : buckets) { if (reduced == null) { reduced = new InternalTimeSeries.InternalBucket(bucket.key, bucket.docCount, bucket.aggregations, bucket.keyed); } else { reduced.docCount += bucket.docCount; } - 
aggregationsList.add(bucket.aggregations); } - reduced.aggregations = InternalAggregations.reduce(aggregationsList, context); + final List aggregations = new BucketAggregationList<>(buckets); + reduced.aggregations = InternalAggregations.reduce(aggregations, context); return reduced; } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/InternalMatrixStats.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/InternalMatrixStats.java index c7428393b275a..e5c76300d3de4 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/InternalMatrixStats.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/InternalMatrixStats.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationReduceContext; +import org.elasticsearch.search.aggregations.AggregatorReducer; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.xcontent.XContentBuilder; @@ -237,36 +238,47 @@ public Object getProperty(List path) { } @Override - public InternalAggregation reduce(List aggregations, AggregationReduceContext reduceContext) { - // merge stats across all shards - List aggs = new ArrayList<>(aggregations); - aggs.removeIf(p -> ((InternalMatrixStats) p).stats == null); + protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceContext, int size) { + final List aggregations = new ArrayList<>(size); + return new AggregatorReducer() { + @Override + public void accept(InternalAggregation aggregation) { + // TODO: probably can be done in without collecting the aggregators + final InternalMatrixStats internalMatrixStats = (InternalMatrixStats) aggregation; + if (internalMatrixStats.stats != null) { + aggregations.add(internalMatrixStats); + } + } - // return empty result iff all stats are null - if (aggs.isEmpty()) { - return new InternalMatrixStats(name, 0, null, new MatrixStatsResults(), getMetadata()); - } + @Override + public InternalAggregation get() { + // return empty result iff all stats are null + if (aggregations.isEmpty()) { + return new InternalMatrixStats(name, 0, null, new MatrixStatsResults(), getMetadata()); + } - RunningStats runningStats = new RunningStats(); - for (InternalAggregation agg : aggs) { - final Set missingFields = runningStats.missingFieldNames(((InternalMatrixStats) agg).stats); - if (missingFields.isEmpty() == false) { - throw new IllegalArgumentException( - "Aggregation [" - + agg.getName() - + "] all fields must exist in all indices, but some indices are missing these fields [" - + String.join(", ", new TreeSet<>(missingFields)) - + "]" - ); - } - runningStats.merge(((InternalMatrixStats) agg).stats); - } + RunningStats runningStats = new RunningStats(); + for (InternalMatrixStats agg : aggregations) { + final Set missingFields = runningStats.missingFieldNames(agg.stats); + if (missingFields.isEmpty() == false) { + throw new IllegalArgumentException( + "Aggregation [" + + agg.getName() + + "] all fields must exist in all indices, but some indices are missing these fields [" + + String.join(", ", new TreeSet<>(missingFields)) + + "]" + ); + } + runningStats.merge(agg.stats); + } - if (reduceContext.isFinalReduce()) { - MatrixStatsResults matrixStatsResults = new MatrixStatsResults(runningStats); - return new 
InternalMatrixStats(name, matrixStatsResults.getDocCount(), runningStats, matrixStatsResults, getMetadata()); - } - return new InternalMatrixStats(name, runningStats.docCount, runningStats, null, getMetadata()); + if (reduceContext.isFinalReduce()) { + MatrixStatsResults matrixStatsResults = new MatrixStatsResults(runningStats); + return new InternalMatrixStats(name, matrixStatsResults.getDocCount(), runningStats, matrixStatsResults, getMetadata()); + } + return new InternalMatrixStats(name, runningStats.docCount, runningStats, null, getMetadata()); + } + }; } @Override diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java index ad7b1f3c1efbe..e7af9f5745d6d 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.test.InternalAggregationTestCase; import org.elasticsearch.test.TransportVersionUtils; import java.io.IOException; @@ -421,7 +422,7 @@ ReduceTestBuilder finishShardResult(String whichRounding, int innerInterval) { InternalAutoDateHistogram reduce() { assertThat("finishShardResult must be called before reduce", buckets, empty()); - return (InternalAutoDateHistogram) results.get(0).reduce(results, emptyReduceContextBuilder().forFinalReduction()); + return (InternalAutoDateHistogram) InternalAggregationTestCase.reduce(results, emptyReduceContextBuilder().forFinalReduction()); } } diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java index 4a8b2c98aef14..a77ea04a2d8a1 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.test.InternalAggregationTestCase; import java.io.IOException; import java.io.UncheckedIOException; @@ -141,7 +142,7 @@ public void testReduceSimple() { PipelineAggregator.PipelineTree.EMPTY ); - InternalTimeSeries result = (InternalTimeSeries) first.reduce(List.of(first, second, third), context); + InternalTimeSeries result = (InternalTimeSeries) InternalAggregationTestCase.reduce(List.of(first, second, third), context); assertThat(result.getBuckets().get(0).key.utf8ToString(), equalTo("1")); assertThat(result.getBuckets().get(0).getDocCount(), equalTo(5L)); assertThat(result.getBuckets().get(1).key.utf8ToString(), equalTo("10")); diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/InternalMatrixStatsTests.java 
b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/InternalMatrixStatsTests.java index 66dcd6dc1cb92..82bee40e13ca1 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/InternalMatrixStatsTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/InternalMatrixStatsTests.java @@ -148,7 +148,7 @@ public void testReduceRandom() { b -> {}, PipelineTree.EMPTY ); - InternalMatrixStats reduced = (InternalMatrixStats) shardResults.get(0).reduce(shardResults, context); + InternalMatrixStats reduced = (InternalMatrixStats) InternalAggregationTestCase.reduce(shardResults, context); multiPassStats.assertNearlyEqual(reduced.getResults()); } diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/DerivativeResultTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/DerivativeResultTests.java index e0a41ca7bddd9..bbbf302b08147 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/DerivativeResultTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/DerivativeResultTests.java @@ -36,7 +36,7 @@ protected Derivative createTestInstance(String name, Map metadat @Override public void testReduceRandom() { - expectThrows(UnsupportedOperationException.class, () -> createTestInstance("name", null).reduce(null, null)); + expectThrows(UnsupportedOperationException.class, () -> createTestInstance("name", null).getReducer(null, 0)); } @Override diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java new file mode 100644 index 0000000000000..eca45b45d1269 --- /dev/null +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java @@ -0,0 +1,473 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.datastreams; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.resolve.ResolveClusterActionRequest; +import org.elasticsearch.action.admin.indices.resolve.ResolveClusterActionResponse; +import org.elasticsearch.action.admin.indices.resolve.ResolveClusterInfo; +import org.elasticsearch.action.admin.indices.resolve.TransportResolveClusterAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.datastreams.CreateDataStreamAction; +import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.AliasMetadata; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.xcontent.ObjectPath; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; + +/** + * Tests the ResolveClusterAction around matching data streams. + * ResolveClusterIT is a sibling IT test that does additional testing + * not related to data streams. 
+ */ +public class ResolveClusterDataStreamIT extends AbstractMultiClustersTestCase { + + private static final String REMOTE_CLUSTER_1 = "remote1"; + private static final String REMOTE_CLUSTER_2 = "remote2"; + private static long EARLIEST_TIMESTAMP = 1691348810000L; + private static long LATEST_TIMESTAMP = 1691348820000L; + + @Override + protected Collection remoteClusterAlias() { + return List.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); + } + + @Override + protected Map skipUnavailableForRemoteClusters() { + return Map.of(REMOTE_CLUSTER_1, randomBoolean(), REMOTE_CLUSTER_2, true); + } + + @Override + protected boolean reuseClusters() { + return false; + } + + @Override + protected Collection> nodePlugins(String clusterAlias) { + return List.of(DataStreamsPlugin.class); + } + + public void testClusterResolveWithDataStreams() throws Exception { + Map testClusterInfo = setupThreeClusters(false); + String localDataStream = (String) testClusterInfo.get("local.datastream"); + String remoteDataStream1 = (String) testClusterInfo.get("remote1.datastream"); + String remoteIndex2 = (String) testClusterInfo.get("remote2.index"); + boolean skipUnavailable1 = (Boolean) testClusterInfo.get("remote1.skip_unavailable"); + boolean skipUnavailable2 = true; + + // test all clusters against data streams (present only on local and remote1) + { + String[] indexExpressions = new String[] { + localDataStream, + REMOTE_CLUSTER_1 + ":" + remoteDataStream1, + REMOTE_CLUSTER_2 + ":" + remoteDataStream1 // does not exist on remote2 + }; + ResolveClusterActionRequest request = new ResolveClusterActionRequest(indexExpressions); + + ActionFuture future = client(LOCAL_CLUSTER).admin() + .indices() + .execute(TransportResolveClusterAction.TYPE, request); + ResolveClusterActionResponse response = future.actionGet(10, TimeUnit.SECONDS); + assertNotNull(response); + + Map clusterInfo = response.getResolveClusterInfo(); + assertEquals(3, clusterInfo.size()); + Set expectedClusterNames = Set.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); + assertThat(clusterInfo.keySet(), equalTo(expectedClusterNames)); + + ResolveClusterInfo remote1 = clusterInfo.get(REMOTE_CLUSTER_1); + assertThat(remote1.isConnected(), equalTo(true)); + assertThat(remote1.getSkipUnavailable(), equalTo(skipUnavailable1)); + assertThat(remote1.getMatchingIndices(), equalTo(true)); + assertNotNull(remote1.getBuild().version()); + assertNull(remote1.getError()); + + ResolveClusterInfo remote2 = clusterInfo.get(REMOTE_CLUSTER_2); + assertThat(remote2.isConnected(), equalTo(true)); + assertThat(remote2.getSkipUnavailable(), equalTo(skipUnavailable2)); + assertNull(remote2.getMatchingIndices()); + assertNull(remote2.getBuild()); + assertNotNull(remote2.getError()); + assertThat(remote2.getError(), containsString("no such index [" + remoteDataStream1 + "]")); + + ResolveClusterInfo local = clusterInfo.get(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertThat(local.isConnected(), equalTo(true)); + assertThat(local.getSkipUnavailable(), equalTo(false)); + assertThat(local.getMatchingIndices(), equalTo(true)); + assertNotNull(local.getBuild().version()); + assertNull(local.getError()); + } + + // test clusters against datastream or indices, such that all should match + { + String[] indexExpressions = new String[] { + localDataStream, + REMOTE_CLUSTER_1 + ":" + remoteDataStream1, + REMOTE_CLUSTER_2 + ":" + remoteIndex2 }; + ResolveClusterActionRequest request = new ResolveClusterActionRequest(indexExpressions); + + ActionFuture 
future = client(LOCAL_CLUSTER).admin() + .indices() + .execute(TransportResolveClusterAction.TYPE, request); + ResolveClusterActionResponse response = future.actionGet(10, TimeUnit.SECONDS); + assertNotNull(response); + + Map clusterInfo = response.getResolveClusterInfo(); + assertEquals(3, clusterInfo.size()); + Set expectedClusterNames = Set.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); + assertThat(clusterInfo.keySet(), equalTo(expectedClusterNames)); + + ResolveClusterInfo remote1 = clusterInfo.get(REMOTE_CLUSTER_1); + assertThat(remote1.isConnected(), equalTo(true)); + assertThat(remote1.getSkipUnavailable(), equalTo(skipUnavailable1)); + assertThat(remote1.getMatchingIndices(), equalTo(true)); + assertNotNull(remote1.getBuild().version()); + assertNull(remote1.getError()); + + ResolveClusterInfo remote2 = clusterInfo.get(REMOTE_CLUSTER_2); + assertThat(remote2.isConnected(), equalTo(true)); + assertThat(remote2.getSkipUnavailable(), equalTo(skipUnavailable2)); + assertThat(remote2.getMatchingIndices(), equalTo(true)); + assertNotNull(remote2.getBuild().version()); + assertNull(remote2.getError()); + + ResolveClusterInfo local = clusterInfo.get(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertThat(local.isConnected(), equalTo(true)); + assertThat(local.getSkipUnavailable(), equalTo(false)); + assertThat(local.getMatchingIndices(), equalTo(true)); + assertNotNull(local.getBuild().version()); + assertNull(local.getError()); + } + + // test wildcards against datastream names + { + String[] indexExpressions = new String[] { + localDataStream.substring(0, 3) + "*", + REMOTE_CLUSTER_1.substring(0, 3) + "*:" + remoteDataStream1.substring(0, 3) + "*", + REMOTE_CLUSTER_2 + ":" + remoteIndex2.substring(0, 2) + "*" }; + ResolveClusterActionRequest request = new ResolveClusterActionRequest(indexExpressions); + + ActionFuture future = client(LOCAL_CLUSTER).admin() + .indices() + .execute(TransportResolveClusterAction.TYPE, request); + ResolveClusterActionResponse response = future.actionGet(10, TimeUnit.SECONDS); + assertNotNull(response); + + Map clusterInfo = response.getResolveClusterInfo(); + assertEquals(3, clusterInfo.size()); + Set expectedClusterNames = Set.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); + assertThat(clusterInfo.keySet(), equalTo(expectedClusterNames)); + + ResolveClusterInfo remote1 = clusterInfo.get(REMOTE_CLUSTER_1); + assertThat(remote1.isConnected(), equalTo(true)); + assertThat(remote1.getSkipUnavailable(), equalTo(skipUnavailable1)); + assertThat(remote1.getMatchingIndices(), equalTo(true)); + assertNotNull(remote1.getBuild().version()); + assertNull(remote1.getError()); + + ResolveClusterInfo remote2 = clusterInfo.get(REMOTE_CLUSTER_2); + assertThat(remote2.isConnected(), equalTo(true)); + assertThat(remote2.getSkipUnavailable(), equalTo(skipUnavailable2)); + assertThat(remote2.getMatchingIndices(), equalTo(true)); + assertNotNull(remote2.getBuild().version()); + assertNull(remote2.getError()); + + ResolveClusterInfo local = clusterInfo.get(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertThat(local.isConnected(), equalTo(true)); + assertThat(local.getSkipUnavailable(), equalTo(false)); + assertThat(local.getMatchingIndices(), equalTo(true)); + assertNotNull(local.getBuild().version()); + assertNull(local.getError()); + } + + // test remote only clusters + { + String[] indexExpressions = new String[] { + REMOTE_CLUSTER_1 + ":" + remoteDataStream1, + REMOTE_CLUSTER_2 + ":" + 
remoteIndex2.substring(0, 2) + "*" }; + ResolveClusterActionRequest request = new ResolveClusterActionRequest(indexExpressions); + + ActionFuture future = client(LOCAL_CLUSTER).admin() + .indices() + .execute(TransportResolveClusterAction.TYPE, request); + ResolveClusterActionResponse response = future.actionGet(10, TimeUnit.SECONDS); + assertNotNull(response); + + Map clusterInfo = response.getResolveClusterInfo(); + assertEquals(2, clusterInfo.size()); + Set expectedClusterNames = Set.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); + assertThat(clusterInfo.keySet(), equalTo(expectedClusterNames)); + + ResolveClusterInfo remote1 = clusterInfo.get(REMOTE_CLUSTER_1); + assertThat(remote1.isConnected(), equalTo(true)); + assertThat(remote1.getSkipUnavailable(), equalTo(skipUnavailable1)); + assertThat(remote1.getMatchingIndices(), equalTo(true)); + assertNotNull(remote1.getBuild().version()); + assertNull(remote1.getError()); + + ResolveClusterInfo remote2 = clusterInfo.get(REMOTE_CLUSTER_2); + assertThat(remote2.isConnected(), equalTo(true)); + assertThat(remote2.getSkipUnavailable(), equalTo(skipUnavailable2)); + assertThat(remote2.getMatchingIndices(), equalTo(true)); + assertNotNull(remote2.getBuild().version()); + assertNull(remote2.getError()); + } + } + + public void testClusterResolveWithDataStreamsUsingAlias() throws Exception { + Map testClusterInfo = setupThreeClusters(true); + String localDataStreamAlias = (String) testClusterInfo.get("local.datastream.alias"); + String remoteDataStream1Alias = (String) testClusterInfo.get("remote1.datastream.alias"); + String remoteIndex2 = (String) testClusterInfo.get("remote2.index"); + boolean skipUnavailable1 = (Boolean) testClusterInfo.get("remote1.skip_unavailable"); + boolean skipUnavailable2 = true; + + // test all clusters against datastream alias (present only on local and remote1) + { + String[] indexExpressions = new String[] { + localDataStreamAlias, + REMOTE_CLUSTER_1 + ":" + remoteDataStream1Alias, + REMOTE_CLUSTER_2 + ":" + remoteIndex2 }; + ResolveClusterActionRequest request = new ResolveClusterActionRequest(indexExpressions); + + ActionFuture future = client(LOCAL_CLUSTER).admin() + .indices() + .execute(TransportResolveClusterAction.TYPE, request); + ResolveClusterActionResponse response = future.actionGet(10, TimeUnit.SECONDS); + assertNotNull(response); + + Map clusterInfo = response.getResolveClusterInfo(); + assertEquals(3, clusterInfo.size()); + Set expectedClusterNames = Set.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); + assertThat(clusterInfo.keySet(), equalTo(expectedClusterNames)); + + ResolveClusterInfo remote1 = clusterInfo.get(REMOTE_CLUSTER_1); + assertThat(remote1.isConnected(), equalTo(true)); + assertThat(remote1.getSkipUnavailable(), equalTo(skipUnavailable1)); + assertThat(remote1.getMatchingIndices(), equalTo(true)); + assertNotNull(remote1.getBuild().version()); + assertNull(remote1.getError()); + + ResolveClusterInfo remote2 = clusterInfo.get(REMOTE_CLUSTER_2); + assertThat(remote2.isConnected(), equalTo(true)); + assertThat(remote2.getSkipUnavailable(), equalTo(skipUnavailable2)); + assertThat(remote2.getMatchingIndices(), equalTo(true)); + assertNotNull(remote2.getBuild().version()); + assertNull(remote2.getError()); + + ResolveClusterInfo local = clusterInfo.get(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertThat(local.isConnected(), equalTo(true)); + assertThat(local.getSkipUnavailable(), equalTo(false)); + assertThat(local.getMatchingIndices(), equalTo(true)); + 
assertNotNull(local.getBuild().version()); + assertNull(local.getError()); + } + } + + private Map setupThreeClusters(boolean useAlias) throws IOException, ExecutionException, InterruptedException { + String dataStreamLocal = "metrics-foo"; + String dataStreamLocalAlias = randomAlphaOfLengthBetween(5, 16); + + // set up data stream on local cluster + { + Client client = client(LOCAL_CLUSTER); + List backingIndices = new ArrayList<>(); + Map aliases = null; + if (useAlias) { + aliases = new HashMap<>(); + aliases.put(dataStreamLocalAlias, AliasMetadata.builder(dataStreamLocalAlias).writeIndex(randomBoolean()).build()); + } + putComposableIndexTemplate(client, "id1", List.of(dataStreamLocal + "*"), aliases); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("metrics-foo"); + assertAcked(client.execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get()); + + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Response getDataStreamResponse = client.execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) + .actionGet(); + DataStream fooDataStream = getDataStreamResponse.getDataStreams().get(0).getDataStream(); + String backingIndex = fooDataStream.getIndices().get(0).getName(); + backingIndices.add(backingIndex); + GetIndexResponse getIndexResponse = client.admin().indices().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet(); + assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue()); + assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true)); + Map mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap(); + assertThat(ObjectPath.eval("properties.@timestamp.type", mappings), is("date")); + + int numDocsBar = randomIntBetween(2, 16); + indexDataStreamDocs(client, dataStreamLocal, numDocsBar); + } + + // set up data stream on remote1 cluster + String dataStreamRemote1 = "metrics-bar"; + String dataStreamRemote1Alias = randomAlphaOfLengthBetween(5, 16); + { + Client client = client(REMOTE_CLUSTER_1); + List backingIndices = new ArrayList<>(); + Map aliases = null; + if (useAlias) { + aliases = new HashMap<>(); + aliases.put(dataStreamRemote1Alias, AliasMetadata.builder(dataStreamRemote1Alias).writeIndex(randomBoolean()).build()); + } + putComposableIndexTemplate(client, "id2", List.of(dataStreamRemote1 + "*"), aliases); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("metrics-bar"); + assertAcked(client.execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get()); + + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Response getDataStreamResponse = client.execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) + .actionGet(); + + DataStream barDataStream = getDataStreamResponse.getDataStreams().get(0).getDataStream(); + String backingIndex = barDataStream.getIndices().get(0).getName(); + backingIndices.add(backingIndex); + GetIndexResponse getIndexResponse = client.admin().indices().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet(); + assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue()); + assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true)); + Map mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap(); + 
+            assertThat(ObjectPath.eval("properties.@timestamp.type", mappings), is("date"));
+
+            int numDocsBar = randomIntBetween(2, 16);
+            indexDataStreamDocs(client, dataStreamRemote1, numDocsBar);
+        }
+
+        // set up remote2 cluster and non-datastream index
+
+        String remoteIndex2 = "prod123";
+        int numShardsRemote2 = randomIntBetween(2, 4);
+        final InternalTestCluster remoteCluster2 = cluster(REMOTE_CLUSTER_2);
+        remoteCluster2.ensureAtLeastNumDataNodes(randomIntBetween(1, 2));
+        final Settings.Builder remoteSettings2 = Settings.builder();
+        remoteSettings2.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShardsRemote2);
+
+        assertAcked(
+            client(REMOTE_CLUSTER_2).admin()
+                .indices()
+                .prepareCreate(remoteIndex2)
+                .setSettings(Settings.builder().put(remoteSettings2.build()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0))
+                .setMapping("@timestamp", "type=date", "f", "type=text")
+        );
+        assertFalse(
+            client(REMOTE_CLUSTER_2).admin()
+                .cluster()
+                .prepareHealth(remoteIndex2)
+                .setWaitForYellowStatus()
+                .setTimeout(TimeValue.timeValueSeconds(10))
+                .get()
+                .isTimedOut()
+        );
+        indexDocs(client(REMOTE_CLUSTER_2), remoteIndex2);
+
+        String skipUnavailableKey = Strings.format("cluster.remote.%s.skip_unavailable", REMOTE_CLUSTER_1);
+        Setting<?> skipUnavailableSetting = cluster(REMOTE_CLUSTER_1).clusterService().getClusterSettings().get(skipUnavailableKey);
+        boolean skipUnavailable1 = (boolean) cluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).clusterService()
+            .getClusterSettings()
+            .get(skipUnavailableSetting);
+
+        Map<String, Object> clusterInfo = new HashMap<>();
+        clusterInfo.put("local.datastream", dataStreamLocal);
+        clusterInfo.put("local.datastream.alias", dataStreamLocalAlias);
+
+        clusterInfo.put("remote1.skip_unavailable", skipUnavailable1);
+        clusterInfo.put("remote1.datastream", dataStreamRemote1);
+        clusterInfo.put("remote1.datastream.alias", dataStreamRemote1Alias);
+
+        clusterInfo.put("remote2.index", remoteIndex2);
+        clusterInfo.put("remote2.skip_unavailable", true);
+
+        return clusterInfo;
+    }
+
+    private int indexDocs(Client client, String index) {
+        int numDocs = between(50, 100);
+        for (int i = 0; i < numDocs; i++) {
+            long ts = EARLIEST_TIMESTAMP + i;
+            if (i == numDocs - 1) {
+                ts = LATEST_TIMESTAMP;
+            }
+            client.prepareIndex(index).setSource("f", "v", "@timestamp", ts).get();
+        }
+        client.admin().indices().prepareRefresh(index).get();
+        return numDocs;
+    }
+
+    void putComposableIndexTemplate(Client client, String id, List<String> patterns, @Nullable Map<String, AliasMetadata> aliases)
+        throws IOException {
+        TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(id);
+        request.indexTemplate(
+            ComposableIndexTemplate.builder()
+                .indexPatterns(patterns)
+                .template(new Template(null, null, aliases, null))
+                .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
+                .build()
+        );
+        client.execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet();
+    }
+
+    void indexDataStreamDocs(Client client, String dataStream, int numDocs) {
+        BulkRequest bulkRequest = new BulkRequest();
+        for (int i = 0; i < numDocs; i++) {
+            String value = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis());
+            bulkRequest.add(
+                new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE)
+                    .source(String.format(Locale.ROOT, "{\"%s\":\"%s\"}", DEFAULT_TIMESTAMP_FIELD, value), XContentType.JSON)
+            );
+        }
+        BulkResponse bulkResponse = client.bulk(bulkRequest).actionGet();
+        assertThat(bulkResponse.getItems().length, equalTo(numDocs));
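+        // every bulk item should have succeeded and been routed to a backing index of the data stream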
+        String backingIndexPrefix = DataStream.BACKING_INDEX_PREFIX + dataStream;
+        for (BulkItemResponse itemResponse : bulkResponse) {
+            assertThat(itemResponse.getFailureMessage(), nullValue());
+            assertThat(itemResponse.status(), equalTo(RestStatus.CREATED));
+            assertThat(itemResponse.getIndex(), startsWith(backingIndexPrefix));
+        }
+        client.admin().indices().refresh(new RefreshRequest(dataStream)).actionGet();
+    }
+}
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java
index 734e2d7273d19..ec871b201bbdb 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java
@@ -14,7 +14,6 @@
 import org.elasticsearch.action.admin.cluster.snapshots.features.ResetFeatureStateResponse.ResetFeatureStateStatus;
 import org.elasticsearch.action.datastreams.DeleteDataStreamAction;
 import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.action.support.IndicesOptions.Option;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RequestOptions;
@@ -45,7 +44,6 @@
 import java.io.UncheckedIOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
@@ -351,10 +349,11 @@ public void cleanUpFeature(ClusterService clusterService, Client client, ActionL
                     .collect(Collectors.toList())
                     .toArray(Strings.EMPTY_ARRAY)
             );
-            EnumSet