diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 5246115d0b693..b72d3b4089b97 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -46,6 +46,7 @@ BWC_VERSION: - "1.3.7" - "1.3.8" - "1.3.9" + - "1.3.10" - "2.0.0" - "2.0.1" - "2.0.2" @@ -62,3 +63,4 @@ BWC_VERSION: - "2.5.0" - "2.5.1" - "2.6.0" + - "2.6.1" diff --git a/.github/workflows/code-hygiene.yml b/.github/workflows/code-hygiene.yml deleted file mode 100644 index a1adbb8a87507..0000000000000 --- a/.github/workflows/code-hygiene.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: Code Hygiene - -on: [pull_request] - -jobs: - linelint: - runs-on: ubuntu-latest - name: Check if all files end in newline - steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Linelint - uses: fernandrone/linelint@0.0.4 diff --git a/.github/workflows/github-merit-badger.yml b/.github/workflows/github-merit-badger.yml new file mode 100644 index 0000000000000..ee00e62da2f08 --- /dev/null +++ b/.github/workflows/github-merit-badger.yml @@ -0,0 +1,20 @@ +name: github-merit-badger +on: + pull_request_target: + types: + - opened + +jobs: + call-action: + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - uses: aws-github-ops/github-merit-badger@v0.0.98 + id: merit-badger + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + badges: '[first-time-contributor,repeat-contributor,valued-contributor,seasoned-contributor,all-star-contributor,distinguished-contributor]' + thresholds: '[0,3,6,13,25,50]' + badge-type: 'achievement' + ignore-usernames: '[opensearch-ci-bot, dependabot, opensearch-trigger-bot]' diff --git a/.linelint.yml b/.linelint.yml deleted file mode 100644 index ec947019f8ab6..0000000000000 --- a/.linelint.yml +++ /dev/null @@ -1,49 +0,0 @@ -# 'true' will fix files -autofix: true - -ignore: - - .git/ - - .gradle/ - - .idea/ - - '*.sha1' - - '*.txt' - - 'CHANGELOG.md' - - '.github/CODEOWNERS' - - 'buildSrc/src/testKit/opensearch.build/LICENSE' - - 'buildSrc/src/testKit/opensearch.build/NOTICE' - - 'server/licenses/apache-log4j-extras-DEPENDENCIES' - # Empty files - - 'buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/build.gradle' - - 'buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/oss-darwin-tar/build.gradle' - - 'buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix/build.gradle' - - 'buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/minor/build.gradle' - - 'buildSrc/src/main/resources/buildSrc.marker' - - 'buildSrc/src/testKit/opensearch-build-resources/settings.gradle' - - 'buildSrc/src/testKit/opensearch.build/settings.gradle' - - 'buildSrc/src/testKit/reaper/settings.gradle' - - 'buildSrc/src/testKit/symbolic-link-preserving-tar/settings.gradle' - - 'buildSrc/src/testKit/testingConventions/empty_test_task/.gitignore' - - 'client/rest-high-level/src/main/resources/META-INF/services/org.opensearch.plugins.spi.NamedXContentProvider' - - 'distribution/bwc/bugfix/build.gradle' - - 'distribution/bwc/maintenance/build.gradle' - - 'distribution/bwc/minor/build.gradle' - - 'distribution/bwc/staged/build.gradle' - - 'libs/ssl-config/src/test/resources/certs/pem-utils/empty.pem' - - 'qa/evil-tests/src/test/resources/org/opensearch/common/logging/does_not_exist/nothing_to_see_here' - - 'qa/os/centos-6/build.gradle' - - 'qa/os/debian-8/build.gradle' - - 'qa/os/oel-6/build.gradle' - - 'qa/os/oel-7/build.gradle' - - 'qa/os/sles-12/build.gradle' - # Test 
requires no new line for these files
-  - 'server/src/test/resources/org/opensearch/action/bulk/simple-bulk11.json'
-  - 'server/src/test/resources/org/opensearch/action/search/simple-msearch5.json'
-
-rules:
-  # checks if file ends in a newline character
-  end-of-file:
-    # set to true to enable this rule
-    enable: true
-
-    # if true also checks if file ends in a single newline character
-    single-new-line: true
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dbf65f07083fb..1cdf2f118d242 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,8 +7,26 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Added
 - Add GeoTile and GeoHash Grid aggregations on GeoShapes. ([#5589](https://github.com/opensearch-project/OpenSearch/pull/5589))
 - Disallow multiple data paths for search nodes ([#6427](https://github.com/opensearch-project/OpenSearch/pull/6427))
+- [Segment Replication] Allocation and rebalancing based on average primary shard count per index ([#6422](https://github.com/opensearch-project/OpenSearch/pull/6422))
+- The truncation limit of the OpenSearchJsonLayout logger is now configurable ([#6569](https://github.com/opensearch-project/OpenSearch/pull/6569))
+- Add 'base_path' setting to File System Repository ([#6558](https://github.com/opensearch-project/OpenSearch/pull/6558))
+- Return success on DeletePits when no PITs exist. ([#6544](https://github.com/opensearch-project/OpenSearch/pull/6544))
+- Add node repurpose command for search nodes ([#6517](https://github.com/opensearch-project/OpenSearch/pull/6517))
+- [Segment Replication] Apply backpressure when replicas fall behind ([#6563](https://github.com/opensearch-project/OpenSearch/pull/6563))
 
 ### Dependencies
+- Bump `org.apache.logging.log4j:log4j-core` from 2.18.0 to 2.20.0 ([#6490](https://github.com/opensearch-project/OpenSearch/pull/6490))
+- Bump `com.azure:azure-storage-common` from 12.19.3 to 12.20.0 ([#6492](https://github.com/opensearch-project/OpenSearch/pull/6492))
+- Bump `snakeyaml` from 1.33 to 2.0 ([#6511](https://github.com/opensearch-project/OpenSearch/pull/6511))
+- Bump `io.projectreactor.netty:reactor-netty` from 1.1.3 to 1.1.4
+- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.15.2 to 0.16.11
+- Bump `net.minidev:json-smart` from 2.4.8 to 2.4.9
+- Bump `com.google.protobuf:protobuf-java` from 3.22.0 to 3.22.2
+- Bump Netty to 4.1.90.Final ([#6677](https://github.com/opensearch-project/OpenSearch/pull/6677))
+- Bump `com.diffplug.spotless` from 6.15.0 to 6.17.0
+- Bump `org.apache.zookeeper:zookeeper` from 3.8.0 to 3.8.1
+- Bump `net.minidev:json-smart` from 2.4.7 to 2.4.10
+- Bump `org.apache.maven:maven-model` from 3.6.2 to 3.9.1
 
 ### Changed
 - Require MediaType in Strings.toString API ([#6009](https://github.com/opensearch-project/OpenSearch/pull/6009))
@@ -16,11 +34,17 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Introduce a new field type: flat_object ([#6507](https://github.com/opensearch-project/OpenSearch/pull/6507))
 
 ### Deprecated
+- Map, List, and Set in org.opensearch.common.collect ([#6609](https://github.com/opensearch-project/OpenSearch/pull/6609))
 
 ### Removed
 
 ### Fixed
+- Added depth check in doc parser for deeply nested documents ([#5199](https://github.com/opensearch-project/OpenSearch/pull/5199))
+- Added equals/hashCode for the named DocValueFormat.DateTime inner class ([#6357](https://github.com/opensearch-project/OpenSearch/pull/6357))
+- Fixed a bug so that searchable snapshots take the 'base_path' of the blob into account 
([#6558](https://github.com/opensearch-project/OpenSearch/pull/6558)) +- Fix fuzziness validation ([#5805](https://github.com/opensearch-project/OpenSearch/pull/5805)) ### Security +[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD [Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.5...2.x diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/search/aggregations/bucket/terms/StringTermsSerializationBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/search/aggregations/bucket/terms/StringTermsSerializationBenchmark.java index 6d8721ce64090..e49ae187acea7 100644 --- a/benchmarks/src/main/java/org/opensearch/benchmark/search/aggregations/bucket/terms/StringTermsSerializationBenchmark.java +++ b/benchmarks/src/main/java/org/opensearch/benchmark/search/aggregations/bucket/terms/StringTermsSerializationBenchmark.java @@ -64,9 +64,7 @@ @State(Scope.Benchmark) public class StringTermsSerializationBenchmark { private static final NamedWriteableRegistry REGISTRY = new NamedWriteableRegistry( - org.opensearch.common.collect.List.of( - new NamedWriteableRegistry.Entry(InternalAggregation.class, StringTerms.NAME, StringTerms::new) - ) + List.of(new NamedWriteableRegistry.Entry(InternalAggregation.class, StringTerms.NAME, StringTerms::new)) ); @Param(value = { "1000" }) private int buckets; @@ -75,15 +73,13 @@ public class StringTermsSerializationBenchmark { @Setup public void initResults() { - results = DelayableWriteable.referencing(InternalAggregations.from(org.opensearch.common.collect.List.of(newTerms(true)))); + results = DelayableWriteable.referencing(InternalAggregations.from(List.of(newTerms(true)))); } private StringTerms newTerms(boolean withNested) { List resultBuckets = new ArrayList<>(buckets); for (int i = 0; i < buckets; i++) { - InternalAggregations inner = withNested - ? InternalAggregations.from(org.opensearch.common.collect.List.of(newTerms(false))) - : InternalAggregations.EMPTY; + InternalAggregations inner = withNested ? InternalAggregations.from(List.of(newTerms(false))) : InternalAggregations.EMPTY; resultBuckets.add(new StringTerms.Bucket(new BytesRef("test" + i), i, inner, false, 0, DocValueFormat.RAW)); } return new StringTerms( diff --git a/build.gradle b/build.gradle index 4f432bc100678..be733233f60b5 100644 --- a/build.gradle +++ b/build.gradle @@ -45,7 +45,6 @@ import org.gradle.plugins.ide.eclipse.model.AccessRule import org.gradle.plugins.ide.eclipse.model.EclipseJdt import org.gradle.plugins.ide.eclipse.model.SourceFolder import org.gradle.api.Project; -import org.gradle.api.internal.tasks.testing.junit.JUnitTestFramework import org.gradle.process.ExecResult; import static org.opensearch.gradle.util.GradleUtils.maybeConfigure @@ -54,7 +53,7 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.15.0" apply false + id "com.diffplug.spotless" version "6.17.0" apply false id "org.gradle.test-retry" version "1.5.1" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' @@ -76,19 +75,6 @@ allprojects { group = 'org.opensearch' version = VersionProperties.getOpenSearch() description = "OpenSearch subproject ${project.path}" - - afterEvaluate { - project.tasks.withType(Test) { task -> - // This is so hacky: now, by default, test tasks uses JUnit framework and always includes 'junit' - // JARs from the Gradle distribution (no ways to override this behavior). 
It causes JAR hell on test - // classpath, example of the report: - // - // jar1: /home/ubuntu/.gradle/caches/modules-2/files-2.1/junit/junit/4.13.2/8ac9e16d933b6fb43bc7f576336b8f4d7eb5ba12/junit-4.13.2.jar - // jar2: /home/ubuntu/.gradle/wrapper/dists/gradle-8.0-rc-1-all/2p8rgxxewg8l61n1p3vrzr9s8/gradle-8.0-rc-1/lib/junit-4.13.2.jar - // - task.getTestFrameworkProperty().convention(getProviderFactory().provider(() -> new JUnitTestFramework(task, task.getFilter(), false))); - } - } } configure(allprojects - project(':distribution:archives:integ-test-zip')) { diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index aeeaf90f51c61..e6415d0a58f17 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -115,9 +115,9 @@ dependencies { api 'org.jdom:jdom2:2.0.6.1' api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" api 'de.thetaphi:forbiddenapis:3.4' - api 'com.avast.gradle:gradle-docker-compose-plugin:0.15.2' + api 'com.avast.gradle:gradle-docker-compose-plugin:0.16.11' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" - api 'org.apache.maven:maven-model:3.6.2' + api 'org.apache.maven:maven-model:3.9.1' api 'com.networknt:json-schema-validator:1.0.73' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" diff --git a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportPlugin.java index 92777638982d2..5455875a96611 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportPlugin.java @@ -56,13 +56,9 @@ public void apply(Project project) { Provider dockerSupportServiceProvider = project.getGradle() .getSharedServices() - .registerIfAbsent( - DOCKER_SUPPORT_SERVICE_NAME, - DockerSupportService.class, - spec -> spec.parameters( - params -> { params.setExclusionsFile(new File(project.getRootDir(), DOCKER_ON_LINUX_EXCLUSIONS_FILE)); } - ) - ); + .registerIfAbsent(DOCKER_SUPPORT_SERVICE_NAME, DockerSupportService.class, spec -> spec.parameters(params -> { + params.setExclusionsFile(new File(project.getRootDir(), DOCKER_ON_LINUX_EXCLUSIONS_FILE)); + })); // Ensure that if we are trying to run any DockerBuildTask tasks, we assert an available Docker installation exists project.getGradle().getTaskGraph().whenReady(graph -> { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index dd2393702fe2b..33869e76680cd 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -76,12 +76,9 @@ public InternalDistributionBwcSetupPlugin(ProviderFactory providerFactory) { @Override public void apply(Project project) { project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); - BuildParams.getBwcVersions() - .forPreviousUnreleased( - (BwcVersions.UnreleasedVersionInfo unreleasedVersion) -> { - configureBwcProject(project.project(unreleasedVersion.gradleProjectPath), unreleasedVersion); - } - ); + BuildParams.getBwcVersions().forPreviousUnreleased((BwcVersions.UnreleasedVersionInfo unreleasedVersion) -> { + configureBwcProject(project.project(unreleasedVersion.gradleProjectPath), unreleasedVersion); + }); } private void 
configureBwcProject(Project project, BwcVersions.UnreleasedVersionInfo versionInfo) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index 64e30700360b3..3bb92d826e5eb 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -197,13 +197,9 @@ public void apply(Project project) { // windows boxes get windows distributions, and linux boxes get linux distributions if (isWindows(vmProject)) { - configureVMWrapperTasks( - vmProject, - windowsTestTasks, - depsTasks, - wrapperTask -> { vmLifecyleTasks.get(OpenSearchDistribution.Type.ARCHIVE).configure(t -> t.dependsOn(wrapperTask)); }, - vmDependencies - ); + configureVMWrapperTasks(vmProject, windowsTestTasks, depsTasks, wrapperTask -> { + vmLifecyleTasks.get(OpenSearchDistribution.Type.ARCHIVE).configure(t -> t.dependsOn(wrapperTask)); + }, vmDependencies); } else { for (Entry>> entry : linuxTestTasks.entrySet()) { OpenSearchDistribution.Type type = entry.getKey(); diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index 820c303a2e79f..b31e3b168c587 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -16,7 +16,7 @@ repositories { mavenCentral() } dependencies { - implementation 'org.apache.logging.log4j:log4j-core:2.18.0' + implementation 'org.apache.logging.log4j:log4j-core:2.20.0' } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 4241255bcbf6f..7262080d06c61 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -9,7 +9,7 @@ spatial4j = 0.7 jts = 1.15.0 jackson = 2.14.2 jackson_databind = 2.14.2 -snakeyaml = 1.33 +snakeyaml = 2.0 icu4j = 70.1 supercsv = 2.4.0 # Update to 2.17.2+ is breaking OpenSearchJsonLayout (see https://issues.apache.org/jira/browse/LOG4J2-3562) @@ -25,7 +25,7 @@ guava = 31.1-jre # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.5.0 -netty = 4.1.87.Final +netty = 4.1.90.Final joda = 2.12.2 # client dependencies @@ -46,9 +46,9 @@ bouncycastle=1.70 randomizedrunner = 2.7.1 junit = 4.13.2 hamcrest = 2.1 -mockito = 5.1.0 +mockito = 5.2.0 objenesis = 3.2 -bytebuddy = 1.12.22 +bytebuddy = 1.14.2 # benchmark dependencies jmh = 1.35 diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/core/MainResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/core/MainResponse.java index b15ef2e699ca5..3f191885071a1 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/core/MainResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/core/MainResponse.java @@ -43,7 +43,9 @@ public class MainResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( MainResponse.class.getName(), true, - args -> { return new MainResponse((String) args[0], (Version) args[1], (String) args[2], (String) args[3]); } + args -> { + return new MainResponse((String) args[0], (Version) args[1], (String) args[2], (String) args[3]); + } ); static { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/core/TermVectorsResponse.java 
b/client/rest-high-level/src/main/java/org/opensearch/client/core/TermVectorsResponse.java index fa13abf72207e..9579b0eb90cb0 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/core/TermVectorsResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/core/TermVectorsResponse.java @@ -240,7 +240,9 @@ public static final class FieldStatistics { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "field_statistics", true, - args -> { return new FieldStatistics((long) args[0], (int) args[1], (long) args[2]); } + args -> { + return new FieldStatistics((long) args[0], (int) args[1], (long) args[2]); + } ); static { @@ -411,11 +413,9 @@ public int hashCode() { public static final class Token { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "token", - true, - args -> { return new Token((Integer) args[0], (Integer) args[1], (Integer) args[2], (String) args[3]); } - ); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("token", true, args -> { + return new Token((Integer) args[0], (Integer) args[1], (Integer) args[2], (String) args[3]); + }); static { PARSER.declareInt(optionalConstructorArg(), new ParseField("start_offset")); PARSER.declareInt(optionalConstructorArg(), new ParseField("end_offset")); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java index 999c2a0e7643b..ad7198195c0ee 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java @@ -628,10 +628,9 @@ public void testIndex() throws IOException { assertEquals("index", indexResponse.getIndex()); assertEquals("with_create_op_type", indexResponse.getId()); - OpenSearchStatusException exception = expectThrows( - OpenSearchStatusException.class, - () -> { execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); } - ); + OpenSearchStatusException exception = expectThrows(OpenSearchStatusException.class, () -> { + execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + }); assertEquals(RestStatus.CONFLICT, exception.status()); assertEquals( diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java index f9c8851f8839e..9cdf5beb71906 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java @@ -2032,8 +2032,8 @@ public void testSimulateIndexTemplate() throws Exception { Settings settings = Settings.builder().put("index.number_of_shards", 1).build(); CompressedXContent mappings = new CompressedXContent("{\"properties\":{\"host_name\":{\"type\":\"keyword\"}}}"); AliasMetadata alias = AliasMetadata.builder("alias").writeIndex(true).build(); - Template template = new Template(settings, mappings, org.opensearch.common.collect.Map.of("alias", alias)); - List pattern = org.opensearch.common.collect.List.of("pattern"); + Template template = new Template(settings, mappings, Map.of("alias", alias)); + List pattern = List.of("pattern"); ComposableIndexTemplate indexTemplate = new ComposableIndexTemplate( pattern, template, @@ -2058,7 +2058,7 @@ public void testSimulateIndexTemplate() throws Exception { AliasMetadata 
simulationAlias = AliasMetadata.builder("simulation-alias").writeIndex(true).build(); ComposableIndexTemplate simulationTemplate = new ComposableIndexTemplate( pattern, - new Template(null, null, org.opensearch.common.collect.Map.of("simulation-alias", simulationAlias)), + new Template(null, null, Map.of("simulation-alias", simulationAlias)), Collections.emptyList(), 2L, 1L, diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ReindexIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/ReindexIT.java index 78fb2aef02ee0..cbee86c34c360 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ReindexIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ReindexIT.java @@ -112,10 +112,9 @@ public void testReindex() throws IOException { reindexRequest.setRefresh(true); reindexRequest.setRequireAlias(true); - OpenSearchStatusException exception = expectThrows( - OpenSearchStatusException.class, - () -> { execute(reindexRequest, highLevelClient()::reindex, highLevelClient()::reindexAsync); } - ); + OpenSearchStatusException exception = expectThrows(OpenSearchStatusException.class, () -> { + execute(reindexRequest, highLevelClient()::reindex, highLevelClient()::reindexAsync); + }); assertEquals(RestStatus.NOT_FOUND, exception.status()); assertEquals( "OpenSearch exception [type=index_not_found_exception, reason=no such index [dest] and [require_alias] request flag is [true] and [dest] is not an alias]", diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index a50cc811a87dc..9d1b83a32fb02 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -427,13 +427,9 @@ public void testPerformRequestOnSuccess() throws IOException { { IOException ioe = expectThrows( IOException.class, - () -> restHighLevelClient.performRequest( - mainRequest, - requestConverter, - RequestOptions.DEFAULT, - response -> { throw new IllegalStateException(); }, - Collections.emptySet() - ) + () -> restHighLevelClient.performRequest(mainRequest, requestConverter, RequestOptions.DEFAULT, response -> { + throw new IllegalStateException(); + }, Collections.emptySet()) ); assertEquals( "Unable to parse response body for Response{requestLine=GET / http/1.1, host=http://localhost:9200, " @@ -575,13 +571,9 @@ public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws when(restClient.performRequest(any(Request.class))).thenThrow(responseException); OpenSearchException openSearchException = expectThrows( OpenSearchException.class, - () -> restHighLevelClient.performRequest( - mainRequest, - requestConverter, - RequestOptions.DEFAULT, - response -> { throw new IllegalStateException(); }, - Collections.singleton(404) - ) + () -> restHighLevelClient.performRequest(mainRequest, requestConverter, RequestOptions.DEFAULT, response -> { + throw new IllegalStateException(); + }, Collections.singleton(404)) ); assertEquals(RestStatus.NOT_FOUND, openSearchException.status()); assertSame(responseException, openSearchException.getCause()); @@ -598,13 +590,9 @@ public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() thr when(restClient.performRequest(any(Request.class))).thenThrow(responseException); OpenSearchException openSearchException = expectThrows( 
OpenSearchException.class, - () -> restHighLevelClient.performRequest( - mainRequest, - requestConverter, - RequestOptions.DEFAULT, - response -> { throw new IllegalStateException(); }, - Collections.singleton(404) - ) + () -> restHighLevelClient.performRequest(mainRequest, requestConverter, RequestOptions.DEFAULT, response -> { + throw new IllegalStateException(); + }, Collections.singleton(404)) ); assertEquals(RestStatus.NOT_FOUND, openSearchException.status()); assertSame(responseException, openSearchException.getSuppressed()[0]); diff --git a/gradle/formatting.gradle b/gradle/formatting.gradle index e0939fa9766f2..9066a122b71b2 100644 --- a/gradle/formatting.gradle +++ b/gradle/formatting.gradle @@ -68,6 +68,7 @@ allprojects { removeUnusedImports() eclipse().configFile rootProject.file('buildSrc/formatterConfig.xml') trimTrailingWhitespace() + endWithNewline() custom 'Refuse wildcard imports', { // Wildcard imports can't be resolved; fail the build @@ -81,6 +82,12 @@ allprojects { paddedCell() } } + format 'misc', { + target '*.md', '*.gradle', '**/*.yaml', '**/*.yml', '**/*.svg' + + trimTrailingWhitespace() + endWithNewline() + } } precommit.dependsOn 'spotlessJavaCheck' diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 967dd3ccea2d7..21b7e71e15d65 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.0.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.0.2-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=948d1e4ccc2f6ae36cbfa087d827aaca51403acae5411e664a6000bb47946f22 +distributionSha256Sum=47a5bfed9ef814f90f8debcbbb315e8e7c654109acd224595ea39fca95c5d4da diff --git a/libs/common/src/main/java/org/opensearch/common/collect/List.java b/libs/common/src/main/java/org/opensearch/common/collect/List.java index c28a69a515d35..07cfc2c019856 100644 --- a/libs/common/src/main/java/org/opensearch/common/collect/List.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/List.java @@ -37,10 +37,10 @@ /** * Java 9 List * - * todo: deprecate and remove w/ min jdk upgrade to 11? * * @opensearch.internal */ +@Deprecated(forRemoval = true) public class List { /** diff --git a/libs/common/src/main/java/org/opensearch/common/collect/Map.java b/libs/common/src/main/java/org/opensearch/common/collect/Map.java index a0b8c03d3d3e4..3913c0fd942a4 100644 --- a/libs/common/src/main/java/org/opensearch/common/collect/Map.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/Map.java @@ -35,10 +35,10 @@ /** * Java 9 Map * - * todo: deprecate and remove w/ min jdk upgrade to 11? * * @opensearch.internal */ +@Deprecated(forRemoval = true) public class Map { /** diff --git a/libs/common/src/main/java/org/opensearch/common/collect/Set.java b/libs/common/src/main/java/org/opensearch/common/collect/Set.java index 11d59cead6009..0c3899b2aaacd 100644 --- a/libs/common/src/main/java/org/opensearch/common/collect/Set.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/Set.java @@ -37,10 +37,10 @@ /** * Java 9 Set * - * todo: deprecate and remove w/ min jdk upgrade to 11? 
* * @opensearch.internal */ +@Deprecated(forRemoval = true) public class Set { /** diff --git a/libs/common/src/test/java/org/opensearch/common/collect/ListTests.java b/libs/common/src/test/java/org/opensearch/common/collect/ListTests.java deleted file mode 100644 index 70841a102b783..0000000000000 --- a/libs/common/src/test/java/org/opensearch/common/collect/ListTests.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.collect; - -import org.opensearch.test.OpenSearchTestCase; - -import java.util.Arrays; -import java.util.Collection; - -import static org.hamcrest.CoreMatchers.equalTo; - -public class ListTests extends OpenSearchTestCase { - - public void testStringListOfZero() { - final String[] strings = {}; - final java.util.List stringsList = List.of(strings); - assertThat(stringsList.size(), equalTo(strings.length)); - assertTrue(stringsList.containsAll(Arrays.asList(strings))); - expectThrows(UnsupportedOperationException.class, () -> stringsList.add("foo")); - } - - public void testStringListOfOne() { - final String[] strings = { "foo" }; - final java.util.List stringsList = List.of(strings); - assertThat(stringsList.size(), equalTo(strings.length)); - assertTrue(stringsList.containsAll(Arrays.asList(strings))); - expectThrows(UnsupportedOperationException.class, () -> stringsList.add("foo")); - } - - public void testStringListOfTwo() { - final String[] strings = { "foo", "bar" }; - final java.util.List stringsList = List.of(strings); - assertThat(stringsList.size(), equalTo(strings.length)); - assertTrue(stringsList.containsAll(Arrays.asList(strings))); - expectThrows(UnsupportedOperationException.class, () -> stringsList.add("foo")); - } - - public void testStringListOfN() { - final String[] strings = { "foo", "bar", "baz" }; - final java.util.List stringsList = List.of(strings); - assertThat(stringsList.size(), equalTo(strings.length)); - assertTrue(stringsList.containsAll(Arrays.asList(strings))); - expectThrows(UnsupportedOperationException.class, () -> stringsList.add("foo")); - } - - public void testCopyOf() { - final Collection coll = Arrays.asList("foo", "bar", "baz"); - final java.util.List copy = List.copyOf(coll); - assertThat(coll.size(), equalTo(copy.size())); - assertTrue(copy.containsAll(coll)); - expectThrows(UnsupportedOperationException.class, () -> copy.add("foo")); - } -} diff --git 
a/libs/common/src/test/java/org/opensearch/common/collect/MapTests.java b/libs/common/src/test/java/org/opensearch/common/collect/MapTests.java deleted file mode 100644 index 8d7ffa71df562..0000000000000 --- a/libs/common/src/test/java/org/opensearch/common/collect/MapTests.java +++ /dev/null @@ -1,210 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.collect; - -import org.opensearch.test.OpenSearchTestCase; - -import static org.hamcrest.CoreMatchers.equalTo; - -public class MapTests extends OpenSearchTestCase { - - private static final String[] numbers = { "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine" }; - - public void testMapOfZero() { - final java.util.Map map = Map.of(); - validateMapContents(map, 0); - } - - public void testMapOfOne() { - final java.util.Map map = Map.of(numbers[0], 0); - validateMapContents(map, 1); - } - - public void testMapOfTwo() { - final java.util.Map map = Map.of(numbers[0], 0, numbers[1], 1); - validateMapContents(map, 2); - } - - public void testMapOfThree() { - final java.util.Map map = Map.of(numbers[0], 0, numbers[1], 1, numbers[2], 2); - validateMapContents(map, 3); - } - - public void testMapOfFour() { - final java.util.Map map = Map.of(numbers[0], 0, numbers[1], 1, numbers[2], 2, numbers[3], 3); - validateMapContents(map, 4); - } - - public void testMapOfFive() { - final java.util.Map map = Map.of(numbers[0], 0, numbers[1], 1, numbers[2], 2, numbers[3], 3, numbers[4], 4); - validateMapContents(map, 5); - } - - public void testMapOfSix() { - final java.util.Map map = Map.of( - numbers[0], - 0, - numbers[1], - 1, - numbers[2], - 2, - numbers[3], - 3, - numbers[4], - 4, - numbers[5], - 5 - ); - validateMapContents(map, 6); - } - - public void testMapOfSeven() { - final java.util.Map map = Map.of( - numbers[0], - 0, - numbers[1], - 1, - numbers[2], - 2, - numbers[3], - 3, - numbers[4], - 4, - numbers[5], - 5, - numbers[6], - 6 - ); - validateMapContents(map, 7); - } - - public void testMapOfEight() { - final java.util.Map map = Map.of( - numbers[0], - 0, - numbers[1], - 1, - numbers[2], - 2, - numbers[3], - 3, - numbers[4], - 4, - numbers[5], - 5, - numbers[6], - 6, - numbers[7], - 7 - ); - validateMapContents(map, 8); - } - - public void testMapOfNine() { - final java.util.Map map = Map.of( - numbers[0], - 0, - numbers[1], - 1, - numbers[2], - 2, - numbers[3], - 3, - numbers[4], - 4, - numbers[5], - 5, - numbers[6], - 6, - numbers[7], 
- 7, - numbers[8], - 8 - ); - validateMapContents(map, 9); - } - - public void testMapOfTen() { - final java.util.Map map = Map.of( - numbers[0], - 0, - numbers[1], - 1, - numbers[2], - 2, - numbers[3], - 3, - numbers[4], - 4, - numbers[5], - 5, - numbers[6], - 6, - numbers[7], - 7, - numbers[8], - 8, - numbers[9], - 9 - ); - validateMapContents(map, 10); - } - - private static void validateMapContents(java.util.Map map, int size) { - assertThat(map.size(), equalTo(size)); - for (int k = 0; k < map.size(); k++) { - assertEquals(Integer.class, map.get(numbers[k]).getClass()); - assertThat(k, equalTo(map.get(numbers[k]))); - } - expectThrows(UnsupportedOperationException.class, () -> map.put("foo", 42)); - } - - public void testOfEntries() { - final java.util.Map map = Map.ofEntries( - Map.entry(numbers[0], 0), - Map.entry(numbers[1], 1), - Map.entry(numbers[2], 2) - ); - validateMapContents(map, 3); - } - - public void testCopyOf() { - final java.util.Map map1 = Map.of("fooK", "fooV", "barK", "barV", "bazK", "bazV"); - final java.util.Map copy = Map.copyOf(map1); - assertThat(map1.size(), equalTo(copy.size())); - for (java.util.Map.Entry entry : map1.entrySet()) { - assertEquals(entry.getValue(), copy.get(entry.getKey())); - } - expectThrows(UnsupportedOperationException.class, () -> copy.put("foo", "bar")); - } -} diff --git a/libs/common/src/test/java/org/opensearch/common/collect/SetTests.java b/libs/common/src/test/java/org/opensearch/common/collect/SetTests.java deleted file mode 100644 index 1b7f29ff0d6f1..0000000000000 --- a/libs/common/src/test/java/org/opensearch/common/collect/SetTests.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.common.collect; - -import org.opensearch.test.OpenSearchTestCase; - -import java.util.Arrays; -import java.util.Collection; - -import static org.hamcrest.CoreMatchers.equalTo; - -public class SetTests extends OpenSearchTestCase { - - public void testStringSetOfZero() { - final String[] strings = {}; - final java.util.Set stringsSet = Set.of(strings); - assertThat(stringsSet.size(), equalTo(strings.length)); - assertTrue(stringsSet.containsAll(Arrays.asList(strings))); - expectThrows(UnsupportedOperationException.class, () -> stringsSet.add("foo")); - } - - public void testStringSetOfOne() { - final String[] strings = { "foo" }; - final java.util.Set stringsSet = Set.of(strings); - assertThat(stringsSet.size(), equalTo(strings.length)); - assertTrue(stringsSet.containsAll(Arrays.asList(strings))); - expectThrows(UnsupportedOperationException.class, () -> stringsSet.add("foo")); - } - - public void testStringSetOfTwo() { - final String[] strings = { "foo", "bar" }; - final java.util.Set stringsSet = Set.of(strings); - assertThat(stringsSet.size(), equalTo(strings.length)); - assertTrue(stringsSet.containsAll(Arrays.asList(strings))); - expectThrows(UnsupportedOperationException.class, () -> stringsSet.add("foo")); - } - - public void testStringSetOfN() { - final String[] strings = { "foo", "bar", "baz" }; - final java.util.Set stringsSet = Set.of(strings); - assertThat(stringsSet.size(), equalTo(strings.length)); - assertTrue(stringsSet.containsAll(Arrays.asList(strings))); - expectThrows(UnsupportedOperationException.class, () -> stringsSet.add("foo")); - } - - public void testCopyOf() { - final Collection coll = Arrays.asList("foo", "bar", "baz"); - final java.util.Set copy = Set.copyOf(coll); - assertThat(coll.size(), equalTo(copy.size())); - assertTrue(copy.containsAll(coll)); - expectThrows(UnsupportedOperationException.class, () -> copy.add("foo")); - } -} diff --git a/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java b/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java index ed48585cc124a..bdcde57f91bb3 100644 --- a/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java +++ b/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java @@ -65,20 +65,20 @@ public class GrokTests extends OpenSearchTestCase { public void testMatchWithoutCaptures() { Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "value", logger::warn); - assertThat(grok.captures("value"), equalTo(org.opensearch.common.collect.Map.of())); - assertThat(grok.captures("prefix_value"), equalTo(org.opensearch.common.collect.Map.of())); + assertThat(grok.captures("value"), equalTo(Map.of())); + assertThat(grok.captures("prefix_value"), equalTo(Map.of())); assertThat(grok.captures("no_match"), nullValue()); } public void testCaputuresBytes() { Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{NUMBER:n:int}", logger::warn); byte[] utf8 = "10".getBytes(StandardCharsets.UTF_8); - assertThat(captureBytes(grok, utf8, 0, utf8.length), equalTo(org.opensearch.common.collect.Map.of("n", 10))); - assertThat(captureBytes(grok, utf8, 0, 1), equalTo(org.opensearch.common.collect.Map.of("n", 1))); + assertThat(captureBytes(grok, utf8, 0, utf8.length), equalTo(Map.of("n", 10))); + assertThat(captureBytes(grok, utf8, 0, 1), equalTo(Map.of("n", 1))); utf8 = "10 11 12".getBytes(StandardCharsets.UTF_8); - assertThat(captureBytes(grok, utf8, 0, 2), equalTo(org.opensearch.common.collect.Map.of("n", 10))); - assertThat(captureBytes(grok, utf8, 3, 2), equalTo(org.opensearch.common.collect.Map.of("n", 11))); 
- assertThat(captureBytes(grok, utf8, 6, 2), equalTo(org.opensearch.common.collect.Map.of("n", 12))); + assertThat(captureBytes(grok, utf8, 0, 2), equalTo(Map.of("n", 10))); + assertThat(captureBytes(grok, utf8, 3, 2), equalTo(Map.of("n", 11))); + assertThat(captureBytes(grok, utf8, 6, 2), equalTo(Map.of("n", 12))); } private Map captureBytes(Grok grok, byte[] utf8, int offset, int length) { @@ -99,15 +99,15 @@ public void testSimpleSyslogLine() { Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{SYSLOGLINE}", logger::warn); assertCaptureConfig( grok, - org.opensearch.common.collect.Map.ofEntries( - org.opensearch.common.collect.Map.entry("facility", STRING), - org.opensearch.common.collect.Map.entry("logsource", STRING), - org.opensearch.common.collect.Map.entry("message", STRING), - org.opensearch.common.collect.Map.entry("pid", STRING), - org.opensearch.common.collect.Map.entry("priority", STRING), - org.opensearch.common.collect.Map.entry("program", STRING), - org.opensearch.common.collect.Map.entry("timestamp", STRING), - org.opensearch.common.collect.Map.entry("timestamp8601", STRING) + Map.ofEntries( + Map.entry("facility", STRING), + Map.entry("logsource", STRING), + Map.entry("message", STRING), + Map.entry("pid", STRING), + Map.entry("priority", STRING), + Map.entry("program", STRING), + Map.entry("timestamp", STRING), + Map.entry("timestamp8601", STRING) ) ); Map matches = grok.captures(line); @@ -134,16 +134,16 @@ public void testSyslog5424Line() { Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{SYSLOG5424LINE}", logger::warn); assertCaptureConfig( grok, - org.opensearch.common.collect.Map.ofEntries( - org.opensearch.common.collect.Map.entry("syslog5424_app", STRING), - org.opensearch.common.collect.Map.entry("syslog5424_host", STRING), - org.opensearch.common.collect.Map.entry("syslog5424_msg", STRING), - org.opensearch.common.collect.Map.entry("syslog5424_msgid", STRING), - org.opensearch.common.collect.Map.entry("syslog5424_pri", STRING), - org.opensearch.common.collect.Map.entry("syslog5424_proc", STRING), - org.opensearch.common.collect.Map.entry("syslog5424_sd", STRING), - org.opensearch.common.collect.Map.entry("syslog5424_ts", STRING), - org.opensearch.common.collect.Map.entry("syslog5424_ver", STRING) + Map.ofEntries( + Map.entry("syslog5424_app", STRING), + Map.entry("syslog5424_host", STRING), + Map.entry("syslog5424_msg", STRING), + Map.entry("syslog5424_msgid", STRING), + Map.entry("syslog5424_pri", STRING), + Map.entry("syslog5424_proc", STRING), + Map.entry("syslog5424_sd", STRING), + Map.entry("syslog5424_ts", STRING), + Map.entry("syslog5424_ver", STRING) ) ); Map matches = grok.captures(line); @@ -161,14 +161,14 @@ public void testSyslog5424Line() { public void testDatePattern() { String line = "fancy 12-12-12 12:12:12"; Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "(?%{DATE_EU} %{TIME})", logger::warn); - assertCaptureConfig(grok, org.opensearch.common.collect.Map.of("timestamp", STRING)); + assertCaptureConfig(grok, Map.of("timestamp", STRING)); Map matches = grok.captures(line); assertEquals("12-12-12 12:12:12", matches.get("timestamp")); } public void testNilCoercedValues() { Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "test (N/A|%{BASE10NUM:duration:float}ms)", logger::warn); - assertCaptureConfig(grok, org.opensearch.common.collect.Map.of("duration", FLOAT)); + assertCaptureConfig(grok, Map.of("duration", FLOAT)); Map matches = grok.captures("test 28.4ms"); assertEquals(28.4f, matches.get("duration")); matches = grok.captures("test N/A"); @@ -177,7 +177,7 @@ 
public void testNilCoercedValues() { public void testNilWithNoCoercion() { Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "test (N/A|%{BASE10NUM:duration}ms)", logger::warn); - assertCaptureConfig(grok, org.opensearch.common.collect.Map.of("duration", STRING)); + assertCaptureConfig(grok, Map.of("duration", STRING)); Map matches = grok.captures("test 28.4ms"); assertEquals("28.4", matches.get("duration")); matches = grok.captures("test N/A"); @@ -194,13 +194,13 @@ public void testUnicodeSyslog() { ); assertCaptureConfig( grok, - org.opensearch.common.collect.Map.ofEntries( - org.opensearch.common.collect.Map.entry("syslog_hostname", STRING), - org.opensearch.common.collect.Map.entry("syslog_message", STRING), - org.opensearch.common.collect.Map.entry("syslog_pid", STRING), - org.opensearch.common.collect.Map.entry("syslog_pri", STRING), - org.opensearch.common.collect.Map.entry("syslog_program", STRING), - org.opensearch.common.collect.Map.entry("syslog_timestamp", STRING) + Map.ofEntries( + Map.entry("syslog_hostname", STRING), + Map.entry("syslog_message", STRING), + Map.entry("syslog_pid", STRING), + Map.entry("syslog_pri", STRING), + Map.entry("syslog_program", STRING), + Map.entry("syslog_timestamp", STRING) ) ); Map matches = grok.captures( @@ -215,21 +215,21 @@ public void testUnicodeSyslog() { public void testNamedFieldsWithWholeTextMatch() { Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{DATE_EU:stimestamp}", logger::warn); - assertCaptureConfig(grok, org.opensearch.common.collect.Map.of("stimestamp", STRING)); + assertCaptureConfig(grok, Map.of("stimestamp", STRING)); Map matches = grok.captures("11/01/01"); assertThat(matches.get("stimestamp"), equalTo("11/01/01")); } public void testWithOniguramaNamedCaptures() { Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "(?\\w+)", logger::warn); - assertCaptureConfig(grok, org.opensearch.common.collect.Map.of("foo", STRING)); + assertCaptureConfig(grok, Map.of("foo", STRING)); Map matches = grok.captures("hello world"); assertThat(matches.get("foo"), equalTo("hello")); } public void testISO8601() { Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "^%{TIMESTAMP_ISO8601}$", logger::warn); - assertCaptureConfig(grok, org.opensearch.common.collect.Map.of()); + assertCaptureConfig(grok, Map.of()); List timeMessages = Arrays.asList( "2001-01-01T00:00:00", "1974-03-02T04:09:09", @@ -254,7 +254,7 @@ public void testISO8601() { public void testNotISO8601() { Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "^%{TIMESTAMP_ISO8601}$", logger::warn); - assertCaptureConfig(grok, org.opensearch.common.collect.Map.of()); + assertCaptureConfig(grok, Map.of()); List timeMessages = Arrays.asList( "2001-13-01T00:00:00", // invalid month "2001-00-01T00:00:00", // invalid month @@ -294,7 +294,7 @@ public void testNoNamedCaptures() { String text = "wowza !!!Tal!!! - Tal"; String pattern = "%{EXCITED_NAME} - %{NAME}"; Grok g = new Grok(bank, pattern, false, logger::warn); - assertCaptureConfig(g, org.opensearch.common.collect.Map.of("EXCITED_NAME_0", STRING, "NAME_21", STRING, "NAME_22", STRING)); + assertCaptureConfig(g, Map.of("EXCITED_NAME_0", STRING, "NAME_21", STRING, "NAME_22", STRING)); assertEquals("(?!!!(?Tal)!!!) 
- (?Tal)", g.toRegex(pattern)); assertEquals(true, g.match(text)); @@ -400,7 +400,7 @@ public void testMalformedPattern() { public void testBooleanCaptures() { String pattern = "%{WORD:name}=%{WORD:status:boolean}"; Grok g = new Grok(Grok.BUILTIN_PATTERNS, pattern, logger::warn); - assertCaptureConfig(g, org.opensearch.common.collect.Map.of("name", STRING, "status", BOOLEAN)); + assertCaptureConfig(g, Map.of("name", STRING, "status", BOOLEAN)); String text = "active=true"; Map expected = new HashMap<>(); @@ -428,7 +428,7 @@ public void testNumericCaptures() { String pattern = "%{NUMBER:bytes:float} %{NUMBER:id:long} %{NUMBER:rating:double}"; Grok g = new Grok(bank, pattern, logger::warn); - assertCaptureConfig(g, org.opensearch.common.collect.Map.of("bytes", FLOAT, "id", LONG, "rating", DOUBLE)); + assertCaptureConfig(g, Map.of("bytes", FLOAT, "id", LONG, "rating", DOUBLE)); String text = "12009.34 20000000000 4820.092"; Map expected = new HashMap<>(); @@ -476,7 +476,7 @@ public void testNumericCapturesCoercion() { String pattern = "%{NUMBER:bytes:float} %{NUMBER:status} %{NUMBER}"; Grok g = new Grok(bank, pattern, logger::warn); - assertCaptureConfig(g, org.opensearch.common.collect.Map.of("bytes", FLOAT, "status", STRING)); + assertCaptureConfig(g, Map.of("bytes", FLOAT, "status", STRING)); String text = "12009.34 200 9032"; Map expected = new HashMap<>(); @@ -494,8 +494,8 @@ public void testGarbageTypeNameBecomesString() { String pattern = "%{NUMBER:f:not_a_valid_type}"; Grok g = new Grok(bank, pattern, logger::warn); - assertCaptureConfig(g, org.opensearch.common.collect.Map.of("f", STRING)); - assertThat(g.captures("12009.34"), equalTo(org.opensearch.common.collect.Map.of("f", "12009.34"))); + assertCaptureConfig(g, Map.of("f", STRING)); + assertThat(g.captures("12009.34"), equalTo(Map.of("f", "12009.34"))); } public void testApacheLog() { @@ -505,19 +505,19 @@ public void testApacheLog() { Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{COMBINEDAPACHELOG}", logger::warn); assertCaptureConfig( grok, - org.opensearch.common.collect.Map.ofEntries( - org.opensearch.common.collect.Map.entry("agent", STRING), - org.opensearch.common.collect.Map.entry("auth", STRING), - org.opensearch.common.collect.Map.entry("bytes", STRING), - org.opensearch.common.collect.Map.entry("clientip", STRING), - org.opensearch.common.collect.Map.entry("httpversion", STRING), - org.opensearch.common.collect.Map.entry("ident", STRING), - org.opensearch.common.collect.Map.entry("rawrequest", STRING), - org.opensearch.common.collect.Map.entry("referrer", STRING), - org.opensearch.common.collect.Map.entry("request", STRING), - org.opensearch.common.collect.Map.entry("response", STRING), - org.opensearch.common.collect.Map.entry("timestamp", STRING), - org.opensearch.common.collect.Map.entry("verb", STRING) + Map.ofEntries( + Map.entry("agent", STRING), + Map.entry("auth", STRING), + Map.entry("bytes", STRING), + Map.entry("clientip", STRING), + Map.entry("httpversion", STRING), + Map.entry("ident", STRING), + Map.entry("rawrequest", STRING), + Map.entry("referrer", STRING), + Map.entry("request", STRING), + Map.entry("response", STRING), + Map.entry("timestamp", STRING), + Map.entry("verb", STRING) ) ); Map matches = grok.captures(logLine); @@ -594,18 +594,18 @@ public void testComplete() { Grok grok = new Grok(bank, pattern, logger::warn); assertCaptureConfig( grok, - org.opensearch.common.collect.Map.ofEntries( - org.opensearch.common.collect.Map.entry("agent", STRING), - 
org.opensearch.common.collect.Map.entry("auth", STRING), - org.opensearch.common.collect.Map.entry("bytes", INTEGER), - org.opensearch.common.collect.Map.entry("clientip", STRING), - org.opensearch.common.collect.Map.entry("httpversion", STRING), - org.opensearch.common.collect.Map.entry("ident", STRING), - org.opensearch.common.collect.Map.entry("referrer", STRING), - org.opensearch.common.collect.Map.entry("request", STRING), - org.opensearch.common.collect.Map.entry("response", INTEGER), - org.opensearch.common.collect.Map.entry("timestamp", STRING), - org.opensearch.common.collect.Map.entry("verb", STRING) + Map.ofEntries( + Map.entry("agent", STRING), + Map.entry("auth", STRING), + Map.entry("bytes", INTEGER), + Map.entry("clientip", STRING), + Map.entry("httpversion", STRING), + Map.entry("ident", STRING), + Map.entry("referrer", STRING), + Map.entry("request", STRING), + Map.entry("response", INTEGER), + Map.entry("timestamp", STRING), + Map.entry("verb", STRING) ) ); @@ -642,7 +642,7 @@ public void testMultipleNamedCapturesWithSameName() { Map bank = new HashMap<>(); bank.put("SINGLEDIGIT", "[0-9]"); Grok grok = new Grok(bank, "%{SINGLEDIGIT:num}%{SINGLEDIGIT:num}", logger::warn); - assertCaptureConfig(grok, org.opensearch.common.collect.Map.of("num", STRING)); + assertCaptureConfig(grok, Map.of("num", STRING)); Map expected = new HashMap<>(); expected.put("num", "1"); diff --git a/libs/x-content/licenses/snakeyaml-1.33.jar.sha1 b/libs/x-content/licenses/snakeyaml-1.33.jar.sha1 deleted file mode 100644 index c8a323290e7ba..0000000000000 --- a/libs/x-content/licenses/snakeyaml-1.33.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2cd0a87ff7df953f810c344bdf2fe3340b954c69 \ No newline at end of file diff --git a/libs/x-content/licenses/snakeyaml-2.0.jar.sha1 b/libs/x-content/licenses/snakeyaml-2.0.jar.sha1 new file mode 100644 index 0000000000000..d09dea5564729 --- /dev/null +++ b/libs/x-content/licenses/snakeyaml-2.0.jar.sha1 @@ -0,0 +1 @@ +3aab2116756442bf0d4cd1c089b24d34c3baa253 \ No newline at end of file diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java index 03404a284020b..e38599818d04f 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java @@ -244,10 +244,9 @@ public void testShingleFilters() { .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - expectThrows( - IllegalArgumentException.class, - () -> { indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; } - ); + expectThrows(IllegalArgumentException.class, () -> { + indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; + }); } @@ -308,7 +307,9 @@ public void testPreconfiguredTokenFilters() throws IOException { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, "Expected exception for factory " + tf.getName(), - () -> { tf.get(idxSettings, null, tf.getName(), settings).getSynonymFilter(); } + () -> { + tf.get(idxSettings, null, tf.getName(), settings).getSynonymFilter(); + } ); assertEquals(tf.getName(), "Token filter [" + tf.getName() + "] cannot be used to parse synonyms", e.getMessage()); } else { diff --git 
a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index d6153637f656d..d1c1d0e0d1ea8 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -123,7 +123,9 @@ public void testUnmapped() throws IOException { randomPrecision(), null, geoGrid -> { assertEquals(0, geoGrid.getBuckets().size()); }, - iw -> { iw.addDocument(Collections.singleton(new LatLonDocValuesField(FIELD_NAME, 10D, 10D))); } + iw -> { + iw.addDocument(Collections.singleton(new LatLonDocValuesField(FIELD_NAME, 10D, 10D))); + } ); } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/AppendProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/AppendProcessorTests.java index 7caa63792f347..1cb1ad7e408f6 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/AppendProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/AppendProcessorTests.java @@ -203,7 +203,7 @@ public void testAppendingUniqueValueToScalar() throws Exception { appendProcessor.execute(ingestDocument); List list = ingestDocument.getFieldValue(field, List.class); assertThat(list.size(), equalTo(2)); - assertThat(list, equalTo(org.opensearch.common.collect.List.of(originalValue, newValue))); + assertThat(list, equalTo(List.of(originalValue, newValue))); } public void testAppendingToListWithDuplicatesDisallowed() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ForEachProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ForEachProcessorTests.java index f49d5492a09b3..241945e58fa06 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ForEachProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ForEachProcessorTests.java @@ -226,12 +226,8 @@ public void testModifyFieldsOutsideArray() throws Exception { "values", new CompoundProcessor( false, - org.opensearch.common.collect.List.of( - new UppercaseProcessor("_tag_upper", null, "_ingest._value", false, "_ingest._value") - ), - org.opensearch.common.collect.List.of( - new AppendProcessor("_tag", null, template, (model) -> (Collections.singletonList("added")), true) - ) + List.of(new UppercaseProcessor("_tag_upper", null, "_ingest._value", false, "_ingest._value")), + List.of(new AppendProcessor("_tag", null, template, (model) -> (Collections.singletonList("added")), true)) ), false ); diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/GrokProcessorGetActionTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/GrokProcessorGetActionTests.java index 5ccf85dd53f0e..641d8cdc5bcfe 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/GrokProcessorGetActionTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/GrokProcessorGetActionTests.java @@ -57,7 +57,7 @@ import static org.mockito.Mockito.mock; public class GrokProcessorGetActionTests extends OpenSearchTestCase { - private static final Map TEST_PATTERNS = org.opensearch.common.collect.Map.of("PATTERN2", "foo2", "PATTERN1", "foo1"); + private static final Map 
TEST_PATTERNS = Map.of("PATTERN2", "foo2", "PATTERN1", "foo1"); public void testRequest() throws Exception { GrokProcessorGetAction.Request request = new GrokProcessorGetAction.Request(false); diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpPluginTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpPluginTests.java index 540d68b0982eb..31cd9e8ef2f6a 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpPluginTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpPluginTests.java @@ -65,11 +65,9 @@ public void testThrowsFunctionsException() { GeoIpCache cache = new GeoIpCache(1); IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> cache.putIfAbsent( - InetAddresses.forString("127.0.0.1"), - AbstractResponse.class, - ip -> { throw new IllegalArgumentException("bad"); } - ) + () -> cache.putIfAbsent(InetAddresses.forString("127.0.0.1"), AbstractResponse.class, ip -> { + throw new IllegalArgumentException("bad"); + }) ); assertEquals("bad", ex.getMessage()); } diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/DefBootstrapTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/DefBootstrapTests.java index 9134d7a3c062b..d063a3fd5c0a6 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/DefBootstrapTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/DefBootstrapTests.java @@ -157,21 +157,13 @@ public void testMegamorphic() throws Throwable { map.put("a", "b"); assertEquals(2, (int) handle.invokeExact((Object) map)); - final IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, - () -> { Integer.toString((int) handle.invokeExact(new Object())); } - ); + final IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> { + Integer.toString((int) handle.invokeExact(new Object())); + }); assertEquals("dynamic method [java.lang.Object, size/0] not found", iae.getMessage()); - assertTrue( - "Does not fail inside ClassValue.computeValue()", - Arrays.stream(iae.getStackTrace()) - .anyMatch( - e -> { - return e.getMethodName().equals("computeValue") - && e.getClassName().startsWith("org.opensearch.painless.DefBootstrap$PIC$"); - } - ) - ); + assertTrue("Does not fail inside ClassValue.computeValue()", Arrays.stream(iae.getStackTrace()).anyMatch(e -> { + return e.getMethodName().equals("computeValue") && e.getClassName().startsWith("org.opensearch.painless.DefBootstrap$PIC$"); + })); } // test operators with null guards diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/FunctionRefTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/FunctionRefTests.java index 2ed1fa49d0bda..065e0fbd1f4e2 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/FunctionRefTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/FunctionRefTests.java @@ -268,18 +268,16 @@ public void testInterfaceStaticMethod() { } public void testMethodMissing() { - Exception e = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("List l = [2, 1]; l.sort(Integer::bogus); return l.get(0);"); } - ); + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("List l = [2, 1]; l.sort(Integer::bogus); return l.get(0);"); + }); assertThat(e.getMessage(), containsString("function reference [Integer::bogus/2] matching 
[java.util.Comparator")); } public void testQualifiedMethodMissing() { - Exception e = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("List l = [2, 1]; l.sort(java.time.Instant::bogus); return l.get(0);", false); } - ); + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("List l = [2, 1]; l.sort(java.time.Instant::bogus); return l.get(0);", false); + }); assertThat( e.getMessage(), containsString("function reference [java.time.Instant::bogus/2] matching [java.util.Comparator, compare/2") @@ -287,26 +285,23 @@ public void testQualifiedMethodMissing() { } public void testClassMissing() { - Exception e = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("List l = [2, 1]; l.sort(Bogus::bogus); return l.get(0);", false); } - ); + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("List l = [2, 1]; l.sort(Bogus::bogus); return l.get(0);", false); + }); assertThat(e.getMessage(), endsWith("variable [Bogus] is not defined")); } public void testQualifiedClassMissing() { - Exception e = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("List l = [2, 1]; l.sort(org.joda.time.BogusDateTime::bogus); return l.get(0);", false); } - ); + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("List l = [2, 1]; l.sort(org.joda.time.BogusDateTime::bogus); return l.get(0);", false); + }); assertEquals("variable [org.joda.time.BogusDateTime] is not defined", e.getMessage()); } public void testNotFunctionalInterface() { - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("List l = new ArrayList(); l.add(2); l.add(1); l.add(Integer::bogus); return l.get(0);"); } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("List l = new ArrayList(); l.add(2); l.add(1); l.add(Integer::bogus); return l.get(0);"); + }); assertThat( expected.getMessage(), containsString("cannot convert function reference [Integer::bogus] to a non-functional interface [def]") @@ -314,17 +309,15 @@ public void testNotFunctionalInterface() { } public void testIncompatible() { - expectScriptThrows( - ClassCastException.class, - () -> { exec("List l = new ArrayList(); l.add(2); l.add(1); l.sort(String::startsWith); return l.get(0);"); } - ); + expectScriptThrows(ClassCastException.class, () -> { + exec("List l = new ArrayList(); l.add(2); l.add(1); l.sort(String::startsWith); return l.get(0);"); + }); } public void testWrongArity() { - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("Optional.empty().orElseGet(String::startsWith);"); } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("Optional.empty().orElseGet(String::startsWith);"); + }); assertThat( expected.getMessage(), containsString("function reference [String::startsWith/0] matching [java.util.function.Supplier") @@ -332,18 +325,16 @@ public void testWrongArity() { } public void testWrongArityNotEnough() { - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("List l = new ArrayList(); l.add(2); l.add(1); l.sort(String::isEmpty);"); } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("List l = new ArrayList(); l.add(2); l.add(1); l.sort(String::isEmpty);"); + }); assertThat(expected.getMessage(), 
containsString("function reference [String::isEmpty/2] matching [java.util.Comparator")); } public void testWrongArityDef() { - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("def y = Optional.empty(); return y.orElseGet(String::startsWith);"); } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("def y = Optional.empty(); return y.orElseGet(String::startsWith);"); + }); assertThat( expected.getMessage(), containsString("function reference [String::startsWith/0] matching [java.util.function.Supplier") @@ -351,38 +342,33 @@ public void testWrongArityDef() { } public void testWrongArityNotEnoughDef() { - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("def l = new ArrayList(); l.add(2); l.add(1); l.sort(String::isEmpty);"); } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("def l = new ArrayList(); l.add(2); l.add(1); l.sort(String::isEmpty);"); + }); assertThat(expected.getMessage(), containsString("function reference [String::isEmpty/2] matching [java.util.Comparator")); } public void testReturnVoid() { - Throwable expected = expectScriptThrows( - ClassCastException.class, - () -> { exec("StringBuilder b = new StringBuilder(); List l = [1, 2]; l.stream().mapToLong(b::setLength).sum();"); } - ); + Throwable expected = expectScriptThrows(ClassCastException.class, () -> { + exec("StringBuilder b = new StringBuilder(); List l = [1, 2]; l.stream().mapToLong(b::setLength).sum();"); + }); assertThat(expected.getMessage(), containsString("Cannot cast from [void] to [long].")); } public void testReturnVoidDef() { - Exception expected = expectScriptThrows( - LambdaConversionException.class, - () -> { exec("StringBuilder b = new StringBuilder(); def l = [1, 2]; l.stream().mapToLong(b::setLength);"); } - ); + Exception expected = expectScriptThrows(LambdaConversionException.class, () -> { + exec("StringBuilder b = new StringBuilder(); def l = [1, 2]; l.stream().mapToLong(b::setLength);"); + }); assertThat(expected.getMessage(), containsString("lambda expects return type [long], but found return type [void]")); - expected = expectScriptThrows( - LambdaConversionException.class, - () -> { exec("def b = new StringBuilder(); def l = [1, 2]; l.stream().mapToLong(b::setLength);"); } - ); + expected = expectScriptThrows(LambdaConversionException.class, () -> { + exec("def b = new StringBuilder(); def l = [1, 2]; l.stream().mapToLong(b::setLength);"); + }); assertThat(expected.getMessage(), containsString("lambda expects return type [long], but found return type [void]")); - expected = expectScriptThrows( - LambdaConversionException.class, - () -> { exec("def b = new StringBuilder(); List l = [1, 2]; l.stream().mapToLong(b::setLength);"); } - ); + expected = expectScriptThrows(LambdaConversionException.class, () -> { + exec("def b = new StringBuilder(); List l = [1, 2]; l.stream().mapToLong(b::setLength);"); + }); assertThat(expected.getMessage(), containsString("lambda expects return type [long], but found return type [void]")); } } diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/FunctionTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/FunctionTests.java index 77b6e8f0b3d5c..45efaa2c8a5a4 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/FunctionTests.java +++ 
b/modules/lang-painless/src/test/java/org/opensearch/painless/FunctionTests.java @@ -82,10 +82,9 @@ public void testReturnsAreUnboxedIfNeeded() { } public void testDuplicates() { - Exception expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("void test(int x) {x = 2;} void test(def y) {y = 3;} test()"); } - ); + Exception expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("void test(int x) {x = 2;} void test(def y) {y = 3;} test()"); + }); assertThat(expected.getMessage(), containsString("found duplicate function")); } @@ -108,23 +107,20 @@ public void testInfiniteLoop() { public void testReturnVoid() { assertEquals(null, exec("void test(StringBuilder b, int i) {b.setLength(i)} test(new StringBuilder(), 1)")); - Exception expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("int test(StringBuilder b, int i) {b.setLength(i)} test(new StringBuilder(), 1)"); } - ); + Exception expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("int test(StringBuilder b, int i) {b.setLength(i)} test(new StringBuilder(), 1)"); + }); assertEquals( "invalid function definition: " + "not all paths provide a return value for function [test] with [2] parameters", expected.getMessage() ); - expected = expectScriptThrows( - ClassCastException.class, - () -> { exec("int test(StringBuilder b, int i) {return b.setLength(i)} test(new StringBuilder(), 1)"); } - ); + expected = expectScriptThrows(ClassCastException.class, () -> { + exec("int test(StringBuilder b, int i) {return b.setLength(i)} test(new StringBuilder(), 1)"); + }); assertEquals("Cannot cast from [void] to [int].", expected.getMessage()); - expected = expectScriptThrows( - ClassCastException.class, - () -> { exec("def test(StringBuilder b, int i) {return b.setLength(i)} test(new StringBuilder(), 1)"); } - ); + expected = expectScriptThrows(ClassCastException.class, () -> { + exec("def test(StringBuilder b, int i) {return b.setLength(i)} test(new StringBuilder(), 1)"); + }); assertEquals("Cannot cast from [void] to [def].", expected.getMessage()); } } diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/GeneralCastTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/GeneralCastTests.java index 225dbd51819de..27930e2cb43d8 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/GeneralCastTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/GeneralCastTests.java @@ -293,14 +293,12 @@ public void testIllegalExplicitConversionsDef() { } public void testIllegalVoidCasts() { - expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("def map = ['a': 1,'b': 2,'c': 3]; map.c = Collections.sort(new ArrayList(map.keySet()));"); } - ); - expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("Map map = ['a': 1,'b': 2,'c': 3]; def x = new HashMap(); x.put(1, map.clear());"); } - ); + expectScriptThrows(IllegalArgumentException.class, () -> { + exec("def map = ['a': 1,'b': 2,'c': 3]; map.c = Collections.sort(new ArrayList(map.keySet()));"); + }); + expectScriptThrows(IllegalArgumentException.class, () -> { + exec("Map map = ['a': 1,'b': 2,'c': 3]; def x = new HashMap(); x.put(1, map.clear());"); + }); } public void testBoxedDefCalls() { diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/LambdaTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/LambdaTests.java index c1b19522021b8..8d0cbb4018801 100644 --- 
a/modules/lang-painless/src/test/java/org/opensearch/painless/LambdaTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/LambdaTests.java @@ -173,28 +173,19 @@ public void testTwoCaptures() { } public void testCapturesAreReadOnly() { - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { - exec( - "List l = new ArrayList(); l.add(1); l.add(1); " + "return l.stream().mapToInt(x -> { l = null; return x + 1 }).sum();" - ); - } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("List l = new ArrayList(); l.add(1); l.add(1); " + "return l.stream().mapToInt(x -> { l = null; return x + 1 }).sum();"); + }); assertTrue(expected.getMessage().contains("is read-only")); } /** Lambda parameters shouldn't be able to mask a variable already in scope */ public void testNoParamMasking() { - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { - exec( - "int x = 0; List l = new ArrayList(); l.add(1); l.add(1); " - + "return l.stream().mapToInt(x -> { x += 1; return x }).sum();" - ); - } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec( + "int x = 0; List l = new ArrayList(); l.add(1); l.add(1); " + "return l.stream().mapToInt(x -> { x += 1; return x }).sum();" + ); + }); assertTrue(expected.getMessage().contains("already defined")); } @@ -214,36 +205,30 @@ public void testNestedCaptureParams() { } public void testWrongArity() { - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - false, - () -> { exec("Optional.empty().orElseGet(x -> x);"); } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, false, () -> { + exec("Optional.empty().orElseGet(x -> x);"); + }); assertTrue(expected.getMessage().contains("Incorrect number of parameters")); } public void testWrongArityDef() { - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("def y = Optional.empty(); return y.orElseGet(x -> x);"); } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("def y = Optional.empty(); return y.orElseGet(x -> x);"); + }); assertTrue(expected.getMessage(), expected.getMessage().contains("due to an incorrect number of arguments")); } public void testWrongArityNotEnough() { - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - false, - () -> { exec("List l = new ArrayList(); l.add(1); l.add(1); " + "return l.stream().mapToInt(() -> 5).sum();"); } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, false, () -> { + exec("List l = new ArrayList(); l.add(1); l.add(1); " + "return l.stream().mapToInt(() -> 5).sum();"); + }); assertTrue(expected.getMessage().contains("Incorrect number of parameters")); } public void testWrongArityNotEnoughDef() { - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("def l = new ArrayList(); l.add(1); l.add(1); " + "return l.stream().mapToInt(() -> 5).sum();"); } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("def l = new ArrayList(); l.add(1); l.add(1); " + "return l.stream().mapToInt(() -> 5).sum();"); + }); assertTrue(expected.getMessage(), expected.getMessage().contains("due to an 
incorrect number of arguments")); } @@ -308,19 +293,17 @@ public void testReservedCapture() { } public void testReturnVoid() { - Throwable expected = expectScriptThrows( - ClassCastException.class, - () -> { exec("StringBuilder b = new StringBuilder(); List l = [1, 2]; l.stream().mapToLong(i -> b.setLength(i))"); } - ); + Throwable expected = expectScriptThrows(ClassCastException.class, () -> { + exec("StringBuilder b = new StringBuilder(); List l = [1, 2]; l.stream().mapToLong(i -> b.setLength(i))"); + }); assertThat(expected.getMessage(), containsString("Cannot cast from [void] to [long].")); } public void testReturnVoidDef() { // If we can catch the error at compile time we do - Exception expected = expectScriptThrows( - ClassCastException.class, - () -> { exec("StringBuilder b = new StringBuilder(); def l = [1, 2]; l.stream().mapToLong(i -> b.setLength(i))"); } - ); + Exception expected = expectScriptThrows(ClassCastException.class, () -> { + exec("StringBuilder b = new StringBuilder(); def l = [1, 2]; l.stream().mapToLong(i -> b.setLength(i))"); + }); assertThat(expected.getMessage(), containsString("Cannot cast from [void] to [def].")); // Otherwise we convert the void into a null diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/OverloadTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/OverloadTests.java index 16f3c16a59548..02fa18ec07178 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/OverloadTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/OverloadTests.java @@ -38,20 +38,18 @@ public class OverloadTests extends ScriptTestCase { public void testMethod() { // assertEquals(2, exec("return 'abc123abc'.indexOf('c');")); // assertEquals(8, exec("return 'abc123abc'.indexOf('c', 3);")); - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("return 'abc123abc'.indexOf('c', 3, 'bogus');"); } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("return 'abc123abc'.indexOf('c', 3, 'bogus');"); + }); assertTrue(expected.getMessage().contains("[java.lang.String, indexOf/3]")); } public void testMethodDynamic() { assertEquals(2, exec("def x = 'abc123abc'; return x.indexOf('c');")); assertEquals(8, exec("def x = 'abc123abc'; return x.indexOf('c', 3);")); - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("def x = 'abc123abc'; return x.indexOf('c', 3, 'bogus');"); } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("def x = 'abc123abc'; return x.indexOf('c', 3, 'bogus');"); + }); assertTrue(expected.getMessage().contains("dynamic method [java.lang.String, indexOf/3] not found")); } diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/StringTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/StringTests.java index d344a9c2a31f1..b9586924d4fcd 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/StringTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/StringTests.java @@ -182,11 +182,9 @@ public void testStringAndCharacter() { assertEquals('c', exec("String s = \"c\"; (char)s")); assertEquals('c', exec("String s = 'c'; (char)s")); - ClassCastException expected = expectScriptThrows( - ClassCastException.class, - false, - () -> { assertEquals("cc", exec("return (String)(char)\"cc\"")); } - ); + 
ClassCastException expected = expectScriptThrows(ClassCastException.class, false, () -> { + assertEquals("cc", exec("return (String)(char)\"cc\"")); + }); assertTrue(expected.getMessage().contains("cannot cast java.lang.String with length not equal to one to char")); expected = expectScriptThrows(ClassCastException.class, false, () -> { assertEquals("cc", exec("return (String)(char)'cc'")); }); diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java index fb8d2eccfa043..3f12bf57a0e33 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java @@ -57,10 +57,9 @@ public void testNullPointer() { public void testScriptStack() { for (String type : new String[] { "String", "def " }) { // trigger NPE at line 1 of the script - ScriptException exception = expectThrows( - ScriptException.class, - () -> { exec(type + " x = null; boolean y = x.isEmpty();\n" + "return y;"); } - ); + ScriptException exception = expectThrows(ScriptException.class, () -> { + exec(type + " x = null; boolean y = x.isEmpty();\n" + "return y;"); + }); // null deref at x.isEmpty(), the '.' is offset 30 assertScriptElementColumn(30, exception); assertScriptStack(exception, "y = x.isEmpty();\n", " ^---- HERE"); @@ -84,12 +83,9 @@ public void testScriptStack() { assertThat(exception.getCause(), instanceOf(NullPointerException.class)); // trigger NPE at line 4 in script (inside conditional) - exception = expectThrows( - ScriptException.class, - () -> { - exec(type + " x = null;\n" + "boolean y = false;\n" + "if (!y) {\n" + " y = x.isEmpty();\n" + "}\n" + "return y;"); - } - ); + exception = expectThrows(ScriptException.class, () -> { + exec(type + " x = null;\n" + "boolean y = false;\n" + "if (!y) {\n" + " y = x.isEmpty();\n" + "}\n" + "return y;"); + }); // null deref at x.isEmpty(), the '.' 
is offset 53 assertScriptElementColumn(53, exception); assertScriptStack(exception, "y = x.isEmpty();\n}\n", " ^---- HERE"); @@ -121,10 +117,9 @@ public void testInvalidShift() { } public void testBogusParameter() { - IllegalArgumentException expected = expectThrows( - IllegalArgumentException.class, - () -> { exec("return 5;", null, Collections.singletonMap("bogusParameterKey", "bogusParameterValue"), true); } - ); + IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { + exec("return 5;", null, Collections.singletonMap("bogusParameterKey", "bogusParameterValue"), true); + }); assertTrue(expected.getMessage().contains("Unrecognized compile-time parameter")); } @@ -178,10 +173,9 @@ public void testLoopLimits() { } public void testIllegalDynamicMethod() { - IllegalArgumentException expected = expectScriptThrows( - IllegalArgumentException.class, - () -> { exec("def x = 'test'; return x.getClass().toString()"); } - ); + IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { + exec("def x = 'test'; return x.getClass().toString()"); + }); assertTrue(expected.getMessage().contains("dynamic method [java.lang.String, getClass/0] not found")); } diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/WhitelistLoaderTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/WhitelistLoaderTests.java index e4e754a541414..b7ba3040894e3 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/WhitelistLoaderTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/WhitelistLoaderTests.java @@ -47,19 +47,15 @@ public class WhitelistLoaderTests extends ScriptTestCase { public void testUnknownAnnotations() { Map<String, WhitelistAnnotationParser> parsers = new HashMap<>(WhitelistAnnotationParser.BASE_ANNOTATION_PARSERS); - RuntimeException expected = expectThrows( - RuntimeException.class, - () -> { WhitelistLoader.loadFromResourceFiles(Whitelist.class, parsers, "org.opensearch.painless.annotation.unknown"); } - ); + RuntimeException expected = expectThrows(RuntimeException.class, () -> { + WhitelistLoader.loadFromResourceFiles(Whitelist.class, parsers, "org.opensearch.painless.annotation.unknown"); + }); assertEquals("invalid annotation: parser not found for [unknownAnnotation] [@unknownAnnotation]", expected.getCause().getMessage()); assertEquals(IllegalArgumentException.class, expected.getCause().getClass()); - expected = expectThrows( - RuntimeException.class, - () -> { - WhitelistLoader.loadFromResourceFiles(Whitelist.class, parsers, "org.opensearch.painless.annotation.unknown_with_options"); - } - ); + expected = expectThrows(RuntimeException.class, () -> { + WhitelistLoader.loadFromResourceFiles(Whitelist.class, parsers, "org.opensearch.painless.annotation.unknown_with_options"); + }); assertEquals( "invalid annotation: parser not found for [unknownAnootationWithMessage] [@unknownAnootationWithMessage[arg=\"arg value\"]]", expected.getCause().getMessage() diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/TokenCountFieldMapper.java index fd029503e9a7b..1851afcf0af85 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/TokenCountFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/TokenCountFieldMapper.java @@ -122,7 +122,7 @@ static class TokenCountFieldType extends NumberFieldMapper.NumberFieldType { @Override public
ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { if (hasDocValues() == false) { - return lookup -> org.opensearch.common.collect.List.of(); + return lookup -> List.of(); } return new DocValueFetcher(docValueFormat(format, null), searchLookup.doc().getForField(this)); } diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java index 908e5db6196c3..7fcbb2045d0de 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java @@ -39,13 +39,13 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.opensearch.common.Strings; -import org.opensearch.common.collect.List; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.plugins.Plugin; import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.List; import static org.hamcrest.Matchers.instanceOf; diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java index 55d825d1b53bb..5ddbff1cf8708 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java @@ -42,6 +42,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.List; public class RankFeaturesFieldMapperTests extends MapperTestCase { @@ -58,7 +59,7 @@ protected void assertExistsQuery(MapperService mapperService) { @Override protected Collection<? extends Plugin> getPlugins() { - return org.opensearch.common.collect.List.of(new MapperExtrasPlugin()); + return List.of(new MapperExtrasPlugin()); } @Override diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java index d320a279bbfc5..0ba132596e16c 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java @@ -76,6 +76,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -134,7 +135,7 @@ protected void writeFieldValue(XContentBuilder builder) throws IOException { @Override protected Collection<? extends Plugin> getPlugins() { - return org.opensearch.common.collect.List.of(new MapperExtrasPlugin()); + return List.of(new MapperExtrasPlugin()); } @Override @@ -150,20 +151,9 @@ protected IndexAnalyzers createIndexAnalyzers(IndexSettings indexSettings) { NamedAnalyzer simple = new NamedAnalyzer("simple", AnalyzerScope.INDEX, new SimpleAnalyzer()); NamedAnalyzer whitespace = new NamedAnalyzer("whitespace", AnalyzerScope.INDEX, new WhitespaceAnalyzer()); return new IndexAnalyzers( - org.opensearch.common.collect.Map.of( - "default", - dflt, - "standard", - standard, - "keyword", - keyword, - "simple", - simple, - "whitespace", -
whitespace - ), - org.opensearch.common.collect.Map.of(), - org.opensearch.common.collect.Map.of() + Map.of("default", dflt, "standard", standard, "keyword", keyword, "simple", simple, "whitespace", whitespace), + Map.of(), + Map.of() ); } diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldTypeTests.java index 1998465318c2b..77e34560d4cad 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldTypeTests.java @@ -49,6 +49,7 @@ import java.io.IOException; import java.util.Collections; +import java.util.List; import static java.util.Arrays.asList; import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE; @@ -154,9 +155,9 @@ public void testFetchSourceValue() throws IOException { SearchAsYouTypeFieldType fieldType = createFieldType(); fieldType.setIndexAnalyzer(Lucene.STANDARD_ANALYZER); - assertEquals(org.opensearch.common.collect.List.of("value"), fetchSourceValue(fieldType, "value")); - assertEquals(org.opensearch.common.collect.List.of("42"), fetchSourceValue(fieldType, 42L)); - assertEquals(org.opensearch.common.collect.List.of("true"), fetchSourceValue(fieldType, true)); + assertEquals(List.of("value"), fetchSourceValue(fieldType, "value")); + assertEquals(List.of("42"), fetchSourceValue(fieldType, 42L)); + assertEquals(List.of("true"), fetchSourceValue(fieldType, true)); SearchAsYouTypeFieldMapper.PrefixFieldType prefixFieldType = new SearchAsYouTypeFieldMapper.PrefixFieldType( fieldType.name(), @@ -164,17 +165,17 @@ public void testFetchSourceValue() throws IOException { 2, 10 ); - assertEquals(org.opensearch.common.collect.List.of("value"), fetchSourceValue(prefixFieldType, "value")); - assertEquals(org.opensearch.common.collect.List.of("42"), fetchSourceValue(prefixFieldType, 42L)); - assertEquals(org.opensearch.common.collect.List.of("true"), fetchSourceValue(prefixFieldType, true)); + assertEquals(List.of("value"), fetchSourceValue(prefixFieldType, "value")); + assertEquals(List.of("42"), fetchSourceValue(prefixFieldType, 42L)); + assertEquals(List.of("true"), fetchSourceValue(prefixFieldType, true)); SearchAsYouTypeFieldMapper.ShingleFieldType shingleFieldType = new SearchAsYouTypeFieldMapper.ShingleFieldType( fieldType.name(), 5, fieldType.getTextSearchInfo() ); - assertEquals(org.opensearch.common.collect.List.of("value"), fetchSourceValue(shingleFieldType, "value")); - assertEquals(org.opensearch.common.collect.List.of("42"), fetchSourceValue(shingleFieldType, 42L)); - assertEquals(org.opensearch.common.collect.List.of("true"), fetchSourceValue(shingleFieldType, true)); + assertEquals(List.of("value"), fetchSourceValue(shingleFieldType, "value")); + assertEquals(List.of("42"), fetchSourceValue(shingleFieldType, 42L)); + assertEquals(List.of("true"), fetchSourceValue(shingleFieldType, true)); } } diff --git a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java index b318a504ab7e7..3a07ba907be33 100644 --- a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java @@ 
-436,10 +436,9 @@ public void testPercolatorQueryExistingDocumentSourceDisabled() throws Exception client().admin().indices().prepareRefresh().get(); logger.info("percolating empty doc with source disabled"); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> { client().prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "1", null, null, null)).get(); } - ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + client().prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "1", null, null, null)).get(); + }); assertThat(e.getMessage(), containsString("source disabled")); } diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java index 87aa28a3346bc..6f2fe980f44ee 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java @@ -245,10 +245,9 @@ protected Set<String> getObjectsHoldingArbitraryContent() { } public void testRequiredParameters() { - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> { new PercolateQueryBuilder(null, new BytesArray("{}"), XContentType.JSON); } - ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + new PercolateQueryBuilder(null, new BytesArray("{}"), XContentType.JSON); + }); assertThat(e.getMessage(), equalTo("[field] is a required argument")); e = expectThrows( diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/DiscountedCumulativeGain.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/DiscountedCumulativeGain.java index 35db325b70ca5..ded1df2f43de5 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/DiscountedCumulativeGain.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/DiscountedCumulativeGain.java @@ -282,11 +282,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t return builder; } - private static final ConstructingObjectParser<Detail, Void> PARSER = new ConstructingObjectParser<>( - NAME, - true, - args -> { return new Detail((Double) args[0], (Double) args[1] != null ? (Double) args[1] : 0.0d, (Integer) args[2]); } - ); + private static final ConstructingObjectParser<Detail, Void> PARSER = new ConstructingObjectParser<>(NAME, true, args -> { + return new Detail((Double) args[0], (Double) args[1] != null ?
(Double) args[1] : 0.0d, (Integer) args[2]); + }); static { PARSER.declareDouble(constructorArg(), DCG_FIELD); diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/ExpectedReciprocalRank.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/ExpectedReciprocalRank.java index 3557e0576bb05..af699c65eb332 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/ExpectedReciprocalRank.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/ExpectedReciprocalRank.java @@ -262,11 +262,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t return builder.field(UNRATED_FIELD.getPreferredName(), this.unratedDocs); } - private static final ConstructingObjectParser<Detail, Void> PARSER = new ConstructingObjectParser<>( - NAME, - true, - args -> { return new Detail((Integer) args[0]); } - ); + private static final ConstructingObjectParser<Detail, Void> PARSER = new ConstructingObjectParser<>(NAME, true, args -> { + return new Detail((Integer) args[0]); + }); static { PARSER.declareInt(constructorArg(), UNRATED_FIELD); diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/MeanReciprocalRank.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/MeanReciprocalRank.java index 59792537e89e5..6ccecadd2d7f8 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/MeanReciprocalRank.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/MeanReciprocalRank.java @@ -219,11 +219,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t return builder.field(FIRST_RELEVANT_RANK_FIELD.getPreferredName(), firstRelevantRank); } - private static final ConstructingObjectParser<Detail, Void> PARSER = new ConstructingObjectParser<>( - NAME, - true, - args -> { return new Detail((Integer) args[0]); } - ); + private static final ConstructingObjectParser<Detail, Void> PARSER = new ConstructingObjectParser<>(NAME, true, args -> { + return new Detail((Integer) args[0]); + }); static { PARSER.declareInt(constructorArg(), FIRST_RELEVANT_RANK_FIELD); diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java index 2bf229b094245..2537886ddf577 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -346,11 +346,9 @@ public void testSerialization() throws IOException { } public void testEqualsAndHash() throws IOException { - checkEqualsAndHashCode( - createTestItem(), - original -> { return new DiscountedCumulativeGain(original.getNormalize(), original.getUnknownDocRating(), original.getK()); }, - DiscountedCumulativeGainTests::mutateTestItem - ); + checkEqualsAndHashCode(createTestItem(), original -> { + return new DiscountedCumulativeGain(original.getNormalize(), original.getUnknownDocRating(), original.getK()); + }, DiscountedCumulativeGainTests::mutateTestItem); } private static DiscountedCumulativeGain mutateTestItem(DiscountedCumulativeGain original) { diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/ExpectedReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/ExpectedReciprocalRankTests.java index 723a1e2202e2b..1b39f4e07efa5 100644 ---
a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/ExpectedReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/ExpectedReciprocalRankTests.java @@ -213,11 +213,9 @@ public void testSerialization() throws IOException { } public void testEqualsAndHash() throws IOException { - checkEqualsAndHashCode( - createTestItem(), - original -> { return new ExpectedReciprocalRank(original.getMaxRelevance(), original.getUnknownDocRating(), original.getK()); }, - ExpectedReciprocalRankTests::mutateTestItem - ); + checkEqualsAndHashCode(createTestItem(), original -> { + return new ExpectedReciprocalRank(original.getMaxRelevance(), original.getUnknownDocRating(), original.getK()); + }, ExpectedReciprocalRankTests::mutateTestItem); } private static ExpectedReciprocalRank mutateTestItem(ExpectedReciprocalRank original) { diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedDocumentTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedDocumentTests.java index f3fb01b153c55..042bf22765ade 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedDocumentTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedDocumentTests.java @@ -91,11 +91,9 @@ public void testSerialization() throws IOException { } public void testEqualsAndHash() throws IOException { - checkEqualsAndHashCode( - createRatedDocument(), - original -> { return new RatedDocument(original.getIndex(), original.getDocID(), original.getRating()); }, - RatedDocumentTests::mutateTestItem - ); + checkEqualsAndHashCode(createRatedDocument(), original -> { + return new RatedDocument(original.getIndex(), original.getDocID(), original.getRating()); + }, RatedDocumentTests::mutateTestItem); } private static RatedDocument mutateTestItem(RatedDocument original) { diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ClientScrollableHitSourceTests.java index 9333e5bd3dcc9..3a79a0b35e391 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ClientScrollableHitSourceTests.java @@ -105,10 +105,9 @@ private static class ExpectedException extends RuntimeException { public void testRetryFail() { int retries = randomInt(10); - ExpectedException ex = expectThrows( - ExpectedException.class, - () -> { dotestBasicsWithRetry(retries, retries + 1, retries + 1, e -> { throw new ExpectedException(e); }); } - ); + ExpectedException ex = expectThrows(ExpectedException.class, () -> { + dotestBasicsWithRetry(retries, retries + 1, retries + 1, e -> { throw new ExpectedException(e); }); + }); assertThat(ex.getCause(), instanceOf(OpenSearchRejectedExecutionException.class)); } diff --git a/modules/repository-url/src/yamlRestTest/resources/rest-api-spec/test/repository_url/20_repository.yml b/modules/repository-url/src/yamlRestTest/resources/rest-api-spec/test/repository_url/20_repository.yml index f40ad16bb4265..cc6b3b5ce1f33 100644 --- a/modules/repository-url/src/yamlRestTest/resources/rest-api-spec/test/repository_url/20_repository.yml +++ b/modules/repository-url/src/yamlRestTest/resources/rest-api-spec/test/repository_url/20_repository.yml @@ -27,6 +27,3 @@ type: url settings: url: "http://snapshot.unknown" - - - diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.87.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-buffer-4.1.87.Final.jar.sha1 deleted file mode 100644 index 8350f48acdb54..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8942174141fb0586e7fffefc8ae48e1458293696 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..67604d11c1eca --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +937eb60c19c5f5c1326b96123c9ec3d33238d4d5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.87.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.87.Final.jar.sha1 deleted file mode 100644 index 9a494c68b7afb..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d7ab7363f736114e6324ccf802d094529b30b8d8 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..c8fb04a021807 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +9992a22c82e18b8fd4f34989535f3e504e55aa37 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.87.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.87.Final.jar.sha1 deleted file mode 100644 index 115657578cf80..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3230e95d6832ee4306fe1ca774c7bcecf1da4b28 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..861599ce1d1d2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +19bbcd46f8ee0d118486f98eff22fe665b9689e5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.87.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.87.Final.jar.sha1 deleted file mode 100644 index 066550b9e0135..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e7e80dd6e604144781fcb859b79cfe8d3730079 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..afb531805329e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +43597a09382c6ae2bef469a9b3a41e8a17850638 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.87.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.87.Final.jar.sha1 deleted file mode 100644 index 0923602100814..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bd97491c22ebea4670c00f1bd5dbf65a8a1cfe7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.90.Final.jar.sha1 
new file mode 100644 index 0000000000000..c98bfb52393d6 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +64f6946ce4d9189cec5341d3f5f86ac5653099b5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.87.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.87.Final.jar.sha1 deleted file mode 100644 index 4465a47bd49fb..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eaa964e16a67914c8d9b814d29a4b969635d72a0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..b92177828aa56 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +47c415d8c83f08b820ba00e6497a6cf19dd0155f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.87.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.87.Final.jar.sha1 deleted file mode 100644 index dcb49c515e460..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30c78c8ced3417f35e2a55f7533dc2bb43fef2aa \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..c7a77dbf6aaa8 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +82d68da212f62b076c763f5efa9b072d2abc018f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.87.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.87.Final.jar.sha1 deleted file mode 100644 index 4fb195e5f08f6..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4bf8308dc7d6ba2570681eaeddb16f8d1196b438 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..5f954b2595927 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +e42282002cf22105e7e993651aead231246d0220 \ No newline at end of file diff --git a/plugins/analysis-icu/src/test/java/org/opensearch/index/analysis/IcuAnalyzerTests.java b/plugins/analysis-icu/src/test/java/org/opensearch/index/analysis/IcuAnalyzerTests.java index e490248fc8122..c363bc6eb43f8 100644 --- a/plugins/analysis-icu/src/test/java/org/opensearch/index/analysis/IcuAnalyzerTests.java +++ b/plugins/analysis-icu/src/test/java/org/opensearch/index/analysis/IcuAnalyzerTests.java @@ -90,10 +90,9 @@ public void testBadSettings() { Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).put("mode", "wrong").build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> { new IcuAnalyzerProvider(idxSettings, null, "icu", settings); } - ); + 
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + new IcuAnalyzerProvider(idxSettings, null, "icu", settings); + }); assertThat(e.getMessage(), containsString("Unknown mode [wrong] in analyzer [icu], expected one of [compose, decompose]")); diff --git a/plugins/analysis-icu/src/test/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperTests.java index 0cf82c408ae6f..a784e312b4463 100644 --- a/plugins/analysis-icu/src/test/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperTests.java +++ b/plugins/analysis-icu/src/test/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperTests.java @@ -40,7 +40,6 @@ import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.util.BytesRef; import org.opensearch.common.Strings; -import org.opensearch.common.collect.List; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.plugin.analysis.icu.AnalysisICUPlugin; import org.opensearch.plugins.Plugin; @@ -48,6 +47,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.List; import java.util.Set; import static org.hamcrest.Matchers.containsString; @@ -69,7 +69,7 @@ protected ICUCollationKeywordFieldMapper.Builder newBuilder() { @Override protected Set<String> unsupportedProperties() { - return org.opensearch.common.collect.Set.of("analyzer", "similarity"); + return Set.of("analyzer", "similarity"); } @Override diff --git a/plugins/mapper-murmur3/src/test/java/org/opensearch/index/mapper/murmur3/Murmur3FieldMapperTests.java b/plugins/mapper-murmur3/src/test/java/org/opensearch/index/mapper/murmur3/Murmur3FieldMapperTests.java index 5cfbc0cde1050..8bf5b86a06b39 100644 --- a/plugins/mapper-murmur3/src/test/java/org/opensearch/index/mapper/murmur3/Murmur3FieldMapperTests.java +++ b/plugins/mapper-murmur3/src/test/java/org/opensearch/index/mapper/murmur3/Murmur3FieldMapperTests.java @@ -45,6 +45,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.List; public class Murmur3FieldMapperTests extends MapperTestCase { @@ -55,7 +56,7 @@ protected void writeFieldValue(XContentBuilder builder) throws IOException { @Override protected Collection<? extends Plugin> getPlugins() { - return org.opensearch.common.collect.List.of(new MapperMurmur3Plugin()); + return List.of(new MapperMurmur3Plugin()); } @Override diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 0105c32669ad8..89ea405464987 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -45,7 +45,7 @@ opensearchplugin { dependencies { api 'com.azure:azure-core:1.34.0' - api 'com.azure:azure-storage-common:12.19.3' + api 'com.azure:azure-storage-common:12.20.0' api 'com.azure:azure-core-http-netty:1.12.8' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" @@ -57,9 +57,9 @@ dependencies { api 'com.azure:azure-storage-blob:12.15.0' api 'org.reactivestreams:reactive-streams:1.0.3' api 'io.projectreactor:reactor-core:3.4.18' - api 'io.projectreactor.netty:reactor-netty:1.1.3' + api 'io.projectreactor.netty:reactor-netty:1.1.4' api 'io.projectreactor.netty:reactor-netty-core:1.0.24' - api 'io.projectreactor.netty:reactor-netty-http:1.1.3' + api 'io.projectreactor.netty:reactor-netty-http:1.1.4' api "org.slf4j:slf4j-api:${versions.slf4j}" api
"com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.19.3.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.19.3.jar.sha1 deleted file mode 100644 index c22aa05ad5258..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.19.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e30b1be613815b10ea8d2cfa39f560aaeef5f0d \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.20.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.20.0.jar.sha1 new file mode 100644 index 0000000000000..1fd3f8066411d --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.20.0.jar.sha1 @@ -0,0 +1 @@ +69b962bbeea787c6aca83115472791aacc2ae94c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.87.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.87.Final.jar.sha1 deleted file mode 100644 index ea9c36f36b988..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -092ec0019d4e72c1305f4357694c4831aade60ce \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.90.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..3ef0c5df26b85 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +ebf5da8e6edf783d069d9aca346ff46c55772de6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.87.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.87.Final.jar.sha1 deleted file mode 100644 index da15720c2d0e1..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -72b756ff290d782c9ebb36db7d42ee272270c0b4 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.90.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..64caa309f2c05 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +ea3be877ea976b3d71e1a872958d32854b24db66 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.87.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.87.Final.jar.sha1 deleted file mode 100644 index 5bd83f816a9e4..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -109027470b72d56914d76154e947f9bd5844bb9b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.90.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..2738db8a6710a --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +7397535a4e03d2f74c71aa2282eb7a2760ffc37b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.87.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.87.Final.jar.sha1 deleted file mode 100644 index 226ea309ac472..0000000000000 --- 
a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8079f16d6209e10f5d3a09522e7aa9ffb217e17e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.90.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..60bde875d0faf --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +6ab526a43a14f7796434fa6a705c34201603235f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.87.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.87.Final.jar.sha1 deleted file mode 100644 index 39d59cb45eed2..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a40442ff2afaf73d7ddc1d75d7e86814ef04f980 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.90.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..6124f27a050e0 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +c9e6762805fe1bc854352dbc8020226f38674bce \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.87.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.87.Final.jar.sha1 deleted file mode 100644 index 4fb195e5f08f6..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4bf8308dc7d6ba2570681eaeddb16f8d1196b438 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..5f954b2595927 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +e42282002cf22105e7e993651aead231246d0220 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-1.1.3.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.1.3.jar.sha1 deleted file mode 100644 index 00cb9fe0edf44..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2745c13811d82b7c57d4ca52764bbaab83e3e2e2 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-1.1.4.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.1.4.jar.sha1 new file mode 100644 index 0000000000000..ab76deb3bc1f1 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-1.1.4.jar.sha1 @@ -0,0 +1 @@ +1b66183ba316fbbd2d212eb9e9a3ba060ba557b0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.3.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.3.jar.sha1 deleted file mode 100644 index fda433830ce35..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ddebf6e059e10315e9a53417289c98745a8fc7a7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.4.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.4.jar.sha1 new 
file mode 100644 index 0000000000000..7848fcdd8f5cb --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.4.jar.sha1 @@ -0,0 +1 @@ +ca8b2f1b23e4593577e0f570e04bb80cd29ce1e3 \ No newline at end of file diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java index b540dd83c95a2..060ffdda79196 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java @@ -442,7 +442,7 @@ private static class Stats { private final AtomicLong putBlockListOperations = new AtomicLong(); private Map toMap() { - return org.opensearch.common.collect.Map.of( + return Map.of( "GetBlob", getOperations.get(), "ListBlobs", diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java index a7799fef475f3..3e71c389e2fb0 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java @@ -150,7 +150,7 @@ public AzureRepository( } private static Map buildLocation(RepositoryMetadata metadata) { - return org.opensearch.common.collect.Map.of( + return Map.of( "base_path", Repository.BASE_PATH_SETTING.get(metadata.settings()), "container", diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageHttpStatsCollector.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageHttpStatsCollector.java index 7375d4edb9030..2f48de94a4830 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageHttpStatsCollector.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageHttpStatsCollector.java @@ -36,8 +36,8 @@ import com.google.api.client.http.HttpRequest; import com.google.api.client.http.HttpResponse; import com.google.api.client.http.HttpResponseInterceptor; -import org.opensearch.common.collect.List; +import java.util.List; import java.util.Locale; import java.util.function.Consumer; import java.util.function.Function; diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java index 909dc066a493b..2369248f7a244 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -115,12 +115,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository { } private static Map buildLocation(RepositoryMetadata metadata) { - return org.opensearch.common.collect.Map.of( - "base_path", - BASE_PATH.get(metadata.settings()), - "bucket", - getSetting(BUCKET, metadata) - ); + return Map.of("base_path", BASE_PATH.get(metadata.settings()), "bucket", getSetting(BUCKET, metadata)); } @Override diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 3712b9dbc6caa..d2f8eee79dfdd 100644 --- a/plugins/repository-hdfs/build.gradle +++ 
b/plugins/repository-hdfs/build.gradle @@ -69,7 +69,7 @@ dependencies { api 'org.apache.avro:avro:1.11.1' api 'com.google.code.gson:gson:2.10.1' runtimeOnly "com.google.guava:guava:${versions.guava}" - api 'com.google.protobuf:protobuf-java:3.22.0' + api 'com.google.protobuf:protobuf-java:3.22.2' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.2' api "commons-codec:commons-codec:${versions.commonscodec}" @@ -82,7 +82,7 @@ dependencies { api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" api "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" - api 'net.minidev:json-smart:2.4.7' + api 'net.minidev:json-smart:2.4.10' api "io.netty:netty-all:${versions.netty}" implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" implementation 'org.codehaus.woodstox:stax2-api:4.2.1' diff --git a/plugins/repository-hdfs/licenses/json-smart-2.4.10.jar.sha1 b/plugins/repository-hdfs/licenses/json-smart-2.4.10.jar.sha1 new file mode 100644 index 0000000000000..faac861f60a28 --- /dev/null +++ b/plugins/repository-hdfs/licenses/json-smart-2.4.10.jar.sha1 @@ -0,0 +1 @@ +91cb329e9424bf32131eeb1ce2d17bf31b9899bc \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/json-smart-2.4.7.jar.sha1 b/plugins/repository-hdfs/licenses/json-smart-2.4.7.jar.sha1 deleted file mode 100644 index 16f9a4431485a..0000000000000 --- a/plugins/repository-hdfs/licenses/json-smart-2.4.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d7f4c1530c07c54930935f3da85f48b83b3c109 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.87.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.87.Final.jar.sha1 deleted file mode 100644 index 8dcb64ecfdf24..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39de92ea74a05da937343695f780e59addb8b8ea \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.90.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..829204d91b994 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +0fb2bac7d106f8db84b111202bfb1c68a1aa89b8 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.22.0.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.22.0.jar.sha1 deleted file mode 100644 index 4255cb52bf49c..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.22.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aa58e31e88e9974452f0498e237532df5732257a \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.22.2.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.22.2.jar.sha1 new file mode 100644 index 0000000000000..80feeec023e7b --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.22.2.jar.sha1 @@ -0,0 +1 @@ +fdee98b8f6abab73f146a4edb4c09e56f8278d03 \ No newline at end of file diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java index e12b24f27dcdc..d0b63f17e3887 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java @@ 
-75,9 +75,9 @@ public void testHAFailoverWithRepository() throws Exception { String nn2Port = "10002"; if (ports.length() > 0) { final Path path = PathUtils.get(ports); - final List<String> lines = AccessController.doPrivileged( - (PrivilegedExceptionAction<List<String>>) () -> { return Files.readAllLines(path); } - ); + final List<String> lines = AccessController.doPrivileged((PrivilegedExceptionAction<List<String>>) () -> { + return Files.readAllLines(path); + }); nn1Port = lines.get(0); nn2Port = lines.get(1); } diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot.yml index fbe0e0a8b0066..7e76024dd2368 100644 --- a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot.yml +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot.yml @@ -36,7 +36,7 @@ - match: { snapshot.shards.failed : 0 } # Remove our snapshot - - do: + - do: snapshot.delete: repository: test_snapshot_repository snapshot: test_snapshot diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_get.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_get.yml index f38f4783b195b..3e61ef62f2a7d 100644 --- a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_get.yml +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_get.yml @@ -59,7 +59,7 @@ - match: { snapshots.0.snapshot : test_snapshot_get } # Remove our snapshot - - do: + - do: snapshot.delete: repository: test_snapshot_get_repository snapshot: test_snapshot_get diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index c8377949a6842..9cc97244cae46 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -310,12 +310,7 @@ class S3Repository extends MeteredBlobStoreRepository { } private static Map<String, Object> buildLocation(RepositoryMetadata metadata) { - return org.opensearch.common.collect.Map.of( - "base_path", - BASE_PATH_SETTING.get(metadata.settings()), - "bucket", - BUCKET_SETTING.get(metadata.settings()) - ); + return Map.of("base_path", BASE_PATH_SETTING.get(metadata.settings()), "bucket", BUCKET_SETTING.get(metadata.settings())); } /** diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 5ebb2a835782a..8a6a6a334e1a9 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -65,7 +65,7 @@ thirdPartyAudit { 'com.aayushatharva.brotli4j.encoder.Encoder', 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', - + // from io.netty.handler.codec.protobuf.ProtobufDecoder (netty) 'com.google.protobuf.ExtensionRegistry', 'com.google.protobuf.MessageLite$Builder', diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.87.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.87.Final.jar.sha1 deleted file mode 100644 index 8350f48acdb54..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8942174141fb0586e7fffefc8ae48e1458293696 \ No newline at end of file diff --git
a/plugins/transport-nio/licenses/netty-buffer-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..67604d11c1eca --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +937eb60c19c5f5c1326b96123c9ec3d33238d4d5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.87.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.87.Final.jar.sha1 deleted file mode 100644 index 9a494c68b7afb..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d7ab7363f736114e6324ccf802d094529b30b8d8 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..c8fb04a021807 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +9992a22c82e18b8fd4f34989535f3e504e55aa37 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.87.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.87.Final.jar.sha1 deleted file mode 100644 index 115657578cf80..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3230e95d6832ee4306fe1ca774c7bcecf1da4b28 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..861599ce1d1d2 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +19bbcd46f8ee0d118486f98eff22fe665b9689e5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.87.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.87.Final.jar.sha1 deleted file mode 100644 index 066550b9e0135..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e7e80dd6e604144781fcb859b79cfe8d3730079 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..afb531805329e --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +43597a09382c6ae2bef469a9b3a41e8a17850638 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.87.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.87.Final.jar.sha1 deleted file mode 100644 index 0923602100814..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bd97491c22ebea4670c00f1bd5dbf65a8a1cfe7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..c98bfb52393d6 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +64f6946ce4d9189cec5341d3f5f86ac5653099b5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.87.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.87.Final.jar.sha1 deleted file mode 
100644 index 4465a47bd49fb..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eaa964e16a67914c8d9b814d29a4b969635d72a0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..b92177828aa56 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +47c415d8c83f08b820ba00e6497a6cf19dd0155f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.87.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.87.Final.jar.sha1 deleted file mode 100644 index dcb49c515e460..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.87.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30c78c8ced3417f35e2a55f7533dc2bb43fef2aa \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.90.Final.jar.sha1 new file mode 100644 index 0000000000000..c7a77dbf6aaa8 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.90.Final.jar.sha1 @@ -0,0 +1 @@ +82d68da212f62b076c763f5efa9b072d2abc018f \ No newline at end of file diff --git a/qa/logging-config/src/test/java/org/opensearch/common/logging/OpenSearchJsonLayoutTests.java b/qa/logging-config/src/test/java/org/opensearch/common/logging/OpenSearchJsonLayoutTests.java index 6639d53c9c879..0a8bd46ac96f3 100644 --- a/qa/logging-config/src/test/java/org/opensearch/common/logging/OpenSearchJsonLayoutTests.java +++ b/qa/logging-config/src/test/java/org/opensearch/common/logging/OpenSearchJsonLayoutTests.java @@ -66,6 +66,46 @@ public void testLayout() { "%exceptionAsJson }" + System.lineSeparator())); } + public void testWithMaxMessageLengthLayout() { + OpenSearchJsonLayout server = OpenSearchJsonLayout.newBuilder() + .setType("server") + .setMaxMessageLength(42) + .build(); + String conversionPattern = server.getPatternLayout().getConversionPattern(); + + assertThat(conversionPattern, Matchers.equalTo( + "{" + + "\"type\": \"server\", " + + "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}\", " + + "\"level\": \"%p\", " + + "\"component\": \"%c{1.}\", " + + "\"cluster.name\": \"${sys:opensearch.logs.cluster_name}\", " + + "\"node.name\": \"%node_name\", " + + "\"message\": \"%notEmpty{%enc{%marker}{JSON} }%enc{%.-42m}{JSON}\"" + + "%notEmpty{, %node_and_cluster_id }" + + "%exceptionAsJson }" + System.lineSeparator())); + } + + public void testWithUnrestrictedMaxMessageLengthLayout() { + OpenSearchJsonLayout server = OpenSearchJsonLayout.newBuilder() + .setType("server") + .setMaxMessageLength(0) + .build(); + String conversionPattern = server.getPatternLayout().getConversionPattern(); + + assertThat(conversionPattern, Matchers.equalTo( + "{" + + "\"type\": \"server\", " + + "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}\", " + + "\"level\": \"%p\", " + + "\"component\": \"%c{1.}\", " + + "\"cluster.name\": \"${sys:opensearch.logs.cluster_name}\", " + + "\"node.name\": \"%node_name\", " + + "\"message\": \"%notEmpty{%enc{%marker}{JSON} }%enc{%m}{JSON}\"" + + "%notEmpty{, %node_and_cluster_id }" + + "%exceptionAsJson }" + System.lineSeparator())); + } + public void testLayoutWithAdditionalFields() { OpenSearchJsonLayout server = OpenSearchJsonLayout.newBuilder() .setType("server") diff --git 
a/qa/os/src/test/java/org/opensearch/packaging/test/ArchiveTests.java b/qa/os/src/test/java/org/opensearch/packaging/test/ArchiveTests.java index 898ea12b6a6c3..29e40a4e179cb 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/ArchiveTests.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/ArchiveTests.java @@ -328,7 +328,7 @@ public void test54ForceBundledJdkEmptyJavaHome() throws Exception { public void test70CustomPathConfAndJvmOptions() throws Exception { withCustomConfig(tempConf -> { - final List jvmOptions = org.opensearch.common.collect.List.of("-Xms512m", "-Xmx512m", "-Dlog4j2.disable.jmx=true"); + final List jvmOptions = List.of("-Xms512m", "-Xmx512m", "-Dlog4j2.disable.jmx=true"); Files.write(tempConf.resolve("jvm.options"), jvmOptions, CREATE, APPEND); sh.getEnv().put("OPENSEARCH_JAVA_OPTS", "-XX:-UseCompressedOops"); diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java b/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java index aa52c3325bee5..d7005a17926ad 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/FileUtils.java @@ -83,7 +83,7 @@ public class FileUtils { public static List lsGlob(Path directory, String glob) { List paths = new ArrayList<>(); if (Files.exists(directory) == false) { - return org.opensearch.common.collect.List.of(); + return List.of(); } try (DirectoryStream stream = Files.newDirectoryStream(directory, glob)) { diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java index cea03a0b6fe70..b80ae422bda9a 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java @@ -322,7 +322,7 @@ private enum PackageManagerCommand { REMOVE } - private static Map RPM_OPTIONS = org.opensearch.common.collect.Map.of( + private static Map RPM_OPTIONS = Map.of( PackageManagerCommand.QUERY, "-qe", PackageManagerCommand.INSTALL, @@ -335,7 +335,7 @@ private enum PackageManagerCommand { "-e" ); - private static Map DEB_OPTIONS = org.opensearch.common.collect.Map.of( + private static Map DEB_OPTIONS = Map.of( PackageManagerCommand.QUERY, "-s", PackageManagerCommand.INSTALL, diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/SystemIndexRestIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/SystemIndexRestIT.java index e687e5eb8a151..9f2d686251947 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/SystemIndexRestIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/SystemIndexRestIT.java @@ -157,7 +157,7 @@ public List getRestHandlers(Settings settings, RestController restC IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, Supplier nodesInCluster) { - return org.opensearch.common.collect.List.of(new AddDocRestHandler()); + return List.of(new AddDocRestHandler()); } @Override @@ -178,7 +178,7 @@ public String getName() { @Override public List routes() { - return org.opensearch.common.collect.List.of(new Route(RestRequest.Method.POST, "/_sys_index_test/add_doc/{id}")); + return List.of(new Route(RestRequest.Method.POST, "/_sys_index_test/add_doc/{id}")); } @Override @@ -186,7 +186,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli IndexRequest indexRequest = new IndexRequest(SYSTEM_INDEX_NAME); 
indexRequest.id(request.param("id")); indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - indexRequest.source(org.opensearch.common.collect.Map.of("some_field", "some_value")); + indexRequest.source(Map.of("some_field", "some_value")); return channel -> client.index(indexRequest, new RestStatusToXContentListener<>(channel, r -> r.getLocation(indexRequest.routing()))); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segment_replication.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segment_replication.json index 0b1c65e551d63..a815cd5b1101b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segment_replication.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segment_replication.json @@ -37,11 +37,6 @@ "description":"If `true`, the response only includes ongoing segment replication events", "default":false }, - "completed_only":{ - "type":"boolean", - "description":"If `true`, the response only includes latest completed segment replication events", - "default":false - }, "bytes":{ "type":"enum", "description":"The unit in which to display byte values", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.cluster_manager/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.cluster_manager/10_basic.yml index b0f1c81b56a0e..6354c751aa5f3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.cluster_manager/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.cluster_manager/10_basic.yml @@ -20,7 +20,7 @@ setup: host .+ \n ip .+ \n node .+ \n - + $/ --- diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.health/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.health/10_basic.yml index 0d945d01a6ddb..52b27837a61ac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.health/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.health/10_basic.yml @@ -9,9 +9,9 @@ help: true node_selector: # Only send request to nodes in <2.0 versions, especially during ':qa:mixed-cluster:v1.x.x#mixedClusterTest'. - # Because YAML REST test takes the minimum OpenSearch version in the cluster to apply the filter in 'skip' section, + # Because YAML REST test takes the minimum OpenSearch version in the cluster to apply the filter in 'skip' section, # see OpenSearchClientYamlSuiteTestCase#initAndResetContext() for detail. - # During 'mixedClusterTest', the cluster can be mixed with nodes in 1.x and 2.x versions, + # During 'mixedClusterTest', the cluster can be mixed with nodes in 1.x and 2.x versions, # so node_selector is required, and only filtering version in 'skip' is not enough. 
version: "1.0.0 - 1.4.99" @@ -61,7 +61,7 @@ pending_tasks .+ \n max_task_wait_time .+ \n active_shards_percent .+ \n - + $/ --- diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml index fa973454cfba5..294f00bdd822b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml @@ -17,18 +17,18 @@ - is_true: cluster_uuid - is_true: master_node - + --- "Get cluster state returns cluster_manager_node": - skip: version: " - 1.4.99" reason: "The metric cluster_manager_node is added to cluster state in version 2.0.0" - + - do: cluster.state: {} - + - set: cluster_manager_node: node_id - + - match: {master_node: $node_id} - match: {cluster_manager_node: $node_id} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/10_basic.yml index 74684901579a4..43a6f656c09c3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/10_basic.yml @@ -28,6 +28,6 @@ indices.get_alias: index: testind name: testali - + - match: { 'status': 404 } - match: { 'error': 'alias [testali] missing' } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml index 69061c6d0fbd4..2c878157f468e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml @@ -2,7 +2,7 @@ "Split index ignores target template mapping": - skip: features: allowed_warnings - + # create index - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml index 84ff85f465f8b..9d1ee7571f2e2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml @@ -138,5 +138,7 @@ - match: {pits.0.successful: true } - do: - catch: missing - delete_all_pits: { } + delete_all_pits: {} + + - match: {pits: []} + - length: {pits: 0} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java index 61059f83f0e77..62180412dbf98 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java @@ -510,14 +510,12 @@ public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} if (index != null) { index.join(); } - assertBusy( - () -> { - assertEquals( - emptyList(), - client().admin().cluster().prepareListTasks().setActions("indices:data/write/index*").get().getTasks() - ); - } - ); + assertBusy(() -> { + assertEquals( + emptyList(), + client().admin().cluster().prepareListTasks().setActions("indices:data/write/index*").get().getTasks() + ); + }); } } diff --git 
a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamIndexTemplateIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamIndexTemplateIT.java index ed500d72c3787..08f7fb17e5164 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamIndexTemplateIT.java @@ -8,8 +8,7 @@ package org.opensearch.action.admin.indices.datastream; -import org.opensearch.common.collect.List; - +import java.util.List; import java.util.concurrent.ExecutionException; import static org.hamcrest.Matchers.containsString; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamUsageIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamUsageIT.java index 785be061135f0..46f23e40f0864 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamUsageIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamUsageIT.java @@ -13,12 +13,12 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexResponse; import org.opensearch.cluster.metadata.DataStream; -import org.opensearch.common.collect.List; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; import org.opensearch.rest.RestStatus; import java.util.Arrays; +import java.util.List; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java index 4cf3f3564c8eb..53afa53de92f3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java @@ -105,18 +105,9 @@ protected Map, Object>> pluginScripts() { scripts.put("ctx._source.field2 = 'value2'", vars -> srcScript(vars, source -> source.replace("field2", "value2"))); - scripts.put( - "throw script exception on unknown var", - vars -> { - throw new ScriptException( - "message", - null, - Collections.emptyList(), - "exception on unknown var", - CustomScriptPlugin.NAME - ); - } - ); + scripts.put("throw script exception on unknown var", vars -> { + throw new ScriptException("message", null, Collections.emptyList(), "exception on unknown var", CustomScriptPlugin.NAME); + }); scripts.put("ctx.op = \"none\"", vars -> ((Map) vars.get("ctx")).put("op", "none")); scripts.put("ctx.op = \"delete\"", vars -> ((Map) vars.get("ctx")).put("op", "delete")); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceIT.java index babe228f544a9..ec617e88c249f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceIT.java @@ -99,11 +99,9 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier ) { - 
clusterService.getClusterSettings() - .addSettingsUpdateConsumer( - UPDATE_TEMPLATE_DUMMY_SETTING, - integer -> { logger.debug("the template dummy setting was updated to {}", integer); } - ); + clusterService.getClusterSettings().addSettingsUpdateConsumer(UPDATE_TEMPLATE_DUMMY_SETTING, integer -> { + logger.debug("the template dummy setting was updated to {}", integer); + }); return super.createComponents( client, clusterService, diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java index f18ee28a2de91..24ac2f2326931 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java @@ -15,6 +15,7 @@ import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.rest.RestStatus; import org.opensearch.snapshots.mockstore.MockRepository; import org.opensearch.test.OpenSearchIntegTestCase; @@ -336,7 +337,6 @@ public void testGetWeightedRouting_ClusterManagerNotDiscovered() throws Exceptio ).admin().cluster().prepareGetWeightedRouting().setAwarenessAttribute("zone").setRequestLocal(true).get(); assertEquals(weightedRouting, weightedRoutingResponse.weights()); assertFalse(weightedRoutingResponse.getDiscoveredClusterManager()); - logger.info("--> network disruption is stopped"); networkDisruption.stopDisrupting(); @@ -639,28 +639,18 @@ public void testClusterHealthResponseWithEnsureNodeWeighedInParam() throws Excep Thread.sleep(13000); // Check cluster health for weighed in node when cluster manager is not discovered, health check should - // return a response with 200 status code - nodeLocalHealth = client(nodes_in_zone_a.get(0)).admin() - .cluster() - .prepareHealth() - .setLocal(true) - .setEnsureNodeWeighedIn(true) - .get(); - assertFalse(nodeLocalHealth.isTimedOut()); - assertFalse(nodeLocalHealth.hasDiscoveredClusterManager()); + // return a response with 503 status code + assertThrows( + ClusterManagerNotDiscoveredException.class, + () -> client(nodes_in_zone_a.get(0)).admin().cluster().prepareHealth().setLocal(true).setEnsureNodeWeighedIn(true).get() + ); // Check cluster health for weighed away node when cluster manager is not discovered, health check should - // return a response with 200 status code with cluster manager discovered as false - // ensure_node_weighed_in is not executed if cluster manager is not discovered - nodeLocalHealth = client(nodes_in_zone_c.get(0)).admin() - .cluster() - .prepareHealth() - .setLocal(true) - .setEnsureNodeWeighedIn(true) - .get(); - assertFalse(nodeLocalHealth.isTimedOut()); - assertFalse(nodeLocalHealth.hasDiscoveredClusterManager()); - + // return a response with 503 status code + assertThrows( + ClusterManagerNotDiscoveredException.class, + () -> client(nodes_in_zone_c.get(0)).admin().cluster().prepareHealth().setLocal(true).setEnsureNodeWeighedIn(true).get() + ); networkDisruption.stopDisrupting(); Thread.sleep(1000); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java new file mode 100644 index 
0000000000000..ee5150c97fb4f --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java @@ -0,0 +1,273 @@ +/* + * Copyright OpenSearch Contributors. + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.index; + +import org.opensearch.action.bulk.BulkItemResponse; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.UUIDs; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.replication.SegmentReplicationBaseIT; +import org.opensearch.plugins.Plugin; +import org.opensearch.rest.RestStatus; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; + +import static java.util.Arrays.asList; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.index.SegmentReplicationPressureService.MAX_INDEXING_CHECKPOINTS; +import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_TIME_SETTING; +import static org.opensearch.index.SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SegmentReplicationPressureIT extends SegmentReplicationBaseIT { + + private static final int MAX_CHECKPOINTS_BEHIND = 2; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) + .put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .put(MAX_INDEXING_CHECKPOINTS.getKey(), MAX_CHECKPOINTS_BEHIND) + .build(); + } + + @Override + protected Collection> nodePlugins() { + return asList(MockTransportService.TestPlugin.class); + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6671") + public void testWritesRejected() throws Exception { + final String primaryNode = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replicaNode = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + + final IndexShard primaryShard = getIndexShard(primaryNode, INDEX_NAME); + final List replicaNodes = asList(replicaNode); + assertEqualSegmentInfosVersion(replicaNodes, primaryShard); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicInteger totalDocs = new AtomicInteger(0); + try (final Releasable ignored = blockReplication(replicaNodes, latch)) { + Thread indexingThread = new Thread(() -> { totalDocs.getAndSet(indexUntilCheckpointCount()); }); + indexingThread.start(); + indexingThread.join(); + latch.await(); + // index again while we are stale. 
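+ // The blocked replica has now fallen MAX_CHECKPOINTS_BEHIND checkpoints behind and is past the 1s replication time limit, so further writes should be rejected with OpenSearchRejectedExecutionException.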
+ assertBusy(() -> { + expectThrows(OpenSearchRejectedExecutionException.class, () -> { + indexDoc(); + totalDocs.incrementAndGet(); + }); + }); + } + refresh(INDEX_NAME); + // wait for the replicas to catch up after block is released. + waitForSearchableDocs(totalDocs.get(), replicaNodes.toArray(new String[] {})); + + // index another doc showing there is no pressure enforced. + indexDoc(); + waitForSearchableDocs(totalDocs.incrementAndGet(), replicaNodes.toArray(new String[] {})); + verifyStoreContent(); + } + + /** + * This test ensures that a replica can be added while the index is under a write block, + * ensuring that only write requests are blocked. + */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6671") + public void testAddReplicaWhileWritesBlocked() throws Exception { + final String primaryNode = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replicaNode = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + + final IndexShard primaryShard = getIndexShard(primaryNode, INDEX_NAME); + final List<String> replicaNodes = new ArrayList<>(); + replicaNodes.add(replicaNode); + assertEqualSegmentInfosVersion(replicaNodes, primaryShard); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicInteger totalDocs = new AtomicInteger(0); + try (final Releasable ignored = blockReplication(replicaNodes, latch)) { + Thread indexingThread = new Thread(() -> { totalDocs.getAndSet(indexUntilCheckpointCount()); }); + indexingThread.start(); + indexingThread.join(); + latch.await(); + // index again while we are stale. + assertBusy(() -> { + expectThrows(OpenSearchRejectedExecutionException.class, () -> { + indexDoc(); + totalDocs.incrementAndGet(); + }); + }); + final String replica_2 = internalCluster().startNode(); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 2)) + ); + ensureGreen(INDEX_NAME); + replicaNodes.add(replica_2); + waitForSearchableDocs(totalDocs.get(), replica_2); + } + refresh(INDEX_NAME); + // wait for the replicas to catch up after block is released. + waitForSearchableDocs(totalDocs.get(), replicaNodes.toArray(new String[] {})); + + // index another doc showing there is no pressure enforced. + indexDoc(); + waitForSearchableDocs(totalDocs.incrementAndGet(), replicaNodes.toArray(new String[] {})); + verifyStoreContent(); + } + + public void testBelowReplicaLimit() throws Exception { + final Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 3).build(); + final String primaryNode = internalCluster().startNode(); + createIndex(INDEX_NAME, settings); + ensureYellowAndNoInitializingShards(INDEX_NAME); + List<String> replicaNodes = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + replicaNodes.add(internalCluster().startNode()); + } + ensureGreen(INDEX_NAME); + + final IndexShard primaryShard = getIndexShard(primaryNode, INDEX_NAME); + assertEqualSegmentInfosVersion(replicaNodes, primaryShard); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicInteger totalDocs = new AtomicInteger(0); + // only block a single replica, pressure should not get applied.
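+ // (three replicas are configured; with two of them still progressing, the group should stay below the backpressure threshold)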
+ try (final Releasable ignored = blockReplication(replicaNodes.subList(0, 1), latch)) { + Thread indexingThread = new Thread(() -> totalDocs.getAndSet(indexUntilCheckpointCount())); + indexingThread.start(); + indexingThread.join(); + latch.await(); + indexDoc(); + totalDocs.incrementAndGet(); + refresh(INDEX_NAME); + } + // index another doc showing there is no pressure enforced. + indexDoc(); + refresh(INDEX_NAME); + waitForSearchableDocs(totalDocs.incrementAndGet(), replicaNodes.toArray(new String[] {})); + verifyStoreContent(); + } + + public void testBulkWritesRejected() throws Exception { + final String primaryNode = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replicaNode = internalCluster().startNode(); + final String coordinator = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + ensureGreen(INDEX_NAME); + + final IndexShard primaryShard = getIndexShard(primaryNode, INDEX_NAME); + final List<String> replicaNodes = asList(replicaNode); + assertEqualSegmentInfosVersion(replicaNodes, primaryShard); + + final CountDownLatch latch = new CountDownLatch(1); + List<String> nodes = List.of(primaryNode, replicaNode, coordinator); + + int docsPerBatch = randomIntBetween(1, 200); + int totalDocs = docsPerBatch * MAX_CHECKPOINTS_BEHIND; + try (final Releasable ignored = blockReplication(replicaNodes, latch)) { + Thread indexingThread = new Thread(() -> { + for (int i = 0; i < MAX_CHECKPOINTS_BEHIND + 1; i++) { + executeBulkRequest(nodes, docsPerBatch); + refresh(INDEX_NAME); + } + }); + indexingThread.start(); + indexingThread.join(); + latch.await(); + // try to index again while we are stale. + assertBusy(() -> { assertFailedRequests(executeBulkRequest(nodes, randomIntBetween(1, 200))); }); + } + refresh(INDEX_NAME); + // wait for the replicas to catch up after block is released. + waitForSearchableDocs(totalDocs, replicaNodes.toArray(new String[] {})); + + // index more docs showing there is no pressure enforced. + executeBulkRequest(nodes, totalDocs); + waitForSearchableDocs(totalDocs * 2L, replicaNodes.toArray(new String[] {})); + verifyStoreContent(); + } + + private BulkResponse executeBulkRequest(List<String> nodes, int docsPerBatch) { + final BulkRequest bulkRequest = new BulkRequest(); + for (int j = 0; j < docsPerBatch; ++j) { + IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()) + .source(Collections.singletonMap("key", randomAlphaOfLength(50))); + bulkRequest.add(request); + } + final BulkResponse bulkItemResponses = client(randomFrom(nodes)).bulk(bulkRequest).actionGet(); + refresh(INDEX_NAME); + return bulkItemResponses; + } + + /** + * Index and refresh in batches to force checkpoints to fall behind. + * Asserts that there are no stale replicas according to the primary until the checkpoint count is reached.
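+ * @return the total number of documents indexed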
+ */ + private int indexUntilCheckpointCount() { + int total = 0; + for (int i = 0; i < MAX_CHECKPOINTS_BEHIND; i++) { + final int numDocs = randomIntBetween(1, 100); + for (int j = 0; j < numDocs; ++j) { + indexDoc(); + } + total += numDocs; + refresh(INDEX_NAME); + } + return total; + } + + private void assertFailedRequests(BulkResponse response) { + assertTrue(Arrays.stream(response.getItems()).allMatch(BulkItemResponse::isFailed)); + assertTrue( + Arrays.stream(response.getItems()) + .map(BulkItemResponse::getFailure) + .allMatch((failure) -> failure.getStatus() == RestStatus.TOO_MANY_REQUESTS) + ); + } + + private void indexDoc() { + client().prepareIndex(INDEX_NAME).setId(UUIDs.base64UUID()).setSource("{}", "{}").get(); + } + + private void assertEqualSegmentInfosVersion(List replicaNames, IndexShard primaryShard) { + for (String replicaName : replicaNames) { + final IndexShard replicaShard = getIndexShard(replicaName, INDEX_NAME); + assertEquals( + primaryShard.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replicaShard.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java b/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java index 45fbb2651a96d..73d6d9aff7b72 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java @@ -288,10 +288,9 @@ public void testInvalidIndexName() { } public void testDocumentWithBlankFieldName() { - MapperParsingException e = expectThrows( - MapperParsingException.class, - () -> { client().prepareIndex("test").setId("1").setSource("", "value1_2").execute().actionGet(); } - ); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> { + client().prepareIndex("test").setId("1").setSource("", "value1_2").execute().actionGet(); + }); assertThat(e.getMessage(), containsString("failed to parse")); assertThat(e.getRootCause().getMessage(), containsString("field name cannot be an empty string")); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java new file mode 100644 index 0000000000000..c95c8e30342af --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -0,0 +1,267 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.IndexModule; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +import org.opensearch.cluster.OpenSearchAllocationTestCase.ShardAllocations; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SegmentReplicationAllocationIT extends SegmentReplicationBaseIT { + + private void createIndex(String idxName, int shardCount, int replicaCount, boolean isSegRep) { + Settings.Builder builder = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shardCount) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicaCount); + if (isSegRep) { + builder = builder.put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + } else { + builder = builder.put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT); + } + prepareCreate(idxName, builder).get(); + } + + public void enablePreferPrimaryBalance() { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE.getKey(), "true")) + ); + } + + /** + * This test verifies that overall primary balance is attained during allocation, and that primary + * balance is maintained both per index and across all indices.
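+ * Here balance means no node holds more than ceil(totalPrimaryShards / nodeCount) started primaries, as asserted by verifyPerIndexPrimaryBalance() and verifyPrimaryBalance().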
+ * @throws Exception + public void testGlobalPrimaryAllocation() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final int maxReplicaCount = 1; + final int maxShardCount = 1; + final int nodeCount = randomIntBetween(maxReplicaCount + 1, 10); + final int numberOfIndices = randomIntBetween(5, 10); + + final List<String> nodeNames = new ArrayList<>(); + logger.info("--> Creating {} nodes", nodeCount); + for (int i = 0; i < nodeCount; i++) { + nodeNames.add(internalCluster().startNode()); + } + enablePreferPrimaryBalance(); + int shardCount, replicaCount; + ClusterState state; + for (int i = 0; i < numberOfIndices; i++) { + shardCount = randomIntBetween(1, maxShardCount); + replicaCount = randomIntBetween(0, maxReplicaCount); + createIndex("test" + i, shardCount, replicaCount, i % 2 == 0); + logger.info("--> Creating index {} with shard count {} and replica count {}", "test" + i, shardCount, replicaCount); + ensureGreen(TimeValue.timeValueSeconds(60)); + } + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + verifyPrimaryBalance(); + } + + /** + * This test verifies the happy path where primary shard allocation is balanced when multiple indices are created. + * + * In general this test passes even without primary shard balancing, due to the nature of the allocation + * algorithm, which assigns all primary shards first, followed by replica copies. + */ + public void testPerIndexPrimaryAllocation() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final int maxReplicaCount = 2; + final int maxShardCount = 5; + final int nodeCount = randomIntBetween(maxReplicaCount + 1, 10); + final int numberOfIndices = randomIntBetween(5, 10); + + final List<String> nodeNames = new ArrayList<>(); + logger.info("--> Creating {} nodes", nodeCount); + for (int i = 0; i < nodeCount; i++) { + nodeNames.add(internalCluster().startNode()); + } + enablePreferPrimaryBalance(); + int shardCount, replicaCount; + ClusterState state; + for (int i = 0; i < numberOfIndices; i++) { + shardCount = randomIntBetween(1, maxShardCount); + replicaCount = randomIntBetween(0, maxReplicaCount); + createIndex("test" + i, shardCount, replicaCount, i % 2 == 0); + logger.info("--> Creating index {} with shard count {} and replica count {}", "test" + i, shardCount, replicaCount); + ensureGreen(TimeValue.timeValueSeconds(60)); + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + } + verifyPerIndexPrimaryBalance(); + } + + /** + * This test verifies balanced primary shard allocation for a single index with a large shard count in the event + * of a node going down and a new node joining the cluster. This results in shard distribution skewness, and the + * re-balancing logic ensures the primary shard distribution is balanced.
+ * + */ + public void testSingleIndexShardAllocation() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final int maxReplicaCount = 1; + final int maxShardCount = 50; + final int nodeCount = 5; + + final List nodeNames = new ArrayList<>(); + logger.info("--> Creating {} nodes", nodeCount); + for (int i = 0; i < nodeCount; i++) { + nodeNames.add(internalCluster().startNode()); + } + enablePreferPrimaryBalance(); + + ClusterState state; + createIndex("test", maxShardCount, maxReplicaCount, true); + ensureGreen(TimeValue.timeValueSeconds(60)); + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + + // Remove a node + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeNames.get(0))); + ensureGreen(TimeValue.timeValueSeconds(60)); + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + + // Add a new node + internalCluster().startDataOnlyNode(); + ensureGreen(TimeValue.timeValueSeconds(60)); + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + } + + /** + * Similar to testSingleIndexShardAllocation test but creates multiple indices, multiple node adding in and getting + * removed. The test asserts post each such event that primary shard distribution is balanced across single index. + */ + public void testAllocationWithDisruption() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final int maxReplicaCount = 2; + final int maxShardCount = 5; + final int nodeCount = randomIntBetween(maxReplicaCount + 1, 10); + final int numberOfIndices = randomIntBetween(1, 10); + + logger.info("--> Creating {} nodes", nodeCount); + final List nodeNames = new ArrayList<>(); + for (int i = 0; i < nodeCount; i++) { + nodeNames.add(internalCluster().startNode()); + } + enablePreferPrimaryBalance(); + + int shardCount, replicaCount, totalShardCount = 0, totalReplicaCount = 0; + ClusterState state; + for (int i = 0; i < numberOfIndices; i++) { + shardCount = randomIntBetween(1, maxShardCount); + totalShardCount += shardCount; + replicaCount = randomIntBetween(1, maxReplicaCount); + totalReplicaCount += replicaCount; + logger.info("--> Creating index test{} with primary {} and replica {}", i, shardCount, replicaCount); + createIndex("test" + i, shardCount, replicaCount, i % 2 == 0); + ensureGreen(TimeValue.timeValueSeconds(60)); + if (logger.isTraceEnabled()) { + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + } + } + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + + final int additionalNodeCount = randomIntBetween(1, 5); + logger.info("--> Adding {} nodes", additionalNodeCount); + + internalCluster().startNodes(additionalNodeCount); + ensureGreen(TimeValue.timeValueSeconds(60)); + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + + logger.info("--> Stop one third nodes"); + for (int i = 0; i < nodeCount; i += 
+        for (int i = 0; i < nodeCount; i += 3) {
+            internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeNames.get(i)));
+            // give the replica a chance to be promoted to primary before terminating the node containing it
+            ensureGreen(TimeValue.timeValueSeconds(60));
+        }
+        state = client().admin().cluster().prepareState().execute().actionGet().getState();
+        logger.info(ShardAllocations.printShardDistribution(state));
+        verifyPerIndexPrimaryBalance();
+    }
+
+    /**
+     * Utility method that asserts the cluster has a balanced primary shard distribution within each index.
+     * @throws Exception exception
+     */
+    private void verifyPerIndexPrimaryBalance() throws Exception {
+        assertBusy(() -> {
+            final ClusterState currentState = client().admin().cluster().prepareState().execute().actionGet().getState();
+            RoutingNodes nodes = currentState.getRoutingNodes();
+            for (ObjectObjectCursor<String, IndexRoutingTable> index : currentState.getRoutingTable().indicesRouting()) {
+                final int totalPrimaryShards = index.value.primaryShardsActive();
+                final int avgPrimaryShardsPerNode = (int) Math.ceil(totalPrimaryShards * 1f / currentState.getRoutingNodes().size());
+                for (RoutingNode node : nodes) {
+                    final int primaryCount = node.shardsWithState(index.key, STARTED)
+                        .stream()
+                        .filter(ShardRouting::primary)
+                        .collect(Collectors.toList())
+                        .size();
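+                    // A node may hold at most the ceiling of this index's primary count divided by the node count.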
+                    assertTrue(primaryCount <= avgPrimaryShardsPerNode);
+                }
+            }
+        }, 60, TimeUnit.SECONDS);
+    }
+
+    private void verifyPrimaryBalance() throws Exception {
+        assertBusy(() -> {
+            final ClusterState currentState = client().admin().cluster().prepareState().execute().actionGet().getState();
+            RoutingNodes nodes = currentState.getRoutingNodes();
+            int totalPrimaryShards = 0;
+            for (ObjectObjectCursor<String, IndexRoutingTable> index : currentState.getRoutingTable().indicesRouting()) {
+                totalPrimaryShards += index.value.primaryShardsActive();
+            }
+            final int avgPrimaryShardsPerNode = (int) Math.ceil(totalPrimaryShards * 1f / currentState.getRoutingNodes().size());
+            for (RoutingNode node : nodes) {
+                final int primaryCount = node.shardsWithState(STARTED)
+                    .stream()
+                    .filter(ShardRouting::primary)
+                    .collect(Collectors.toList())
+                    .size();
+                assertTrue(primaryCount <= avgPrimaryShardsPerNode);
+            }
+        }, 60, TimeUnit.SECONDS);
+    }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java
index dfffeaf860734..e9626e6ecc0bd 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java
@@ -8,6 +8,7 @@ package org.opensearch.indices.replication;
 
+import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.metadata.IndexMetadata;
@@ -16,11 +17,13 @@
 import org.opensearch.cluster.routing.IndexShardRoutingTable;
 import org.opensearch.cluster.routing.ShardRouting;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.lease.Releasable;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.Index;
 import org.opensearch.index.IndexModule;
 import org.opensearch.index.IndexService;
+import org.opensearch.index.SegmentReplicationPerGroupStats;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.store.Store;
 import org.opensearch.index.store.StoreFileMetadata;
@@ -29,12 +32,14 @@
 import org.opensearch.plugins.Plugin;
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.transport.MockTransportService;
+import org.opensearch.transport.TransportService;
 
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
@@ -134,6 +139,24 @@ protected void waitForSearchableDocs(long docCount, String... nodes) throws Exce
         waitForSearchableDocs(docCount, Arrays.stream(nodes).collect(Collectors.toList()));
     }
 
+    protected void waitForSegmentReplication(String node) throws Exception {
+        assertBusy(() -> {
+            SegmentReplicationStatsResponse segmentReplicationStatsResponse = client(node).admin()
+                .indices()
+                .prepareSegmentReplicationStats(INDEX_NAME)
+                .setDetailed(true)
+                .execute()
+                .actionGet();
+            final SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats()
+                .get(INDEX_NAME)
+                .get(0);
+            assertEquals(
+                perGroupStats.getReplicaStats().stream().findFirst().get().getCurrentReplicationState().getStage(),
+                SegmentReplicationState.Stage.DONE
+            );
+        }, 1, TimeUnit.MINUTES);
+    }
+
     protected void verifyStoreContent() throws Exception {
         assertBusy(() -> {
             final ClusterState clusterState = getClusterState();
@@ -182,4 +205,31 @@ protected IndexShard getIndexShard(String node, String indexName) {
         return indexService.getShard(shardId.get());
     }
 
+    protected Releasable blockReplication(List<String> nodes, CountDownLatch latch) {
+        CountDownLatch pauseReplicationLatch = new CountDownLatch(nodes.size());
+        for (String node : nodes) {
+
+            MockTransportService mockTargetTransportService = ((MockTransportService) internalCluster().getInstance(
+                TransportService.class,
+                node
+            ));
+            mockTargetTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
+                if (action.equals(SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES)) {
+                    try {
+                        latch.countDown();
+                        pauseReplicationLatch.await();
+                    } catch (InterruptedException e) {
+                        throw new RuntimeException(e);
+                    }
+                }
+                connection.sendRequest(requestId, action, request, options);
+            });
+        }
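+        // Releasing the returned Releasable drains the latch, unblocking every GET_SEGMENT_FILES request paused above at once.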
+        return () -> {
+            while (pauseReplicationLatch.getCount() > 0) {
+                pauseReplicationLatch.countDown();
+            }
+        };
+    }
+
 }
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
index 9975a5ff65a34..51c0c8710d39d 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
@@ -9,31 +9,52 @@ package org.opensearch.indices.replication;
 
 import com.carrotsearch.randomizedtesting.RandomizedTest;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.tests.util.TestUtil;
+import org.apache.lucene.util.BytesRef;
 import org.opensearch.action.support.WriteRequest;
 import org.opensearch.action.update.UpdateResponse;
 import org.opensearch.client.Requests;
+import org.opensearch.cluster.health.ClusterHealthStatus;
+import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.cluster.routing.ShardRoutingState;
 import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.index.IndexModule;
+import org.opensearch.index.SegmentReplicationPerGroupStats;
+import org.opensearch.index.SegmentReplicationPressureService;
+import org.opensearch.index.SegmentReplicationShardStats;
 import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.shard.ShardId;
 import org.opensearch.indices.recovery.FileChunkRequest;
 import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.node.NodeClosedException;
 import org.opensearch.test.BackgroundIndexer;
 import org.opensearch.test.InternalTestCluster;
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.transport.MockTransportService;
 import org.opensearch.transport.TransportService;
 
+import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 
 import static java.util.Arrays.asList;
 import static org.opensearch.index.query.QueryBuilders.matchQuery;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchHits;
 
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
@@ -284,6 +305,105 @@ public void testReplicationAfterForceMerge() throws Exception {
         }
     }
 
+    /**
+     * This test verifies that segment replication does not fail for closed indices.
+     */
+    public void testClosedIndices() {
+        internalCluster().startClusterManagerOnlyNode();
+        List<String> nodes = new ArrayList<>();
+        // start 1st node so that it contains the primary
+        nodes.add(internalCluster().startNode());
+        createIndex(INDEX_NAME, super.indexSettings());
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        // start 2nd node so that it contains the replica
+        nodes.add(internalCluster().startNode());
+        ensureGreen(INDEX_NAME);
+
+        logger.info("--> Close index");
+        assertAcked(client().admin().indices().prepareClose(INDEX_NAME));
+
+        logger.info("--> waiting for allocation to have shards assigned");
+        waitForRelocation(ClusterHealthStatus.GREEN);
+    }
+
+    /**
+     * This test validates that a primary node drop does not result in shard failure on the replica.
+     * @throws Exception
+     */
+    public void testNodeDropWithOngoingReplication() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        final String primaryNode = internalCluster().startNode();
+        createIndex(
+            INDEX_NAME,
+            Settings.builder()
+                .put(indexSettings())
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
+                .put("index.refresh_interval", -1)
+                .build()
+        );
+        ensureYellow(INDEX_NAME);
+        final String replicaNode = internalCluster().startNode();
+        ensureGreen(INDEX_NAME);
+        ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+        // Get replica allocation id
+        final String replicaAllocationId = state.routingTable()
+            .index(INDEX_NAME)
+            .shardsWithState(ShardRoutingState.STARTED)
+            .stream()
+            .filter(routing -> routing.primary() == false)
+            .findFirst()
+            .get()
+            .allocationId()
+            .getId();
+        DiscoveryNode primaryDiscovery = state.nodes().resolveNode(primaryNode);
+
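+        // Block the final chunk of the .cfs file and fail the transfer as if the primary node had closed mid-copy.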
+        CountDownLatch blockFileCopy = new CountDownLatch(1);
+        MockTransportService primaryTransportService = ((MockTransportService) internalCluster().getInstance(
+            TransportService.class,
+            primaryNode
+        ));
+        primaryTransportService.addSendBehavior(
+            internalCluster().getInstance(TransportService.class, replicaNode),
+            (connection, requestId, action, request, options) -> {
+                if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK)) {
+                    FileChunkRequest req = (FileChunkRequest) request;
+                    logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk());
+                    if (req.name().endsWith("cfs") && req.lastChunk()) {
+                        try {
+                            blockFileCopy.await();
+                        } catch (InterruptedException e) {
+                            throw new RuntimeException(e);
+                        }
+                        throw new NodeClosedException(primaryDiscovery);
+                    }
+                }
+                connection.sendRequest(requestId, action, request, options);
+            }
+        );
+        final int docCount = scaledRandomIntBetween(10, 200);
+        for (int i = 0; i < docCount; i++) {
+            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get();
+        }
+        // Refresh, this should trigger a round of segment replication
+        refresh(INDEX_NAME);
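+        // Unblock the file copy and drop the primary while the replication round is still in flight.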
+        blockFileCopy.countDown();
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode));
+        assertBusy(() -> { assertDocCounts(docCount, replicaNode); });
+        state = client().admin().cluster().prepareState().execute().actionGet().getState();
+        // the replica, now promoted to primary, should keep the same allocation id
+        final String currentAllocationID = state.routingTable()
+            .index(INDEX_NAME)
+            .shardsWithState(ShardRoutingState.STARTED)
+            .stream()
+            .filter(routing -> routing.primary())
+            .findFirst()
+            .get()
+            .allocationId()
+            .getId();
+        assertEquals(currentAllocationID, replicaAllocationId);
+    }
+
     public void testCancellation() throws Exception {
         final String primaryNode = internalCluster().startNode();
         createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build());
@@ -423,6 +543,68 @@ public void testDeleteOperations() throws Exception {
         }
     }
 
+    /**
+     * This tests that the max seqNo we send to replicas is accurate and that after failover
+     * the new primary starts indexing from the correct maxSeqNo and replays the correct number of docs
+     * from the translog.
+     */
+    public void testReplicationPostDeleteAndForceMerge() throws Exception {
+        final String primary = internalCluster().startNode();
+        createIndex(INDEX_NAME);
+        final String replica = internalCluster().startNode();
+        ensureGreen(INDEX_NAME);
+        final int initialDocCount = scaledRandomIntBetween(10, 200);
+        for (int i = 0; i < initialDocCount; i++) {
+            client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get();
+        }
+        refresh(INDEX_NAME);
+        waitForSearchableDocs(initialDocCount, primary, replica);
+
+        final int deletedDocCount = randomIntBetween(10, initialDocCount);
+        for (int i = 0; i < deletedDocCount; i++) {
+            client(primary).prepareDelete(INDEX_NAME, String.valueOf(i)).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
+        }
+        client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(false).get();
+
+        // randomly flush here after the force merge to wipe any old segments.
+        if (randomBoolean()) {
+            flush(INDEX_NAME);
+        }
+
+        final IndexShard primaryShard = getIndexShard(primary, INDEX_NAME);
+        final IndexShard replicaShard = getIndexShard(replica, INDEX_NAME);
+        assertBusy(
+            () -> assertEquals(
+                primaryShard.getLatestReplicationCheckpoint().getSegmentInfosVersion(),
+                replicaShard.getLatestReplicationCheckpoint().getSegmentInfosVersion()
+            )
+        );
+
+        // add some docs to the translog and drop the primary.
+        final int additionalDocs = randomIntBetween(1, 50);
+        for (int i = initialDocCount; i < initialDocCount + additionalDocs; i++) {
+            client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get();
+        }
+        // Drop the primary and wait until the replica is promoted.
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary));
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+
+        final ShardRouting replicaShardRouting = getShardRoutingForNodeName(replica);
+        assertNotNull(replicaShardRouting);
+        assertTrue(replicaShardRouting + " should be promoted as a primary", replicaShardRouting.primary());
+        refresh(INDEX_NAME);
+        final long expectedHitCount = initialDocCount + additionalDocs - deletedDocCount;
+        assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount);
+
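+        // Every index and delete operation consumes one sequence number, so the zero-based maxSeqNo is total operations minus one.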
+        int expectedMaxSeqNo = initialDocCount + deletedDocCount + additionalDocs - 1;
+        assertEquals(expectedMaxSeqNo, replicaShard.seqNoStats().getMaxSeqNo());
+
+        // index another doc.
+        client().prepareIndex(INDEX_NAME).setId(String.valueOf(expectedMaxSeqNo + 1)).setSource("another", "doc").get();
+        refresh(INDEX_NAME);
+        assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount + 1);
+    }
+
     public void testUpdateOperations() throws Exception {
         internalCluster().startClusterManagerOnlyNode();
         final String primary = internalCluster().startDataOnlyNode();
@@ -516,4 +698,140 @@ public void testDropPrimaryDuringReplication() throws Exception {
             verifyStoreContent();
         }
     }
+
+    public void testReplicaHasDiffFilesThanPrimary() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        final String primaryNode = internalCluster().startNode();
+        createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build());
+        ensureYellow(INDEX_NAME);
+        final String replicaNode = internalCluster().startNode();
+        ensureGreen(INDEX_NAME);
+
+        final IndexShard replicaShard = getIndexShard(replicaNode, INDEX_NAME);
+        IndexWriterConfig iwc = newIndexWriterConfig().setOpenMode(IndexWriterConfig.OpenMode.APPEND);
+
+        // create some docs to index
+        int numDocs = 2 + random().nextInt(100);
+
+        List<Document> docs = new ArrayList<>();
+        for (int i = 0; i < numDocs; i++) {
+            Document doc = new Document();
+            doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+            doc.add(
+                new TextField(
+                    "body",
+                    TestUtil.randomRealisticUnicodeString(random()),
+                    random().nextBoolean() ? Field.Store.YES : Field.Store.NO
+                )
+            );
+            doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
+            docs.add(doc);
+        }
+        // create some segments on the replica before copy.
+        try (IndexWriter writer = new IndexWriter(replicaShard.store().directory(), iwc)) {
+            for (Document d : docs) {
+                writer.addDocument(d);
+            }
+            writer.flush();
+            writer.commit();
+        }
+
+        final SegmentInfos segmentInfos = SegmentInfos.readLatestCommit(replicaShard.store().directory());
+        replicaShard.finalizeReplication(segmentInfos);
+
+        final int docCount = scaledRandomIntBetween(10, 200);
+        for (int i = 0; i < docCount; i++) {
+            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get();
+            refresh(INDEX_NAME);
+        }
+        // Each refresh above triggers a round of segment replication; wait for the replica to catch up.
+        assertBusy(() -> { assertDocCounts(docCount, replicaNode); });
+        final IndexShard replicaAfterFailure = getIndexShard(replicaNode, INDEX_NAME);
+        assertNotEquals(replicaAfterFailure.routingEntry().allocationId().getId(), replicaShard.routingEntry().allocationId().getId());
+    }
+
+    public void testPressureServiceStats() throws Exception {
+        final String primaryNode = internalCluster().startNode();
+        createIndex(INDEX_NAME);
+        final String replicaNode = internalCluster().startNode();
+        ensureGreen(INDEX_NAME);
+
+        int initialDocCount = scaledRandomIntBetween(100, 200);
+        try (
+            BackgroundIndexer indexer = new BackgroundIndexer(
+                INDEX_NAME,
+                "_doc",
+                client(),
+                -1,
+                RandomizedTest.scaledRandomIntBetween(2, 5),
+                false,
+                random()
+            )
+        ) {
+            indexer.start(initialDocCount);
+            waitForDocs(initialDocCount, indexer);
+            refresh(INDEX_NAME);
+
+            SegmentReplicationPressureService pressureService = internalCluster().getInstance(
+                SegmentReplicationPressureService.class,
+                primaryNode
+            );
+
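+            // Replication pressure is tracked on the primary; the replica node should report an empty stats map (asserted below).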
+            final Map<ShardId, SegmentReplicationPerGroupStats> shardStats = pressureService.nodeStats().getShardStats();
+            assertEquals(1, shardStats.size());
+            final IndexShard primaryShard = getIndexShard(primaryNode, INDEX_NAME);
+            IndexShard replica = getIndexShard(replicaNode, INDEX_NAME);
+            SegmentReplicationPerGroupStats groupStats = shardStats.get(primaryShard.shardId());
+            Set<SegmentReplicationShardStats> replicaStats = groupStats.getReplicaStats();
+            assertEquals(1, replicaStats.size());
+
+            // assert the replica node returns nothing.
+            SegmentReplicationPressureService replicaNode_service = internalCluster().getInstance(
+                SegmentReplicationPressureService.class,
+                replicaNode
+            );
+            assertTrue(replicaNode_service.nodeStats().getShardStats().isEmpty());
+
+            // drop the primary, this won't hand off SR state.
+            internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode));
+            ensureYellowAndNoInitializingShards(INDEX_NAME);
+            replicaNode_service = internalCluster().getInstance(SegmentReplicationPressureService.class, replicaNode);
+            replica = getIndexShard(replicaNode, INDEX_NAME);
+            assertTrue("replica should be promoted as a primary", replica.routingEntry().primary());
+            assertEquals(1, replicaNode_service.nodeStats().getShardStats().size());
+            // we don't have a replica assigned yet, so this should be 0.
+            assertEquals(0, replicaNode_service.nodeStats().getShardStats().get(primaryShard.shardId()).getReplicaStats().size());
+
+            // start another replica.
+            String replicaNode_2 = internalCluster().startNode();
+            ensureGreen(INDEX_NAME);
+            String docId = String.valueOf(initialDocCount + 1);
+            client().prepareIndex(INDEX_NAME).setId(docId).setSource("foo", "bar").get();
+            refresh(INDEX_NAME);
+            waitForSearchableDocs(initialDocCount + 1, replicaNode_2);
+
+            replicaNode_service = internalCluster().getInstance(SegmentReplicationPressureService.class, replicaNode);
+            replica = getIndexShard(replicaNode_2, INDEX_NAME);
+            assertEquals(1, replicaNode_service.nodeStats().getShardStats().size());
+            replicaStats = replicaNode_service.nodeStats().getShardStats().get(primaryShard.shardId()).getReplicaStats();
+            assertEquals(1, replicaStats.size());
+
+            // test a checkpoint without any new segments
+            flush(INDEX_NAME);
+            assertBusy(() -> {
+                final SegmentReplicationPressureService service = internalCluster().getInstance(
+                    SegmentReplicationPressureService.class,
+                    replicaNode
+                );
+                assertEquals(1, service.nodeStats().getShardStats().size());
+                final Set<SegmentReplicationShardStats> shardStatsSet = service.nodeStats()
+                    .getShardStats()
+                    .get(primaryShard.shardId())
+                    .getReplicaStats();
+                assertEquals(1, shardStatsSet.size());
+                final SegmentReplicationShardStats stats = shardStatsSet.stream().findFirst().get();
+                assertEquals(0, stats.getCheckpointsBehindCount());
+            });
+        }
+    }
 }
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java
index a47b737c3e198..58e0686385d5e 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java
@@ -12,6 +12,7 @@
 import org.opensearch.action.ActionFuture;
 import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse;
+import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse;
 import org.opensearch.action.index.IndexResponse;
 import org.opensearch.action.support.WriteRequest;
 import org.opensearch.cluster.ClusterState;
@@ -33,6 +34,7 @@
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
 
 /**
  * This test class verifies primary shard relocation with segment replication as replication strategy.
@@ -494,4 +496,67 @@ public void testAddNewReplicaFailure() throws Exception {
         assertTrue(clusterHealthResponse.isTimedOut());
         ensureYellow(INDEX_NAME);
     }
+
+    public void testFlushAfterRelocation() throws Exception {
+        // Starting two nodes with primary and replica shards respectively.
+        final String primaryNode = internalCluster().startNode();
+        prepareCreate(
+            INDEX_NAME,
+            Settings.builder()
+                // we want to control refreshes
+                .put("index.refresh_interval", -1)
+        ).get();
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        final String replicaNode = internalCluster().startNode();
+        ensureGreen(INDEX_NAME);
+
+        // Start another empty node for relocation
+        final String newPrimary = internalCluster().startNode();
+        ClusterHealthResponse clusterHealthResponse = client().admin()
+            .cluster()
+            .prepareHealth()
+            .setWaitForEvents(Priority.LANGUID)
+            .setWaitForNodes("3")
+            .execute()
+            .actionGet();
+        assertEquals(clusterHealthResponse.isTimedOut(), false);
+        ensureGreen(INDEX_NAME);
+
+        // Start indexing docs
+        final int initialDocCount = scaledRandomIntBetween(2000, 3000);
+        for (int i = 0; i < initialDocCount; i++) {
+            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+        }
+
+        // Verify that no segment replication event has happened on the replica shard yet
+        SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin()
+            .indices()
+            .prepareSegmentReplicationStats(INDEX_NAME)
+            .execute()
+            .actionGet();
+        assertTrue(segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).get(0).getReplicaStats().isEmpty());
+
+        // Relocate the primary to the new primary node. When the new primary starts, it performs a flush.
+        logger.info("--> relocate the shard from primary to newPrimary");
+        ActionFuture<ClusterRerouteResponse> relocationListener = client().admin()
+            .cluster()
+            .prepareReroute()
+            .add(new MoveAllocationCommand(INDEX_NAME, 0, primaryNode, newPrimary))
+            .execute();
+        clusterHealthResponse = client().admin()
+            .cluster()
+            .prepareHealth()
+            .setWaitForEvents(Priority.LANGUID)
+            .setWaitForNoRelocatingShards(true)
+            .setTimeout(ACCEPTABLE_RELOCATION_TIME)
+            .execute()
+            .actionGet();
+        assertEquals(clusterHealthResponse.isTimedOut(), false);
+
+        // Verify all docs are present on the replica after relocation; if the newly relocated primary does not flush
+        // after relocation, the assertion below will fail.
+        assertBusy(() -> {
+            assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setPreference("_only_local").setSize(0).get(), initialDocCount);
+        });
+    }
 }
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java
index 6969f28a784d9..d162e51616831 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java
@@ -8,15 +8,21 @@
 
 package org.opensearch.indices.replication;
 
-import org.opensearch.OpenSearchStatusException;
 import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.index.SegmentReplicationPerGroupStats;
+import org.opensearch.index.SegmentReplicationShardStats;
+import org.opensearch.index.shard.IndexShard;
 import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.transport.MockTransportService;
 import org.opensearch.transport.TransportService;
 
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
@@ -54,22 +60,24 @@ public void testSegmentReplicationStatsResponse() throws Exception {
             SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin()
                 .indices()
                 .prepareSegmentReplicationStats(INDEX_NAME)
+                .setDetailed(true)
                 .execute()
                 .actionGet();
-            assertEquals(segmentReplicationStatsResponse.shardSegmentReplicationStates().size(), 1);
+            SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).get(0);
+            final SegmentReplicationState currentReplicationState = perGroupStats.getReplicaStats()
+                .stream()
+                .findFirst()
+                .get()
+                .getCurrentReplicationState();
+            assertEquals(segmentReplicationStatsResponse.getReplicationStats().size(), 1);
             assertEquals(segmentReplicationStatsResponse.getTotalShards(), numShards * 2);
             assertEquals(segmentReplicationStatsResponse.getSuccessfulShards(), numShards * 2);
-            assertEquals(
-                segmentReplicationStatsResponse.shardSegmentReplicationStates().get(INDEX_NAME).get(0).getStage(),
-                SegmentReplicationState.Stage.DONE
-            );
-            assertTrue(
-                segmentReplicationStatsResponse.shardSegmentReplicationStates().get(INDEX_NAME).get(0).getIndex().recoveredFileCount() > 0
-            );
+            assertEquals(currentReplicationState.getStage(), SegmentReplicationState.Stage.DONE);
+            assertTrue(currentReplicationState.getIndex().recoveredFileCount() > 0);
         }, 1, TimeUnit.MINUTES);
     }
 
-    public void testSegmentReplicationStatsResponseForActiveAndCompletedOnly() throws Exception {
+    public void testSegmentReplicationStatsResponseForActiveOnly() throws Exception {
         final String primaryNode = internalCluster().startNode();
         createIndex(INDEX_NAME);
         ensureYellowAndNoInitializingShards(INDEX_NAME);
@@ -121,53 +129,231 @@ public void testSegmentReplicationStatsResponseForActiveAndCompletedOnly() throw
             .indices()
             .prepareSegmentReplicationStats(INDEX_NAME)
             .setActiveOnly(true)
+            .setDetailed(true)
             .execute()
             .actionGet();
-        assertEquals(
-            activeOnlyResponse.shardSegmentReplicationStates().get(INDEX_NAME).get(0).getStage(),
-            SegmentReplicationState.Stage.GET_FILES
+        SegmentReplicationPerGroupStats perGroupStats = activeOnlyResponse.getReplicationStats().get(INDEX_NAME).get(0);
+        SegmentReplicationState.Stage stage = perGroupStats.getReplicaStats()
+            .stream()
+            .findFirst()
+            .get()
+            .getCurrentReplicationState()
+            .getStage();
+        assertEquals(SegmentReplicationState.Stage.GET_FILES, stage);
+        waitForAssertions.countDown();
+    }
+
+    public void testNonDetailedResponse() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        int numReplicas = 4;
+        List<String> nodes = new ArrayList<>();
+        final String primaryNode = internalCluster().startNode();
+        nodes.add(primaryNode);
+        createIndex(
+            INDEX_NAME,
+            Settings.builder()
+                .put(indexSettings())
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas)
+                .build()
         );
+        ensureYellow(INDEX_NAME);
+        for (int i = 0; i < numReplicas; i++) {
+            nodes.add(internalCluster().startNode());
+        }
+        ensureGreen(INDEX_NAME);
+
+        final long numDocs = scaledRandomIntBetween(50, 100);
+        for (int i = 0; i < numDocs; i++) {
+            index(INDEX_NAME, "doc", Integer.toString(i));
+        }
+        refresh(INDEX_NAME);
+        waitForSearchableDocs(numDocs, nodes);
+
+        final IndexShard indexShard = getIndexShard(primaryNode, INDEX_NAME);
+
+        assertBusy(() -> {
+            SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin()
+                .indices()
+                .prepareSegmentReplicationStats(INDEX_NAME)
+                .execute()
+                .actionGet();
 
-        // verifying completed_only by checking if current stage is DONE
-        SegmentReplicationStatsResponse completedOnlyResponse = client().admin()
+            final Map<String, List<SegmentReplicationPerGroupStats>> replicationStats = segmentReplicationStatsResponse
+                .getReplicationStats();
+            assertEquals(1, replicationStats.size());
+            final List<SegmentReplicationPerGroupStats> replicationPerGroupStats = replicationStats.get(INDEX_NAME);
+            assertEquals(1, replicationPerGroupStats.size());
+            final SegmentReplicationPerGroupStats perGroupStats = replicationPerGroupStats.get(0);
+            assertEquals(perGroupStats.getShardId(), indexShard.shardId());
+            final Set<SegmentReplicationShardStats> replicaStats = perGroupStats.getReplicaStats();
+            assertEquals(4, replicaStats.size());
+            for (SegmentReplicationShardStats replica : replicaStats) {
+                assertNotNull(replica.getCurrentReplicationState());
+            }
+        });
+    }
+
+    public void testGetSpecificShard() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        List<String> nodes = new ArrayList<>();
+        final String primaryNode = internalCluster().startNode();
+        nodes.add(primaryNode);
+        createIndex(
+            INDEX_NAME,
+            Settings.builder()
+                .put(indexSettings())
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
+                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
+                .build()
+        );
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        nodes.add(internalCluster().startNode());
+        ensureGreen(INDEX_NAME);
+
+        final long numDocs = scaledRandomIntBetween(50, 100);
+        for (int i = 0; i < numDocs; i++) {
+            index(INDEX_NAME, "doc", Integer.toString(i));
+        }
+        refresh(INDEX_NAME);
+        waitForSearchableDocs(numDocs, nodes);
+
+        final IndexShard indexShard = getIndexShard(primaryNode, INDEX_NAME);
+
+        // search for all
+        SegmentReplicationStatsResponse segmentReplicationStatsResponse = client().admin()
             .indices()
             .prepareSegmentReplicationStats(INDEX_NAME)
-            .setCompletedOnly(true)
+            .setActiveOnly(true)
             .execute()
             .actionGet();
-        assertEquals(completedOnlyResponse.shardSegmentReplicationStates().size(), SHARD_COUNT);
-        assertEquals(
-            completedOnlyResponse.shardSegmentReplicationStates().get(INDEX_NAME).get(0).getStage(),
-            SegmentReplicationState.Stage.DONE
-        );
-        assertTrue(completedOnlyResponse.shardSegmentReplicationStates().get(INDEX_NAME).get(0).getIndex().recoveredFileCount() > 0);
-        waitForAssertions.countDown();
+
+        Map<String, List<SegmentReplicationPerGroupStats>> replicationStats = segmentReplicationStatsResponse.getReplicationStats();
+        assertEquals(1, replicationStats.size());
+        List<SegmentReplicationPerGroupStats> replicationPerGroupStats = replicationStats.get(INDEX_NAME);
+        assertEquals(2, replicationPerGroupStats.size());
+        for (SegmentReplicationPerGroupStats group : replicationPerGroupStats) {
+            assertEquals(1, group.getReplicaStats().size());
+        }
+
+        // now search for one shard.
+        final int id = indexShard.shardId().getId();
+        segmentReplicationStatsResponse = client().admin()
+            .indices()
+            .prepareSegmentReplicationStats(INDEX_NAME)
+            .setActiveOnly(true)
+            .shards(String.valueOf(id))
+            .execute()
+            .actionGet();
+
+        replicationStats = segmentReplicationStatsResponse.getReplicationStats();
+        assertEquals(1, replicationStats.size());
+        replicationPerGroupStats = replicationStats.get(INDEX_NAME);
+        assertEquals(1, replicationPerGroupStats.size());
+        for (SegmentReplicationPerGroupStats group : replicationPerGroupStats) {
+            assertEquals(group.getShardId(), indexShard.shardId());
+            assertEquals(1, group.getReplicaStats().size());
+        }
+    }
 
-    public void testSegmentReplicationStatsResponseOnDocumentReplicationIndex() {
+    public void testMultipleIndices() throws Exception {
+        internalCluster().startClusterManagerOnlyNode();
+        final String index_2 = "tst-index-2";
+        List<String> nodes = new ArrayList<>();
         final String primaryNode = internalCluster().startNode();
-        prepareCreate(
-            INDEX_NAME,
-            Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT)
+        nodes.add(primaryNode);
+        createIndex(INDEX_NAME, index_2);
+
+        ensureYellowAndNoInitializingShards(INDEX_NAME, index_2);
+        nodes.add(internalCluster().startNode());
+        ensureGreen(INDEX_NAME, index_2);
+
+        final long numDocs = scaledRandomIntBetween(50, 100);
+        for (int i = 0; i < numDocs; i++) {
+            index(INDEX_NAME, "doc", Integer.toString(i));
+            index(index_2, "doc", Integer.toString(i));
+        }
+        refresh(INDEX_NAME, index_2);
+        waitForSearchableDocs(INDEX_NAME, numDocs, nodes);
+        waitForSearchableDocs(index_2, numDocs, nodes);
+
+        final IndexShard index_1_primary = getIndexShard(primaryNode, INDEX_NAME);
+        final IndexShard index_2_primary = getIndexShard(primaryNode, index_2);
+
+        assertTrue(index_1_primary.routingEntry().primary());
+        assertTrue(index_2_primary.routingEntry().primary());
+
+        // test both indices are returned in the response.
+        SegmentReplicationStatsResponse segmentReplicationStatsResponse = client().admin()
+            .indices()
+            .prepareSegmentReplicationStats()
+            .execute()
+            .actionGet();
+
+        Map<String, List<SegmentReplicationPerGroupStats>> replicationStats = segmentReplicationStatsResponse.getReplicationStats();
+        assertEquals(2, replicationStats.size());
+        List<SegmentReplicationPerGroupStats> replicationPerGroupStats = replicationStats.get(INDEX_NAME);
+        assertEquals(1, replicationPerGroupStats.size());
+        SegmentReplicationPerGroupStats perGroupStats = replicationPerGroupStats.get(0);
+        assertEquals(perGroupStats.getShardId(), index_1_primary.shardId());
+        Set<SegmentReplicationShardStats> replicaStats = perGroupStats.getReplicaStats();
+        assertEquals(1, replicaStats.size());
+        for (SegmentReplicationShardStats replica : replicaStats) {
+            assertNotNull(replica.getCurrentReplicationState());
+        }
+
+        replicationPerGroupStats = replicationStats.get(index_2);
+        assertEquals(1, replicationPerGroupStats.size());
+        perGroupStats = replicationPerGroupStats.get(0);
+        assertEquals(perGroupStats.getShardId(), index_2_primary.shardId());
+        replicaStats = perGroupStats.getReplicaStats();
+        assertEquals(1, replicaStats.size());
+        for (SegmentReplicationShardStats replica : replicaStats) {
+            assertNotNull(replica.getCurrentReplicationState());
+        }
+
+        // test only single index queried.
+        segmentReplicationStatsResponse = client().admin()
+            .indices()
+            .prepareSegmentReplicationStats()
+            .setIndices(index_2)
+            .execute()
+            .actionGet();
+        assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().size());
+        assertTrue(segmentReplicationStatsResponse.getReplicationStats().containsKey(index_2));
+    }
 
-        ).get();
+    public void testQueryAgainstDocRepIndex() {
+        internalCluster().startClusterManagerOnlyNode();
+        List<String> nodes = new ArrayList<>();
+        final String primaryNode = internalCluster().startNode();
+        nodes.add(primaryNode);
+        createIndex(
+            INDEX_NAME,
+            Settings.builder()
+                .put(indexSettings())
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
+                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
+                .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT)
+                .build()
+        );
         ensureYellowAndNoInitializingShards(INDEX_NAME);
-        final String replicaNode = internalCluster().startNode();
+        nodes.add(internalCluster().startNode());
         ensureGreen(INDEX_NAME);
 
-        // index 10 docs
-        for (int i = 0; i < 10; i++) {
-            client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+        final long numDocs = scaledRandomIntBetween(50, 100);
+        for (int i = 0; i < numDocs; i++) {
+            index(INDEX_NAME, "doc", Integer.toString(i));
         }
         refresh(INDEX_NAME);
 
-        OpenSearchStatusException exception = assertThrows(
-            OpenSearchStatusException.class,
-            () -> client().admin().indices().prepareSegmentReplicationStats(INDEX_NAME).execute().actionGet()
-        );
-        // Verify exception message
-        String expectedMessage = "Segment Replication is not enabled on Index: test-idx-1";
-        assertEquals(expectedMessage, exception.getMessage());
+        // search for all
+        SegmentReplicationStatsResponse segmentReplicationStatsResponse = client().admin()
+            .indices()
+            .prepareSegmentReplicationStats(INDEX_NAME)
+            .execute()
+            .actionGet();
+        assertTrue(segmentReplicationStatsResponse.getReplicationStats().isEmpty());
     }
-
 }
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java
index 9970ff99a806c..6e94c50eec42a 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java
@@ -136,11 +136,9 @@ public static class DummySettingPlugin extends Plugin {
 
         @Override
         public void onIndexModule(IndexModule indexModule) {
-            indexModule.addSettingsUpdateConsumer(
-                DUMMY_SETTING,
-                (s) -> {},
-                (s) -> { if (s.equals("boom")) throw new IllegalArgumentException("this setting goes boom"); }
-            );
+            indexModule.addSettingsUpdateConsumer(DUMMY_SETTING, (s) -> {}, (s) -> {
+                if (s.equals("boom")) throw new IllegalArgumentException("this setting goes boom");
+            });
         }
 
         @Override
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java
index e8d425596beb0..be69428453952 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java
@@ -42,6 +42,7 @@
 import org.opensearch.search.aggregations.BucketOrder;
 import org.opensearch.test.OpenSearchIntegTestCase;
 
+import java.util.Map;
 import java.util.stream.IntStream;
 
 import static org.opensearch.search.aggregations.AggregationBuilders.cardinality;
@@ -62,7 +63,7 @@ public void testRequestBreaker() throws Exception {
             .mapToObj(
                 i -> client().prepareIndex("test")
                     .setId("id_" + i)
-                    .setSource(org.opensearch.common.collect.Map.of("field0", randomAlphaOfLength(5), "field1", randomAlphaOfLength(5)))
+                    .setSource(Map.of("field0", randomAlphaOfLength(5), "field1", randomAlphaOfLength(5)))
             )
             .toArray(IndexRequestBuilder[]::new)
         );
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java
index f3d1a479f1b46..0e9e409efae59 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java
@@ -69,7 +69,7 @@ public class AggregationProfilerIT extends OpenSearchIntegTestCase {
     private static final String INITIALIZE = AggregationTimingType.INITIALIZE.toString();
     private static final String BUILD_AGGREGATION = AggregationTimingType.BUILD_AGGREGATION.toString();
     private static final String REDUCE = AggregationTimingType.REDUCE.toString();
-    private static final Set<String> BREAKDOWN_KEYS = org.opensearch.common.collect.Set.of(
+    private static final Set<String> BREAKDOWN_KEYS = Set.of(
         INITIALIZE,
         BUILD_LEAF_COLLECTOR,
         COLLECT,
@@ -107,7 +107,7 @@ protected void setupSuiteScopeCluster() throws Exception {
             client().admin()
                 .indices()
                 .prepareCreate("idx")
-                .setSettings(org.opensearch.common.collect.Map.of("number_of_shards", 1, "number_of_replicas", 0))
+                .setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0))
                 .setMapping(STRING_FIELD, "type=keyword", NUMBER_FIELD, "type=integer", TAG_FIELD, "type=keyword")
                 .get()
         );
@@ -166,7 +166,7 @@ public void testSimpleProfile() {
         assertThat(breakdown.get(REDUCE), equalTo(0L));
         Map<String, Object> debug = histoAggResult.getDebugInfo();
         assertThat(debug, notNullValue());
-        assertThat(debug.keySet(), equalTo(org.opensearch.common.collect.Set.of(TOTAL_BUCKETS)));
+        assertThat(debug.keySet(), equalTo(Set.of(TOTAL_BUCKETS)));
         assertThat(((Number) debug.get(TOTAL_BUCKETS)).longValue(), greaterThan(0L));
     }
 }
@@ -209,7 +209,7 @@ public void testMultiLevelProfile() {
         assertThat(histoBreakdown.get(REDUCE), equalTo(0L));
         Map<String, Object> histoDebugInfo = histoAggResult.getDebugInfo();
         assertThat(histoDebugInfo, notNullValue());
-        assertThat(histoDebugInfo.keySet(), equalTo(org.opensearch.common.collect.Set.of(TOTAL_BUCKETS)));
+        assertThat(histoDebugInfo.keySet(), equalTo(Set.of(TOTAL_BUCKETS)));
         assertThat(((Number) histoDebugInfo.get(TOTAL_BUCKETS)).longValue(), greaterThan(0L));
         assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1));
 
@@ -240,7 +240,7 @@ public void testMultiLevelProfile() {
         assertThat(avgBreakdown.get(COLLECT), greaterThan(0L));
         assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
         assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
-        assertThat(avgAggResult.getDebugInfo(), equalTo(org.opensearch.common.collect.Map.of()));
+        assertThat(avgAggResult.getDebugInfo(), equalTo(Map.of()));
         assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
     }
 }
@@ -295,7 +295,7 @@ public void testMultiLevelProfileBreadthFirst() {
         assertThat(histoBreakdown.get(REDUCE), equalTo(0L));
         Map<String, Object> histoDebugInfo = histoAggResult.getDebugInfo();
         assertThat(histoDebugInfo, notNullValue());
-        assertThat(histoDebugInfo.keySet(), equalTo(org.opensearch.common.collect.Set.of(TOTAL_BUCKETS)));
+        assertThat(histoDebugInfo.keySet(), equalTo(Set.of(TOTAL_BUCKETS)));
         assertThat(((Number) histoDebugInfo.get(TOTAL_BUCKETS)).longValue(), greaterThan(0L));
         assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1));
 
@@ -326,7 +326,7 @@ public void testMultiLevelProfileBreadthFirst() {
         assertThat(avgBreakdown.get(COLLECT), greaterThan(0L));
         assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
         assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
-        assertThat(avgAggResult.getDebugInfo(), equalTo(org.opensearch.common.collect.Map.of()));
+        assertThat(avgAggResult.getDebugInfo(), equalTo(Map.of()));
         assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
     }
 }
@@ -366,10 +366,7 @@ public void testDiversifiedAggProfile() {
         assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L));
         assertThat(diversifyBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
         assertThat(diversifyBreakdown.get(REDUCE), equalTo(0L));
-        assertThat(
-            diversifyAggResult.getDebugInfo(),
-            equalTo(org.opensearch.common.collect.Map.of(DEFERRED, org.opensearch.common.collect.List.of("max")))
-        );
+        assertThat(diversifyAggResult.getDebugInfo(), equalTo(Map.of(DEFERRED, List.of("max"))));
         assertThat(diversifyAggResult.getProfiledChildren().size(), equalTo(1));
 
         ProfileResult maxAggResult = diversifyAggResult.getProfiledChildren().get(0);
@@ -386,7 +383,7 @@ public void testDiversifiedAggProfile() {
         assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L));
         assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
         assertThat(maxBreakdown.get(REDUCE), equalTo(0L));
-        assertThat(maxAggResult.getDebugInfo(), equalTo(org.opensearch.common.collect.Map.of()));
+        assertThat(maxAggResult.getDebugInfo(), equalTo(Map.of()));
         assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
     }
 }
@@ -441,7 +438,7 @@ public void testComplexProfile() {
         assertThat(histoBreakdown.get(REDUCE), equalTo(0L));
         Map<String, Object> histoDebugInfo = histoAggResult.getDebugInfo();
         assertThat(histoDebugInfo, notNullValue());
-        assertThat(histoDebugInfo.keySet(), equalTo(org.opensearch.common.collect.Set.of(TOTAL_BUCKETS)));
+        assertThat(histoDebugInfo.keySet(), equalTo(Set.of(TOTAL_BUCKETS)));
         assertThat(((Number) histoDebugInfo.get(TOTAL_BUCKETS)).longValue(), greaterThan(0L));
         assertThat(histoAggResult.getProfiledChildren().size(), equalTo(2));
 
@@ -482,7 +479,7 @@ public void testComplexProfile() {
         assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L));
         assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
         assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
-        assertThat(avgAggResult.getDebugInfo(), equalTo(org.opensearch.common.collect.Map.of()));
+        assertThat(avgAggResult.getDebugInfo(), equalTo(Map.of()));
         assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
 
         ProfileResult maxAggResult = tagsAggResultSubAggregations.get("max");
@@ -498,7 +495,7 @@ public void testComplexProfile() {
         assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L));
         assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
         assertThat(maxBreakdown.get(REDUCE), equalTo(0L));
-        assertThat(maxAggResult.getDebugInfo(), equalTo(org.opensearch.common.collect.Map.of()));
+        assertThat(maxAggResult.getDebugInfo(), equalTo(Map.of()));
         assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
 
         ProfileResult stringsAggResult = histoAggResultSubAggregations.get("strings");
@@ -534,7 +531,7 @@ public void testComplexProfile() {
         assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L));
         assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
         assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
-        assertThat(avgAggResult.getDebugInfo(), equalTo(org.opensearch.common.collect.Map.of()));
+        assertThat(avgAggResult.getDebugInfo(), equalTo(Map.of()));
         assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
 
         maxAggResult = stringsAggResultSubAggregations.get("max");
@@ -550,7 +547,7 @@ public void testComplexProfile() {
         assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L));
         assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
         assertThat(maxBreakdown.get(REDUCE), equalTo(0L));
-        assertThat(maxAggResult.getDebugInfo(), equalTo(org.opensearch.common.collect.Map.of()));
+        assertThat(maxAggResult.getDebugInfo(), equalTo(Map.of()));
         assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
 
         tagsAggResult = stringsAggResultSubAggregations.get("tags");
@@ -587,7 +584,7 @@ public void testComplexProfile() {
         assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L));
         assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
         assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
-        assertThat(avgAggResult.getDebugInfo(), equalTo(org.opensearch.common.collect.Map.of()));
+        assertThat(avgAggResult.getDebugInfo(), equalTo(Map.of()));
         assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
 
         maxAggResult = tagsAggResultSubAggregations.get("max");
@@ -603,7 +600,7 @@ public void testComplexProfile() {
         assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L));
         assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
         assertThat(maxBreakdown.get(REDUCE), equalTo(0L));
-        assertThat(maxAggResult.getDebugInfo(), equalTo(org.opensearch.common.collect.Map.of()));
+        assertThat(maxAggResult.getDebugInfo(), equalTo(Map.of()));
         assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
     }
 }
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java
index b969c0c4b8e24..5f5e0a0912140 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java
@@ -15,6 +15,7 @@
 import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
 import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
 import org.opensearch.action.index.IndexRequestBuilder;
+import org.opensearch.action.search.SearchResponse;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.ClusterState;
@@ -23,21 +24,27 @@
 import org.opensearch.cluster.routing.GroupShardsIterator;
 import org.opensearch.cluster.routing.ShardIterator;
 import org.opensearch.cluster.routing.ShardRouting;
-import org.opensearch.common.collect.Map;
 import org.opensearch.common.io.PathUtils;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.ByteSizeUnit;
 import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.env.NodeEnvironment;
 import org.opensearch.index.Index;
 import org.opensearch.index.IndexNotFoundException;
+import org.opensearch.index.query.QueryBuilders;
 import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter;
 import org.opensearch.index.store.remote.filecache.FileCacheStats;
 import org.opensearch.monitor.fs.FsInfo;
 import org.opensearch.repositories.fs.FsRepository;
 
+import java.io.IOException;
 import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
 import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
 import java.util.List;
+import java.util.Map;
+import java.util.stream.Stream;
 
 import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.equalTo;
@@ -46,6 +53,7 @@
 import static org.hamcrest.Matchers.notNullValue;
 import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS;
 import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
 
 @ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class)
 public final class SearchableSnapshotIT extends AbstractSnapshotIntegTestCase {
@@ -64,13 +72,14 @@ protected Settings featureFlagSettings() {
     protected Settings.Builder randomRepositorySettings() {
         final Settings.Builder settings = Settings.builder();
         settings.put("location", randomRepoPath()).put("compress", randomBoolean());
+        settings.put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path");
         return settings;
     }
 
     private Settings.Builder chunkedRepositorySettings() {
         final Settings.Builder settings = Settings.builder();
         settings.put("location", randomRepoPath()).put("compress", randomBoolean());
-        settings.put("chunk_size", 2 << 13, ByteSizeUnit.BYTES);
+        settings.put("chunk_size", 2 << 23, ByteSizeUnit.BYTES);
         return settings;
     }
 
@@ -379,6 +388,71 @@ private void testUpdateIndexSettingsAtLeastOneNotAllowedSettings(String index) {
         }
     }
 
+    public void testFileCacheStats() throws Exception {
+        final String snapshotName = "test-snap";
+        final String repoName = "test-repo";
+        final String indexName1 = "test-idx-1";
+        final Client client = client();
+        final int numNodes = 2;
+
+        internalCluster().ensureAtLeastNumDataNodes(numNodes);
+        createIndexWithDocsAndEnsureGreen(1, 100, indexName1);
+
+        createRepositoryWithSettings(null, repoName);
+        takeSnapshot(client, snapshotName, repoName, indexName1);
+        deleteIndicesAndEnsureGreen(client, indexName1);
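+        // Nothing has been restored as a searchable snapshot yet, so every node's file cache should be empty.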
+        assertAllNodesFileCacheEmpty();
+
+        internalCluster().ensureAtLeastNumSearchNodes(numNodes);
+        restoreSnapshotAndEnsureGreen(client, snapshotName, repoName);
+        assertNodesFileCacheNonEmpty(numNodes);
+    }
+
+    /**
+     * Tests the file cache restore scenario for searchable snapshots by creating an index,
+     * taking a snapshot, and restoring it as a searchable snapshot.
+     * It ensures the file cache is restored after a node restart.
+     */
+    public void testFileCacheRestore() throws Exception {
+        final String snapshotName = "test-snap";
+        final String repoName = "test-repo";
+        final String indexName = "test-idx";
+        final String restoredIndexName = indexName + "-copy";
+        // Keeping the replicas at 0 for reproducible cache results, as shards can get reassigned otherwise
+        final int numReplicasIndex = 0;
+        final Client client = client();
+
+        internalCluster().ensureAtLeastNumDataNodes(numReplicasIndex + 1);
+        createIndexWithDocsAndEnsureGreen(numReplicasIndex, 100, indexName);
+
+        createRepositoryWithSettings(null, repoName);
+        takeSnapshot(client, snapshotName, repoName, indexName);
+        deleteIndicesAndEnsureGreen(client, indexName);
+
+        internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex + 1);
+        restoreSnapshotAndEnsureGreen(client, snapshotName, repoName);
+        assertDocCount(restoredIndexName, 100L);
+        assertIndexDirectoryDoesNotExist(restoredIndexName);
+
+        NodesStatsResponse preRestoreStats = client().admin().cluster().nodesStats(new NodesStatsRequest().all()).actionGet();
+        for (NodeStats nodeStats : preRestoreStats.getNodes()) {
+            if (nodeStats.getNode().isSearchNode()) {
+                internalCluster().restartNode(nodeStats.getNode().getName());
+            }
+        }
+
+        NodesStatsResponse postRestoreStats = client().admin().cluster().nodesStats(new NodesStatsRequest().all()).actionGet();
+        Map<String, NodeStats> preRestoreStatsMap = preRestoreStats.getNodesMap();
+        Map<String, NodeStats> postRestoreStatsMap = postRestoreStats.getNodesMap();
+        for (String node : postRestoreStatsMap.keySet()) {
+            NodeStats preRestoreStat = preRestoreStatsMap.get(node);
+            NodeStats postRestoreStat = postRestoreStatsMap.get(node);
+            if (preRestoreStat.getNode().isSearchNode()) {
+                assertEquals(preRestoreStat.getFileCacheStats().getUsed(), postRestoreStat.getFileCacheStats().getUsed());
+            }
+        }
+    }
+
     /**
      * Picks a shard out of the cluster state for each given index and asserts
     * that the 'index' directory does not exist in the node's file system.
@@ -417,26 +491,6 @@ private void assertIndexDirectoryDoesNotExist(String... indexNames) {
         }
     }
 
-    public void testFileCacheStats() throws Exception {
-        final String snapshotName = "test-snap";
-        final String repoName = "test-repo";
-        final String indexName1 = "test-idx-1";
-        final Client client = client();
-        final int numNodes = 2;
-
-        internalCluster().ensureAtLeastNumDataNodes(numNodes);
-        createIndexWithDocsAndEnsureGreen(1, 100, indexName1);
-
-        createRepositoryWithSettings(null, repoName);
-        takeSnapshot(client, snapshotName, repoName, indexName1);
-        deleteIndicesAndEnsureGreen(client, indexName1);
-        assertAllNodesFileCacheEmpty();
-
-        internalCluster().ensureAtLeastNumSearchNodes(numNodes);
-        restoreSnapshotAndEnsureGreen(client, snapshotName, repoName);
-        assertNodesFileCacheNonEmpty(numNodes);
-    }
-
     private void assertAllNodesFileCacheEmpty() {
         NodesStatsResponse response = client().admin().cluster().nodesStats(new NodesStatsRequest().all()).actionGet();
         for (NodeStats stats : response.getNodes()) {
@@ -468,6 +522,7 @@ private boolean isFileCacheEmpty(FileCacheStats stats) {
         return stats.getUsed().getBytes() == 0L && stats.getActive().getBytes() == 0L;
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6738")
     public void testPruneFileCacheOnIndexDeletion() throws Exception {
         final String snapshotName = "test-snap";
         final String repoName = "test-repo";
@@ -489,4 +544,48 @@ public void testPruneFileCacheOnIndexDeletion() throws Exception {
         deleteIndicesAndEnsureGreen(client, restoredIndexName1);
         assertAllNodesFileCacheEmpty();
     }
+
+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6686")
+    public void testCacheFilesAreClosedAfterUse() throws Exception {
+        final int numReplicasIndex = randomIntBetween(1, 4);
+        final String indexName = "test-idx";
+        final String restoredIndexName = indexName + "-copy";
+        final String repoName = "test-repo";
+        final String snapshotName = "test-snap";
+        final String id = randomAlphaOfLength(5);
+        final Client client = client();
+
+        internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1);
+        createIndex(indexName);
+        client().prepareIndex(indexName).setId(id).setSource("field", "test").get();
+        ensureGreen();
+        createRepositoryWithSettings(null, repoName);
+        takeSnapshot(client, snapshotName, repoName, indexName);
+        restoreSnapshotAndEnsureGreen(client, snapshotName, repoName);
+
+        // Search the document to make the index fetch data from the remote snapshot into local storage
+        SearchResponse searchResponse = client().prepareSearch(restoredIndexName).setQuery(QueryBuilders.termQuery("field", "test")).get();
+        assertHitCount(searchResponse, 1);
+
+        // Deleting the restored index should close the local cache files
+        deleteIndicesAndEnsureGreen(client, restoredIndexName);
+
+        logger.info("--> validate all the cache files are closed");
+        // Get the path of the cache files
+        final NodeEnvironment nodeEnv = internalCluster().getInstance(NodeEnvironment.class);
+        Path fileCachePath = nodeEnv.fileCacheNodePath().fileCachePath;
+        // Find all the files in the path
+        try (Stream<Path> paths = Files.walk(fileCachePath)) {
+            paths.filter(Files::isRegularFile).forEach(path -> {
+                // Try to move the file in place to check whether it is closed or not.
+                try {
+                    Files.move(path, path, StandardCopyOption.REPLACE_EXISTING);
+                } catch (IOException e) {
+                    fail("No exception is expected. The file can't be moved, so it may not be closed.");
The file can't be moved, so it may not be closed."); + } + }); + } catch (NoSuchFileException e) { + logger.debug("--> the path for the cache files doesn't exist"); + } + } } diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 4eba5a591d8f9..8a6e209a1fbc5 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -93,6 +93,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_7 = new Version(1030799, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_8 = new Version(1030899, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_9 = new Version(1030999, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_1_3_10 = new Version(1031099, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_2 = new Version(2000299, org.apache.lucene.util.Version.LUCENE_9_1_0); @@ -109,6 +110,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_5_0 = new Version(2050099, org.apache.lucene.util.Version.LUCENE_9_4_2); public static final Version V_2_5_1 = new Version(2050199, org.apache.lucene.util.Version.LUCENE_9_4_2); public static final Version V_2_6_0 = new Version(2060099, org.apache.lucene.util.Version.LUCENE_9_5_0); + public static final Version V_2_6_1 = new Version(2060199, org.apache.lucene.util.Version.LUCENE_9_5_0); public static final Version V_2_7_0 = new Version(2070099, org.apache.lucene.util.Version.LUCENE_9_5_0); public static final Version CURRENT = V_2_7_0; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index 42758b845093f..d475826b059d8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -62,6 +62,7 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CollectionUtils; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.discovery.Discovery; import org.opensearch.index.IndexNotFoundException; import org.opensearch.node.NodeClosedException; @@ -279,15 +280,20 @@ private void executeHealth( final Predicate validationPredicate = newState -> validateRequest(request, newState, waitCount); if (validationPredicate.test(currentState)) { ClusterHealthResponse clusterHealthResponse = getResponse(request, currentState, waitCount, TimeoutState.OK); - if (request.ensureNodeWeighedIn() && clusterHealthResponse.hasDiscoveredClusterManager()) { - DiscoveryNode localNode = currentState.getNodes().getLocalNode(); - // TODO: make this check more generic, check for node role instead - if (localNode.isDataNode()) { - assert request.local() == true : "local node request false for request for local node weighed in"; - boolean weighedAway = WeightedRoutingUtils.isWeighedAway(localNode.getId(), currentState); - if (weighedAway) { - 
listener.onFailure(new NodeWeighedAwayException("local node is weighed away")); - return; + if (request.ensureNodeWeighedIn()) { + if (clusterHealthResponse.hasDiscoveredClusterManager() == false) { + listener.onFailure(new ClusterManagerNotDiscoveredException("cluster-manager not discovered")); + return; + } else { + DiscoveryNode localNode = currentState.getNodes().getLocalNode(); + // TODO: make this check more generic, check for node role instead + if (localNode.isDataNode()) { + assert request.local() == true : "local node request false for request for local node weighed in"; + boolean weighedAway = WeightedRoutingUtils.isWeighedAway(localNode.getId(), currentState); + if (weighedAway) { + listener.onFailure(new NodeWeighedAwayException("local node is weighed away")); + return; + } } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index a4edd1d99148a..4643853e4494b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -59,7 +59,9 @@ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "cluster_update_settings_response", true, - args -> { return new ClusterUpdateSettingsResponse((boolean) args[0], (Settings) args[1], (Settings) args[2]); } + args -> { + return new ClusterUpdateSettingsResponse((boolean) args[0], (Settings) args[1], (Settings) args[2]); + } ); static { declareAcknowledgedField(PARSER); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/TransportAddWeightedRoutingAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/TransportAddWeightedRoutingAction.java index 249e313c1f53b..56203d4edc3d0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/TransportAddWeightedRoutingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/TransportAddWeightedRoutingAction.java @@ -81,12 +81,9 @@ protected void clusterManagerOperation( } weightedRoutingService.registerWeightedRoutingMetadata( request, - ActionListener.delegateFailure( - listener, - (delegatedListener, response) -> { - delegatedListener.onResponse(new ClusterPutWeightedRoutingResponse(response.isAcknowledged())); - } - ) + ActionListener.delegateFailure(listener, (delegatedListener, response) -> { + delegatedListener.onResponse(new ClusterPutWeightedRoutingResponse(response.isAcknowledged())); + }) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java index 77f09f02c9a9c..556d46683f0ab 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java @@ -132,7 +132,9 @@ protected void clusterManagerOperation( new String[] { indexName }, ActiveShardCount.DEFAULT, request.timeout(), - shardsAcked -> { finalListener.onResponse(new CreateIndexResponse(true, shardsAcked, indexName)); }, + shardsAcked -> { + finalListener.onResponse(new 
CreateIndexResponse(true, shardsAcked, indexName)); + }, finalListener::onFailure ); } else { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationShardStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationShardStatsResponse.java new file mode 100644 index 0000000000000..b6855507669b6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationShardStatsResponse.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.replication; + +import org.opensearch.common.Nullable; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.index.SegmentReplicationPerGroupStats; +import org.opensearch.indices.replication.SegmentReplicationState; + +import java.io.IOException; + +/** + * Segment Replication specific response object for fetching stats from either a primary + * or replica shard. The stats returned are different depending on primary or replica. + * + * @opensearch.internal + */ +public class SegmentReplicationShardStatsResponse implements Writeable { + + @Nullable + private final SegmentReplicationPerGroupStats primaryStats; + + @Nullable + private final SegmentReplicationState replicaStats; + + public SegmentReplicationShardStatsResponse(StreamInput in) throws IOException { + this.primaryStats = in.readOptionalWriteable(SegmentReplicationPerGroupStats::new); + this.replicaStats = in.readOptionalWriteable(SegmentReplicationState::new); + } + + public SegmentReplicationShardStatsResponse(SegmentReplicationPerGroupStats primaryStats) { + this.primaryStats = primaryStats; + this.replicaStats = null; + } + + public SegmentReplicationShardStatsResponse(SegmentReplicationState replicaStats) { + this.replicaStats = replicaStats; + this.primaryStats = null; + } + + public SegmentReplicationPerGroupStats getPrimaryStats() { + return primaryStats; + } + + public SegmentReplicationState getReplicaStats() { + return replicaStats; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(primaryStats); + out.writeOptionalWriteable(replicaStats); + } + + @Override + public String toString() { + return "SegmentReplicationShardStatsResponse{" + "primaryStats=" + primaryStats + ", replicaStats=" + replicaStats + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java index fdd29990fb446..1142bf697db27 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java @@ -24,11 +24,8 @@ public class SegmentReplicationStatsRequest extends BroadcastRequest { private boolean detailed = false; // Provides extra details in the response private boolean activeOnly = false; // Only reports on active segment replication events - private String[] shards = new String[0]; - private boolean completedOnly = false; - /** * Constructs a request for 
segment replication stats information for all shards */ @@ -40,8 +37,6 @@ public SegmentReplicationStatsRequest(StreamInput in) throws IOException { super(in); detailed = in.readBoolean(); activeOnly = in.readBoolean(); - completedOnly = in.readBoolean(); - } /** @@ -91,25 +86,6 @@ public void activeOnly(boolean activeOnly) { this.activeOnly = activeOnly; } - /** - * True if completedOnly flag is set, false otherwise. This value is false by default. - * - * @return True if completedOnly flag is set, false otherwise - */ - public boolean completedOnly() { - return completedOnly; - } - - /** - * Set value of the completedOnly flag. If true, this request will only respond with - * latest completed segment replication event information. - * - * @param completedOnly Whether or not to set the completedOnly flag. - */ - public void completedOnly(boolean completedOnly) { - this.completedOnly = completedOnly; - } - /** * Contains list of shard id's if shards are passed, empty otherwise. Array is empty by default. * @@ -134,6 +110,5 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(detailed); out.writeBoolean(activeOnly); - out.writeBoolean(completedOnly); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java index abd48cfe0ba4f..7e68d2ac59f07 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java @@ -35,11 +35,6 @@ public SegmentReplicationStatsRequestBuilder setActiveOnly(boolean activeOnly) { return this; } - public SegmentReplicationStatsRequestBuilder setCompletedOnly(boolean completedOnly) { - request.completedOnly(completedOnly); - return this; - } - public SegmentReplicationStatsRequestBuilder shards(String... 
indices) { request.shards(indices); return this; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java index 2f72d7dd3e544..a72455be3713a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java @@ -15,7 +15,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.index.SegmentReplicationPerGroupStats; import java.io.IOException; import java.util.List; @@ -27,54 +27,50 @@ * @opensearch.internal */ public class SegmentReplicationStatsResponse extends BroadcastResponse { - private final Map> shardSegmentReplicationStates; + private final Map> replicationStats; public SegmentReplicationStatsResponse(StreamInput in) throws IOException { super(in); - shardSegmentReplicationStates = in.readMapOfLists(StreamInput::readString, SegmentReplicationState::new); + replicationStats = in.readMapOfLists(StreamInput::readString, SegmentReplicationPerGroupStats::new); } /** * Constructs segment replication stats information for a collection of indices and associated shards. Keeps track of how many total shards * were seen, and out of those how many were successfully processed and how many failed. * - * @param totalShards Total count of shards seen - * @param successfulShards Count of shards successfully processed - * @param failedShards Count of shards which failed to process - * @param shardSegmentReplicationStates Map of indices to shard replication information - * @param shardFailures List of failures processing shards + * @param totalShards Total count of shards seen + * @param successfulShards Count of shards successfully processed + * @param failedShards Count of shards which failed to process + * @param replicationStats Map of indices to a list of {@link SegmentReplicationPerGroupStats} + * @param shardFailures List of failures processing shards */ public SegmentReplicationStatsResponse( int totalShards, int successfulShards, int failedShards, - Map> shardSegmentReplicationStates, + Map> replicationStats, List shardFailures ) { super(totalShards, successfulShards, failedShards, shardFailures); - this.shardSegmentReplicationStates = shardSegmentReplicationStates; + this.replicationStats = replicationStats; } - public boolean hasSegmentReplicationStats() { - return shardSegmentReplicationStates.size() > 0; - } - - public Map> shardSegmentReplicationStates() { - return shardSegmentReplicationStates; + public Map> getReplicationStats() { + return replicationStats; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - if (hasSegmentReplicationStats()) { - for (String index : shardSegmentReplicationStates.keySet()) { - List segmentReplicationStates = shardSegmentReplicationStates.get(index); + if (replicationStats.size() > 0) { + for (String index : replicationStats.keySet()) { + List segmentReplicationStates = replicationStats.get(index); if (segmentReplicationStates == null || segmentReplicationStates.size() == 0) { continue; } builder.startObject(index); - builder.startArray("shards"); - for 
(SegmentReplicationState segmentReplicationState : segmentReplicationStates) { + builder.startArray("primary_stats"); + for (SegmentReplicationPerGroupStats segmentReplicationState : segmentReplicationStates) { builder.startObject(); segmentReplicationState.toXContent(builder, params); builder.endObject(); @@ -90,7 +86,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeMapOfLists(shardSegmentReplicationStates, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMapOfLists(replicationStats, StreamOutput::writeString, (o, v) -> v.writeTo(o)); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java index b216824f420cc..f8d5156ff1192 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java @@ -8,7 +8,6 @@ package org.opensearch.action.admin.indices.replication; -import org.opensearch.OpenSearchStatusException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; @@ -22,22 +21,24 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.index.IndexService; +import org.opensearch.index.SegmentReplicationPerGroupStats; +import org.opensearch.index.SegmentReplicationPressureService; +import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.indices.replication.SegmentReplicationTargetService; -import org.opensearch.rest.RestStatus; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.HashSet; -import java.util.Set; +import java.util.Arrays; import java.util.List; import java.util.ArrayList; import java.util.Map; import java.util.HashMap; +import java.util.stream.Collectors; /** * Transport action for shard segment replication operation. 
This transport action does not actually @@ -48,11 +49,11 @@ public class TransportSegmentReplicationStatsAction extends TransportBroadcastByNodeAction< SegmentReplicationStatsRequest, SegmentReplicationStatsResponse, - SegmentReplicationState> { + SegmentReplicationShardStatsResponse> { private final SegmentReplicationTargetService targetService; private final IndicesService indicesService; - private String singleIndexWithSegmentReplicationDisabled = null; + private final SegmentReplicationPressureService pressureService; @Inject public TransportSegmentReplicationStatsAction( @@ -61,7 +62,8 @@ public TransportSegmentReplicationStatsAction( IndicesService indicesService, SegmentReplicationTargetService targetService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver + IndexNameExpressionResolver indexNameExpressionResolver, + SegmentReplicationPressureService pressureService ) { super( SegmentReplicationStatsAction.NAME, @@ -74,11 +76,12 @@ public TransportSegmentReplicationStatsAction( ); this.indicesService = indicesService; this.targetService = targetService; + this.pressureService = pressureService; } @Override - protected SegmentReplicationState readShardResult(StreamInput in) throws IOException { - return new SegmentReplicationState(in); + protected SegmentReplicationShardStatsResponse readShardResult(StreamInput in) throws IOException { + return new SegmentReplicationShardStatsResponse(in); } @Override @@ -87,41 +90,51 @@ protected SegmentReplicationStatsResponse newResponse( int totalShards, int successfulShards, int failedShards, - List responses, + List responses, List shardFailures, ClusterState clusterState ) { - // throw exception if API call is made on single index with segment replication disabled. - if (singleIndexWithSegmentReplicationDisabled != null) { - String index = singleIndexWithSegmentReplicationDisabled; - singleIndexWithSegmentReplicationDisabled = null; - throw new OpenSearchStatusException("Segment Replication is not enabled on Index: " + index, RestStatus.BAD_REQUEST); - } String[] shards = request.shards(); - Set set = new HashSet<>(); - if (shards.length > 0) { - for (String shard : shards) { - set.add(shard); + final List shardsToFetch = Arrays.stream(shards).map(Integer::valueOf).collect(Collectors.toList()); + + // organize replica responses by allocationId. + final Map replicaStats = new HashMap<>(); + // map of index name to list of replication group stats. 
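+ // (one entry per index; replica-side state is stitched into each group's replica stats further below)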
+ final Map> primaryStats = new HashMap<>(); + for (SegmentReplicationShardStatsResponse response : responses) { + if (response != null) { + if (response.getReplicaStats() != null) { + final ShardRouting shardRouting = response.getReplicaStats().getShardRouting(); + if (shardsToFetch.isEmpty() || shardsToFetch.contains(shardRouting.shardId().getId())) { + replicaStats.putIfAbsent(shardRouting.allocationId().getId(), response.getReplicaStats()); + } + } + if (response.getPrimaryStats() != null) { + final ShardId shardId = response.getPrimaryStats().getShardId(); + if (shardsToFetch.isEmpty() || shardsToFetch.contains(shardId.getId())) { + primaryStats.compute(shardId.getIndexName(), (k, v) -> { + if (v == null) { + final ArrayList list = new ArrayList<>(); + list.add(response.getPrimaryStats()); + return list; + } else { + v.add(response.getPrimaryStats()); + return v; + } + }); + } + } } } - Map> shardResponses = new HashMap<>(); - for (SegmentReplicationState segmentReplicationState : responses) { - if (segmentReplicationState == null) { - continue; - } - - // Limit responses to only specific shard id's passed in query paramter shards. - int shardId = segmentReplicationState.getShardRouting().shardId().id(); - if (shards.length > 0 && set.contains(Integer.toString(shardId)) == false) { - continue; - } - String indexName = segmentReplicationState.getShardRouting().getIndexName(); - if (!shardResponses.containsKey(indexName)) { - shardResponses.put(indexName, new ArrayList<>()); + // combine the replica stats to the shard stat entry in each group. + for (Map.Entry> entry : primaryStats.entrySet()) { + for (SegmentReplicationPerGroupStats group : entry.getValue()) { + for (SegmentReplicationShardStats replicaStat : group.getReplicaStats()) { + replicaStat.setCurrentReplicationState(replicaStats.getOrDefault(replicaStat.getAllocationId(), null)); + } } - shardResponses.get(indexName).add(segmentReplicationState); } - return new SegmentReplicationStatsResponse(totalShards, successfulShards, failedShards, shardResponses, shardFailures); + return new SegmentReplicationStatsResponse(totalShards, successfulShards, failedShards, primaryStats, shardFailures); } @Override @@ -130,30 +143,24 @@ protected SegmentReplicationStatsRequest readRequestFrom(StreamInput in) throws } @Override - protected SegmentReplicationState shardOperation(SegmentReplicationStatsRequest request, ShardRouting shardRouting) { + protected SegmentReplicationShardStatsResponse shardOperation(SegmentReplicationStatsRequest request, ShardRouting shardRouting) { IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); IndexShard indexShard = indexService.getShard(shardRouting.shardId().id()); ShardId shardId = shardRouting.shardId(); - // check if API call is made on single index with segment replication disabled. - if (request.indices().length == 1 && indexShard.indexSettings().isSegRepEnabled() == false) { - singleIndexWithSegmentReplicationDisabled = shardRouting.getIndexName(); + if (indexShard.indexSettings().isSegRepEnabled() == false) { return null; } - if (indexShard.indexSettings().isSegRepEnabled() == false || shardRouting.primary()) { - return null; + + if (shardRouting.primary()) { + return new SegmentReplicationShardStatsResponse(pressureService.getStatsForShard(indexShard)); } // return information about only on-going segment replication events. 
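+ // (the completedOnly lookup that used to follow here was removed along with the request flag)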
if (request.activeOnly()) { - return targetService.getOngoingEventSegmentReplicationState(shardId); - } - - // return information about only latest completed segment replication events. - if (request.completedOnly()) { - return targetService.getlatestCompletedEventSegmentReplicationState(shardId); + return new SegmentReplicationShardStatsResponse(targetService.getOngoingEventSegmentReplicationState(shardId)); } - return targetService.getSegmentReplicationState(shardId); + return new SegmentReplicationShardStatsResponse(targetService.getSegmentReplicationState(shardId)); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverService.java index c3862bb115b21..65b18845b919a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -74,7 +74,7 @@ */ public class MetadataRolloverService { private static final Pattern INDEX_NAME_PATTERN = Pattern.compile("^.*-\\d+$"); - private static final List VALID_ROLLOVER_TARGETS = org.opensearch.common.collect.List.of(ALIAS, DATA_STREAM); + private static final List VALID_ROLLOVER_TARGETS = List.of(ALIAS, DATA_STREAM); private final ThreadPool threadPool; private final MetadataCreateIndexService createIndexService; diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index 0657fab55b220..2552d43688f00 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -81,6 +81,7 @@ import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.index.get.GetResult; @@ -133,6 +134,7 @@ public class TransportShardBulkAction extends TransportWriteAction= shardsIt.size()) { return null; } - ShardRouting next = FailAwareWeightedRouting.getInstance().findNext(shardsIt.get(shardIndex), clusterService.state(), failure); + ShardRouting next = FailAwareWeightedRouting.getInstance() + .findNext(shardsIt.get(shardIndex), clusterService.state(), failure, this::moveToNextShard); if (next != null) { return next; diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulateExecutionService.java b/server/src/main/java/org/opensearch/action/ingest/SimulateExecutionService.java index ae66dafc954f9..82db29c950161 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulateExecutionService.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulateExecutionService.java @@ -76,10 +76,9 @@ void executeDocument( pipeline.getVersion(), verbosePipelineProcessor ); - ingestDocument.executePipeline( - verbosePipeline, - (result, e) -> { handler.accept(new SimulateDocumentVerboseResult(processorResultList), e); } - ); + ingestDocument.executePipeline(verbosePipeline, (result, e) -> { + handler.accept(new SimulateDocumentVerboseResult(processorResultList), e); + }); } else { ingestDocument.executePipeline(pipeline, (result, e) -> { if (e == null) { 
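Across the next several files the same mechanical change repeats: every caller of FailAwareWeightedRouting.findNext now passes a Runnable that fires once for each shard copy skipped because its node is weighed away, keeping per-request counters such as totalOps and counterOps in step with the copies actually consumed. A minimal, self-contained sketch of that contract follows; FindNextSketch, the String shard IDs, and the weighedAway predicate are illustrative stand-ins, not OpenSearch types:

```java
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;

class FindNextSketch {
    // Returns the next copy whose node is not weighed away; runs
    // onShardSkipped once for every copy that is passed over.
    static String findNext(Iterator<String> copies, Predicate<String> weighedAway, Runnable onShardSkipped) {
        while (copies.hasNext()) {
            String next = copies.next();
            if (weighedAway.test(next) == false) {
                return next; // usable copy found
            }
            onShardSkipped.run(); // keep the caller's bookkeeping in sync
        }
        return null; // exhausted: the caller treats this as the last shard
    }

    public static void main(String[] args) {
        AtomicInteger totalOps = new AtomicInteger(); // stand-in for a per-request operations counter
        String target = findNext(
            List.of("copy-on-node-a", "copy-on-node-b").iterator(),
            copy -> copy.endsWith("node-a"), // pretend node-a carries zero routing weight
            totalOps::incrementAndGet
        );
        System.out.println(target + " (skipped " + totalOps.get() + ")"); // copy-on-node-b (skipped 1)
    }
}
```

Without such a callback, a skipped copy would leave the expected-operations count short and a request could wait forever for responses that will never arrive; note that TransportSingleShardAction below passes a no-op `() -> {}` because it tracks progress differently.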
diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 6b6678361b2b5..9a94737c84385 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -450,7 +450,8 @@ private void onShardFailure(final int shardIndex, @Nullable SearchShardTarget sh // we always add the shard failure for a specific shard instance // we do make sure to clean it on a successful response from a shard onShardFailure(shardIndex, shard, e); - SearchShardTarget nextShard = FailAwareWeightedRouting.getInstance().findNext(shardIt, clusterState, e); + SearchShardTarget nextShard = FailAwareWeightedRouting.getInstance() + .findNext(shardIt, clusterState, e, () -> totalOps.incrementAndGet()); final boolean lastShard = nextShard == null; logger.debug( diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java index cdbeb3dc2b749..5ea959bc9acf9 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java @@ -24,7 +24,6 @@ import java.util.List; import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.opensearch.rest.RestStatus.NOT_FOUND; import static org.opensearch.rest.RestStatus.OK; /** @@ -57,7 +56,6 @@ public List getDeletePitResults() { */ @Override public RestStatus status() { - if (deletePitResults.isEmpty()) return NOT_FOUND; return OK; } diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index 241b3de72a258..94a6262874ac2 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -514,7 +514,9 @@ public static void registerRequestHandler(TransportService transportService, Sea FREE_PIT_CONTEXT_ACTION_NAME, ThreadPool.Names.SAME, PitFreeContextsRequest::new, - (request, channel, task) -> { channel.sendResponse(searchService.freeReaderContextsIfFound(request.getContextIds())); } + (request, channel, task) -> { + channel.sendResponse(searchService.freeReaderContextsIfFound(request.getContextIds())); + } ); TransportActionProxy.registerProxyAction(transportService, FREE_PIT_CONTEXT_ACTION_NAME, DeletePitResponse::new); diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/opensearch/action/support/broadcast/TransportBroadcastAction.java index 10645c744b2f3..5abf97b7ef979 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/TransportBroadcastAction.java @@ -251,7 +251,8 @@ void onOperation(@Nullable ShardRouting shard, final ShardIterator shardIt, int // we set the shard failure always, even if its the first in the replication group, and the next one // will work (it will just override it...) 
setFailure(shardIt, shardIndex, e); - ShardRouting nextShard = FailAwareWeightedRouting.getInstance().findNext(shardIt, clusterService.state(), e); + ShardRouting nextShard = FailAwareWeightedRouting.getInstance() + .findNext(shardIt, clusterService.state(), e, () -> counterOps.incrementAndGet()); if (nextShard != null) { if (e != null) { diff --git a/server/src/main/java/org/opensearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/opensearch/action/support/single/shard/TransportSingleShardAction.java index d8c4913e595a4..927d3946a3643 100644 --- a/server/src/main/java/org/opensearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/opensearch/action/support/single/shard/TransportSingleShardAction.java @@ -245,7 +245,8 @@ private void perform(@Nullable final Exception currentFailure) { lastFailure = currentFailure; this.lastFailure = currentFailure; } - ShardRouting shardRouting = FailAwareWeightedRouting.getInstance().findNext(shardIt, clusterService.state(), currentFailure); + ShardRouting shardRouting = FailAwareWeightedRouting.getInstance() + .findNext(shardIt, clusterService.state(), currentFailure, () -> {}); if (shardRouting == null) { Exception failure = lastFailure; diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 8e913e05390c2..5b6ade0f3b461 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -36,7 +36,6 @@ import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.env.Environment; import org.opensearch.http.HttpTransportSettings; import org.opensearch.plugins.PluginInfo; @@ -317,9 +316,7 @@ static void addFilePermissions(Permissions policy, Environment environment) thro addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), "read,readlink", false); addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink", false); addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsFile(), "read,readlink", false); - if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { - addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.extensionDir(), "read,readlink", false); - } + addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.extensionDir(), "read,readlink", false); addDirectoryPath(policy, "path.conf'", environment.configFile(), "read,readlink", false); // read-write dirs addDirectoryPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete", false); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java index 6c7f4ba487568..17105c5b68b10 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java @@ -103,7 +103,7 @@ public ClusterInfo(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { reservedSpaceMap = in.readMap(NodeAndPath::new, ReservedSpace::new); } else { - reservedSpaceMap = org.opensearch.common.collect.Map.of(); + reservedSpaceMap = Map.of(); } ImmutableOpenMap.Builder leastBuilder = 
ImmutableOpenMap.builder(); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java index 33200b46a5e98..630684e267093 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java @@ -59,7 +59,6 @@ import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableMap; -import static org.opensearch.common.collect.Map.of; /** * An index template is comprised of a set of index patterns, an optional template, and a list of @@ -337,7 +336,10 @@ public TimestampField getTimestampField() { public Map getDataStreamMappingSnippet() { return singletonMap( MapperService.SINGLE_MAPPING_NAME, - singletonMap("_data_stream_timestamp", unmodifiableMap(of("enabled", true, "timestamp_field", getTimestampField().toMap()))) + singletonMap( + "_data_stream_timestamp", + unmodifiableMap(Map.of("enabled", true, "timestamp_field", getTimestampField().toMap())) + ) ); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java index f9328d5b61183..11ead3e29c346 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java @@ -46,7 +46,6 @@ import static org.opensearch.cluster.metadata.DataStream.getDefaultBackingIndexName; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_HIDDEN_SETTING; -import static org.opensearch.common.collect.List.copyOf; /** * An index abstraction is a reference to one or more concrete indices. @@ -346,7 +345,7 @@ class DataStream implements IndexAbstraction { public DataStream(org.opensearch.cluster.metadata.DataStream dataStream, List dataStreamIndices) { this.dataStream = dataStream; - this.dataStreamIndices = copyOf(dataStreamIndices); + this.dataStreamIndices = List.copyOf(dataStreamIndices); this.writeIndex = dataStreamIndices.get(dataStreamIndices.size() - 1); assert writeIndex.getIndex().getName().equals(getDefaultBackingIndexName(dataStream.getName(), dataStream.getGeneration())); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java index 3d1609e0eb9c0..1fbb042edca76 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java @@ -90,10 +90,7 @@ public class IndexNameExpressionResolver { private final DateMathExpressionResolver dateMathExpressionResolver = new DateMathExpressionResolver(); private final WildcardExpressionResolver wildcardExpressionResolver = new WildcardExpressionResolver(); - private final List expressionResolvers = org.opensearch.common.collect.List.of( - dateMathExpressionResolver, - wildcardExpressionResolver - ); + private final List expressionResolvers = List.of(dateMathExpressionResolver, wildcardExpressionResolver); private final ThreadContext threadContext; @@ -177,7 +174,7 @@ public List dataStreamNames(ClusterState state, IndicesOptions options, } List dataStreams = wildcardExpressionResolver.resolve(context, Arrays.asList(indexExpressions)); - return ((dataStreams == null) ? 
org.opensearch.common.collect.List.of() : dataStreams).stream() + return ((dataStreams == null) ? List.of() : dataStreams).stream() .map(x -> state.metadata().getIndicesLookup().get(x)) .filter(Objects::nonNull) .filter(ia -> ia.getType() == IndexAbstraction.Type.DATA_STREAM) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java index 7be5ea7e2c34a..b34e37aea8b7d 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -100,7 +100,9 @@ public void createDataStream(CreateDataStreamClusterStateUpdateRequest request, new String[] { firstBackingIndexName }, ActiveShardCount.DEFAULT, request.masterNodeTimeout(), - shardsAcked -> { finalListener.onResponse(new AcknowledgedResponse(true)); }, + shardsAcked -> { + finalListener.onResponse(new AcknowledgedResponse(true)); + }, finalListener::onFailure ); } else { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index d904459af38b8..9c0825158da09 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -731,7 +731,7 @@ private ClusterState applyCreateIndexRequestWithExistingMetadata( // shard id and the current timestamp indexService.newQueryShardContext(0, null, () -> 0L, null) ), - org.opensearch.common.collect.List.of(), + List.of(), metadataTransformer ); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/FailAwareWeightedRouting.java b/server/src/main/java/org/opensearch/cluster/routing/FailAwareWeightedRouting.java index 1e4c8a24863cd..dbef876c9a258 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/FailAwareWeightedRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/FailAwareWeightedRouting.java @@ -60,9 +60,17 @@ private boolean isInternalFailure(Exception exception) { * routing weight set to zero * * @param shardIt Shard Iterator containing order in which shard copies for a shard need to be requested + * @param clusterState The current cluster state + * @param exception The underlying search exception + * @param onShardSkipped The runnable to execute once a shard is skipped * @return the next shard copy */ - public SearchShardTarget findNext(final SearchShardIterator shardIt, ClusterState clusterState, Exception exception) { + public SearchShardTarget findNext( + final SearchShardIterator shardIt, + ClusterState clusterState, + Exception exception, + Runnable onShardSkipped + ) { SearchShardTarget next = shardIt.nextOrNull(); while (next != null && WeightedRoutingUtils.isWeighedAway(next.getNodeId(), clusterState)) { SearchShardTarget nextShard = next; @@ -72,6 +80,7 @@ public SearchShardTarget findNext(final SearchShardIterator shardIt, ClusterStat break; } next = shardIt.nextOrNull(); + onShardSkipped.run(); } return next; } @@ -82,9 +91,12 @@ public SearchShardTarget findNext(final SearchShardIterator shardIt, ClusterStat * routing weight set to zero * * @param shardsIt Shard Iterator containing order in which shard copies for a shard need to be requested + * @param clusterState The current cluster state + * @param exception The underlying 
search exception + * @param onShardSkipped The runnable to execute once a shard is skipped * @return the next shard copy */ - public ShardRouting findNext(final ShardsIterator shardsIt, ClusterState clusterState, Exception exception) { + public ShardRouting findNext(final ShardsIterator shardsIt, ClusterState clusterState, Exception exception, Runnable onShardSkipped) { ShardRouting next = shardsIt.nextOrNull(); while (next != null && WeightedRoutingUtils.isWeighedAway(next.currentNodeId(), clusterState)) { @@ -95,6 +107,7 @@ public ShardRouting findNext(final ShardsIterator shardsIt, ClusterState cluster break; } next = shardsIt.nextOrNull(); + onShardSkipped.run(); } return next; } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java index 3d9847ca35931..e8ab0738c18da 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java @@ -8,80 +8,39 @@ import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.opensearch.cluster.routing.allocation.allocator.ShardsBalancer; -import java.util.ArrayList; -import java.util.List; -import java.util.function.Predicate; +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isIndexShardsPerNodeBreached; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPerIndexPrimaryShardsPerNodeBreached; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPrimaryShardsPerNodeBreached; /** - * Allocation constraints specify conditions which, if breached, reduce the - * priority of a node for receiving shard allocations. + * Allocation constraints specify conditions which, if breached, reduce the priority of a node for receiving unassigned + * shard allocations. Weight calculation in other scenarios like shard movement and re-balancing remain unaffected by + * this constraint. 
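+ * Three constraints are registered by default: per-index shard count, per-index primary shard balance, and
+ * cluster-wide primary shard balance; each can be toggled individually via {@link #updateAllocationConstraint(String, boolean)}.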
* * @opensearch.internal */ public class AllocationConstraints { - public final long CONSTRAINT_WEIGHT = 1000000L; - private List> constraintPredicates; + private Map constraints; public AllocationConstraints() { - this.constraintPredicates = new ArrayList<>(1); - this.constraintPredicates.add(isIndexShardsPerNodeBreached()); + this.constraints = new HashMap<>(); + this.constraints.putIfAbsent(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, new Constraint(isIndexShardsPerNodeBreached())); + this.constraints.putIfAbsent(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached())); + this.constraints.putIfAbsent(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPrimaryShardsPerNodeBreached())); } - class ConstraintParams { - private ShardsBalancer balancer; - private BalancedShardsAllocator.ModelNode node; - private String index; - - ConstraintParams(ShardsBalancer balancer, BalancedShardsAllocator.ModelNode node, String index) { - this.balancer = balancer; - this.node = node; - this.index = index; - } + public void updateAllocationConstraint(String constraint, boolean enable) { + this.constraints.get(constraint).setEnable(enable); } - /** - * Evaluates configured allocation constraint predicates for given node - index - * combination; and returns a weight value based on the number of breached - * constraints. - * - * Constraint weight should be added to the weight calculated via weight - * function, to reduce priority of allocating on nodes with breached - * constraints. - * - * This weight function is used only in case of unassigned shards to avoid overloading a newly added node. - * Weight calculation in other scenarios like shard movement and re-balancing remain unaffected by this function. - */ public long weight(ShardsBalancer balancer, BalancedShardsAllocator.ModelNode node, String index) { - int constraintsBreached = 0; - ConstraintParams params = new ConstraintParams(balancer, node, index); - for (Predicate predicate : constraintPredicates) { - if (predicate.test(params)) { - constraintsBreached++; - } - } - return constraintsBreached * CONSTRAINT_WEIGHT; + Constraint.ConstraintParams params = new Constraint.ConstraintParams(balancer, node, index); + return params.weight(constraints); } - - /** - * Constraint to control number of shards of an index allocated on a single - * node. - * - * In current weight function implementation, when a node has significantly - * fewer shards than other nodes (e.g. during single new node addition or node - * replacement), its weight is much less than other nodes. All shard allocations - * at this time tend to land on the new node with skewed weight. This breaks - * index level balance in the cluster, by creating all shards of the same index - * on one node, often resulting in a hotspot on that node. - * - * This constraint is breached when balancer attempts to allocate more than - * average shards per index per node. 
- */ - private Predicate isIndexShardsPerNodeBreached() { - return (params) -> { - int currIndexShardsOnNode = params.node.numShards(params.index); - int allowedIndexShardsPerNode = (int) Math.ceil(params.balancer.avgShardsPerNode(params.index)); - return (currIndexShardsOnNode >= allowedIndexShardsPerNode); - }; - } - } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/Constraint.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/Constraint.java new file mode 100644 index 0000000000000..e9c3c0afcbe88 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/Constraint.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.allocation; + +import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.cluster.routing.allocation.allocator.ShardsBalancer; + +import java.util.Map; +import java.util.function.Predicate; + +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CONSTRAINT_WEIGHT; + +/** + * Defines a constraint useful to de-prioritize certain nodes as target of unassigned shards used in {@link AllocationConstraints} or + * re-balancing target used in {@link RebalanceConstraints} + * + * @opensearch.internal + */ +public class Constraint implements Predicate { + + private boolean enable; + private Predicate predicate; + + public Constraint(Predicate constraintPredicate) { + this.predicate = constraintPredicate; + } + + @Override + public boolean test(ConstraintParams constraintParams) { + return this.enable && predicate.test(constraintParams); + } + + public void setEnable(boolean enable) { + this.enable = enable; + } + + static class ConstraintParams { + private ShardsBalancer balancer; + private BalancedShardsAllocator.ModelNode node; + private String index; + + ConstraintParams(ShardsBalancer balancer, BalancedShardsAllocator.ModelNode node, String index) { + this.balancer = balancer; + this.node = node; + this.index = index; + } + + public ShardsBalancer getBalancer() { + return balancer; + } + + public BalancedShardsAllocator.ModelNode getNode() { + return node; + } + + public String getIndex() { + return index; + } + + /** + * Evaluates configured allocation constraint predicates for given node - index + * combination; and returns a weight value based on the number of breached + * constraints. + *

+ * Constraint weight should be added to the weight calculated via weight
+ * function, to reduce priority of allocating on nodes with breached
+ * constraints.
+ * <p>
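+ * A breached constraint contributes {@link ConstraintTypes#CONSTRAINT_WEIGHT} once, so a node that breaches
+ * several constraints is de-prioritized additively.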

+ */ + public long weight(Map constraints) { + long totalConstraintWeight = 0; + for (Constraint constraint : constraints.values()) { + if (constraint.test(this)) { + totalConstraintWeight += CONSTRAINT_WEIGHT; + } + } + return totalConstraintWeight; + } + } +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java new file mode 100644 index 0000000000000..f209e993518c1 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.allocation; + +import java.util.function.Predicate; + +/** + * Defines different constraints definitions + * + * @opensearch.internal + */ +public class ConstraintTypes { + public final static long CONSTRAINT_WEIGHT = 1000000L; + + /** + * Defines per index constraint which is breached when a node contains more than avg number of primary shards for an index + */ + public final static String INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID = "index.primary.shard.balance.constraint"; + + /** + * Defines a cluster constraint which is breached when a node contains more than avg primary shards across all indices + */ + public final static String CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID = "cluster.primary.shard.balance.constraint"; + + /** + * Defines an index constraint which is breached when a node contains more than avg number of shards for an index + */ + public final static String INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID = "index.shard.count.constraint"; + + /** + * Constraint to control number of shards of an index allocated on a single + * node. + * + * In current weight function implementation, when a node has significantly + * fewer shards than other nodes (e.g. during single new node addition or node + * replacement), its weight is much less than other nodes. All shard allocations + * at this time tend to land on the new node with skewed weight. This breaks + * index level balance in the cluster, by creating all shards of the same index + * on one node, often resulting in a hotspot on that node. + * + * This constraint is breached when balancer attempts to allocate more than + * average shards per index per node. + */ + public static Predicate isIndexShardsPerNodeBreached() { + return (params) -> { + int currIndexShardsOnNode = params.getNode().numShards(params.getIndex()); + int allowedIndexShardsPerNode = (int) Math.ceil(params.getBalancer().avgShardsPerNode(params.getIndex())); + return (currIndexShardsOnNode >= allowedIndexShardsPerNode); + }; + } + + /** + * Defines a predicate which returns true when specific to an index, a node contains more than average number of primary + * shards. This constraint is used in weight calculation during allocation and rebalancing. 
When breached a high weight + * {@link ConstraintTypes#CONSTRAINT_WEIGHT} is assigned to node resulting in lesser chances of node being selected + * as allocation or rebalancing target + */ + public static Predicate isPerIndexPrimaryShardsPerNodeBreached() { + return (params) -> { + int perIndexPrimaryShardCount = params.getNode().numPrimaryShards(params.getIndex()); + int perIndexAllowedPrimaryShardCount = (int) Math.ceil(params.getBalancer().avgPrimaryShardsPerNode(params.getIndex())); + return perIndexPrimaryShardCount > perIndexAllowedPrimaryShardCount; + }; + } + + /** + * Defines a predicate which returns true when a node contains more than average number of primary shards. This + * constraint is used in weight calculation during allocation only. When breached a high weight {@link ConstraintTypes#CONSTRAINT_WEIGHT} + * is assigned to node resulting in lesser chances of node being selected as allocation target + */ + public static Predicate isPrimaryShardsPerNodeBreached() { + return (params) -> { + int primaryShardCount = params.getNode().numPrimaryShards(); + int allowedPrimaryShardCount = (int) Math.ceil(params.getBalancer().avgPrimaryShardsPerNode()); + return primaryShardCount >= allowedPrimaryShardCount; + }; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java index d93dc8b12d28a..6f63aff2f3a90 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -403,6 +403,7 @@ public void onNewInfo(ClusterInfo info) { // If all the nodes are breaching high disk watermark, we apply index create block to avoid red clusters. if ((state.getBlocks().hasGlobalBlockWithId(Metadata.CLUSTER_CREATE_INDEX_BLOCK.id()) == false) + && nodes.size() > 0 && nodesOverHighThreshold.size() == nodes.size()) { setIndexCreateBlock(listener, true); } else if (state.getBlocks().hasGlobalBlockWithId(Metadata.CLUSTER_CREATE_INDEX_BLOCK.id()) diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java new file mode 100644 index 0000000000000..a4036ec47ec0e --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.allocation; + +import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.cluster.routing.allocation.allocator.ShardsBalancer; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPerIndexPrimaryShardsPerNodeBreached; + +/** + * Constraints applied during rebalancing round; specify conditions which, if breached, reduce the + * priority of a node for receiving shard relocations. 
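+ * Only the per-index primary shard balance constraint is registered here today (see the constructor below).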
+ *
+ * @opensearch.internal
+ */
+public class RebalanceConstraints {
+
+    private Map<String, Constraint> constraints;
+
+    public RebalanceConstraints() {
+        this.constraints = new HashMap<>();
+        this.constraints.putIfAbsent(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached()));
+    }
+
+    public void updateRebalanceConstraint(String constraint, boolean enable) {
+        this.constraints.get(constraint).setEnable(enable);
+    }
+
+    public long weight(ShardsBalancer balancer, BalancedShardsAllocator.ModelNode node, String index) {
+        Constraint.ConstraintParams params = new Constraint.ConstraintParams(balancer, node, index);
+        return params.weight(constraints);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
index d8761e9b1a78e..0ff0eeba7d394 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
@@ -42,7 +42,9 @@
 import org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus;
 import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision;
 import org.opensearch.cluster.routing.allocation.AllocationConstraints;
+import org.opensearch.cluster.routing.allocation.ConstraintTypes;
 import org.opensearch.cluster.routing.allocation.MoveDecision;
+import org.opensearch.cluster.routing.allocation.RebalanceConstraints;
 import org.opensearch.cluster.routing.allocation.RoutingAllocation;
 import org.opensearch.cluster.routing.allocation.ShardAllocationDecision;
 import org.opensearch.common.inject.Inject;
@@ -58,9 +60,13 @@
 import java.util.Map;
 import java.util.Set;
 
+import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID;
+import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID;
+import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID;
+
 /**
  * The {@link BalancedShardsAllocator} re-balances the nodes allocations
- * within an cluster based on a {@link WeightFunction}. The clusters balance is defined by four parameters which can be set
+ * within a cluster based on a {@link WeightFunction}. The cluster's balance is defined by four parameters which can be set
  * in the cluster update API that allows changes in real-time:
 * <ul>
 * <li><code>cluster.routing.allocation.balance.shard</code> - The shard balance defines the weight factor
 *     for shards allocated on a {@link RoutingNode}</li>
 * <li><code>cluster.routing.allocation.balance.index</code> - The index balance defines a factor to the number
@@ -68,6 +74,7 @@
 *     of {@link org.opensearch.cluster.routing.ShardRouting}s per index allocated on a specific node</li>
 * <li><code>cluster.routing.allocation.balance.threshold</code> - A threshold to set the minimal optimization
 *     value of operations that should be performed</li>
+ * <li><code>cluster.routing.allocation.balance.prefer_primary</code> - Defines whether primary shard balance is desired
+ *     (see the example below)</li>
 * </ul>
 *
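+ *
+ * For example, primary shard balance can be enabled dynamically via the cluster settings API
+ * (a hypothetical request body, shown for illustration only):
+ * <pre>
+ * PUT _cluster/settings
+ * { "persistent": { "cluster.routing.allocation.balance.prefer_primary": true } }
+ * </pre>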
 * These parameters are combined in a {@link WeightFunction} that allows calculation of node weights which
@@ -93,6 +100,10 @@ public class BalancedShardsAllocator implements ShardsAllocator {
         Property.Dynamic,
         Property.NodeScope
     );
+
+    /**
+     * Move primary shards off a node first when shards must be moved because they can no longer remain on it.
+     * See {@link LocalShardsBalancer#moveShards()}.
+     */
     public static final Setting<Boolean> SHARD_MOVE_PRIMARY_FIRST_SETTING = Setting.boolSetting(
         "cluster.routing.allocation.move.primary_first",
         false,
@@ -107,7 +118,22 @@ public class BalancedShardsAllocator implements ShardsAllocator {
         Property.NodeScope
     );
 
+    /**
+     * This setting governs whether primary shard balance is desired during allocation. It is used by {@link ConstraintTypes#isPerIndexPrimaryShardsPerNodeBreached()}
+     * and {@link ConstraintTypes#isPrimaryShardsPerNodeBreached}, which are applied during unassigned shard allocation
+     * {@link LocalShardsBalancer#allocateUnassigned()} and shard re-balance/relocation to a different node via {@link LocalShardsBalancer#balance()}.
+     */
+
+    public static final Setting<Boolean> PREFER_PRIMARY_SHARD_BALANCE = Setting.boolSetting(
+        "cluster.routing.allocation.balance.prefer_primary",
+        false,
+        Property.Dynamic,
+        Property.NodeScope
+    );
+
     private volatile boolean movePrimaryFirst;
+
+    private volatile boolean preferPrimaryShardBalance;
     private volatile WeightFunction weightFunction;
     private volatile float threshold;
@@ -119,6 +145,8 @@ public BalancedShardsAllocator(Settings settings) {
     public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) {
         setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings));
         setThreshold(THRESHOLD_SETTING.get(settings));
+        setPreferPrimaryShardBalance(PREFER_PRIMARY_SHARD_BALANCE.get(settings));
+        clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_BALANCE, this::setPreferPrimaryShardBalance);
         clusterSettings.addSettingsUpdateConsumer(SHARD_MOVE_PRIMARY_FIRST_SETTING, this::setMovePrimaryFirst);
         clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction);
         clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold);
@@ -132,6 +160,17 @@ private void setWeightFunction(float indexBalance, float shardBalanceFactor) {
         weightFunction = new WeightFunction(indexBalance, shardBalanceFactor);
     }
 
+    /**
+     * When primary shard balance is desired, enable the primary shard balancing constraints.
+     * @param preferPrimaryShardBalance whether primary shard balance is preferred
+     */
+    private void setPreferPrimaryShardBalance(boolean preferPrimaryShardBalance) {
+        this.preferPrimaryShardBalance = preferPrimaryShardBalance;
+        this.weightFunction.updateAllocationConstraint(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, preferPrimaryShardBalance);
+        this.weightFunction.updateAllocationConstraint(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, preferPrimaryShardBalance);
+        this.weightFunction.updateRebalanceConstraint(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, preferPrimaryShardBalance);
+    }
+
     private void setThreshold(float threshold) {
         this.threshold = threshold;
     }
@@ -142,7 +181,14 @@ public void allocate(RoutingAllocation allocation) {
             failAllocationOfNewPrimaries(allocation);
             return;
         }
-        final ShardsBalancer localShardsBalancer = new LocalShardsBalancer(logger, allocation, movePrimaryFirst, weightFunction, threshold);
+        final ShardsBalancer localShardsBalancer = new LocalShardsBalancer(
+            logger,
+            allocation,
+            movePrimaryFirst,
+            weightFunction,
+ 
threshold, + preferPrimaryShardBalance + ); localShardsBalancer.allocateUnassigned(); localShardsBalancer.moveShards(); localShardsBalancer.balance(); @@ -157,7 +203,14 @@ public void allocate(RoutingAllocation allocation) { @Override public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, final RoutingAllocation allocation) { - ShardsBalancer localShardsBalancer = new LocalShardsBalancer(logger, allocation, movePrimaryFirst, weightFunction, threshold); + ShardsBalancer localShardsBalancer = new LocalShardsBalancer( + logger, + allocation, + movePrimaryFirst, + weightFunction, + threshold, + preferPrimaryShardBalance + ); AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN; MoveDecision moveDecision = MoveDecision.NOT_TAKEN; if (shard.unassigned()) { @@ -220,6 +273,13 @@ public float getShardBalance() { return weightFunction.shardBalance; } + /** + * Returns preferPrimaryShardBalance. + */ + public boolean getPreferPrimaryBalance() { + return preferPrimaryShardBalance; + } + /** * This class is the primary weight function used to create balanced over nodes and shards in the cluster. * Currently this function has 3 properties: @@ -253,6 +313,7 @@ static class WeightFunction { private final float theta0; private final float theta1; private AllocationConstraints constraints; + private RebalanceConstraints rebalanceConstraints; WeightFunction(float indexBalance, float shardBalance) { float sum = indexBalance + shardBalance; @@ -264,6 +325,9 @@ static class WeightFunction { this.indexBalance = indexBalance; this.shardBalance = shardBalance; this.constraints = new AllocationConstraints(); + this.rebalanceConstraints = new RebalanceConstraints(); + // Enable index shard per node breach constraint + updateAllocationConstraint(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, true); } public float weightWithAllocationConstraints(ShardsBalancer balancer, ModelNode node, String index) { @@ -271,11 +335,24 @@ public float weightWithAllocationConstraints(ShardsBalancer balancer, ModelNode return balancerWeight + constraints.weight(balancer, node, index); } + public float weightWithRebalanceConstraints(ShardsBalancer balancer, ModelNode node, String index) { + float balancerWeight = weight(balancer, node, index); + return balancerWeight + rebalanceConstraints.weight(balancer, node, index); + } + float weight(ShardsBalancer balancer, ModelNode node, String index) { final float weightShard = node.numShards() - balancer.avgShardsPerNode(); final float weightIndex = node.numShards(index) - balancer.avgShardsPerNode(index); return theta0 * weightShard + theta1 * weightIndex; } + + void updateAllocationConstraint(String constraint, boolean enable) { + this.constraints.updateAllocationConstraint(constraint, enable); + } + + void updateRebalanceConstraint(String constraint, boolean add) { + this.rebalanceConstraints.updateRebalanceConstraint(constraint, add); + } } /** @@ -313,6 +390,15 @@ public int numShards(String idx) { return index == null ? 0 : index.numShards(); } + public int numPrimaryShards(String idx) { + ModelIndex index = indices.get(idx); + return index == null ? 
0 : index.numPrimaryShards(); + } + + public int numPrimaryShards() { + return indices.values().stream().mapToInt(index -> index.numPrimaryShards()).sum(); + } + public int highestPrimary(String index) { ModelIndex idx = indices.get(index); if (idx != null) { @@ -358,7 +444,6 @@ public boolean containsShard(ShardRouting shard) { ModelIndex index = getIndex(shard.getIndexName()); return index == null ? false : index.containsShard(shard); } - } /** @@ -374,9 +459,10 @@ public Balancer( RoutingAllocation allocation, boolean movePrimaryFirst, BalancedShardsAllocator.WeightFunction weight, - float threshold + float threshold, + boolean preferPrimaryBalance ) { - super(logger, allocation, movePrimaryFirst, weight, threshold); + super(logger, allocation, movePrimaryFirst, weight, threshold, preferPrimaryBalance); } } @@ -388,12 +474,17 @@ public Balancer( static final class ModelIndex implements Iterable { private final String id; private final Set shards = new HashSet<>(4); // expect few shards of same index to be allocated on same node + private final Set primaryShards = new HashSet<>(); private int highestPrimary = -1; ModelIndex(String id) { this.id = id; } + public int numPrimaryShards() { + return primaryShards.size(); + } + public int highestPrimary() { if (highestPrimary == -1) { int maxId = -1; @@ -423,12 +514,20 @@ public Iterator iterator() { public void removeShard(ShardRouting shard) { highestPrimary = -1; assert shards.contains(shard) : "Shard not allocated on current node: " + shard; + if (shard.primary()) { + assert primaryShards.contains(shard) : "Primary shard not allocated on current node: " + shard; + primaryShards.remove(shard); + } shards.remove(shard); } public void addShard(ShardRouting shard) { highestPrimary = -1; - assert !shards.contains(shard) : "Shard already allocated on current node: " + shard; + assert shards.contains(shard) == false : "Shard already allocated on current node: " + shard; + if (shard.primary()) { + assert primaryShards.contains(shard) == false : "Primary shard already allocated on current node: " + shard; + primaryShards.add(shard); + } shards.add(shard); } @@ -476,7 +575,7 @@ public void reset(String index) { } public float weight(ModelNode node) { - return function.weight(balancer, node, index); + return function.weightWithRebalanceConstraints(balancer, node, index); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 3c5e4013748af..0830f26be87f0 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -42,6 +42,7 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; import java.util.stream.StreamSupport; import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING; @@ -58,11 +59,15 @@ public class LocalShardsBalancer extends ShardsBalancer { private final RoutingAllocation allocation; private final RoutingNodes routingNodes; private final boolean movePrimaryFirst; + + private final boolean preferPrimaryBalance; private final BalancedShardsAllocator.WeightFunction weight; private final float threshold; private final Metadata metadata; private final float avgShardsPerNode; + + private final float avgPrimaryShardsPerNode; private final 
BalancedShardsAllocator.NodeSorter sorter; private final Set inEligibleTargetNode; @@ -71,7 +76,8 @@ public LocalShardsBalancer( RoutingAllocation allocation, boolean movePrimaryFirst, BalancedShardsAllocator.WeightFunction weight, - float threshold + float threshold, + boolean preferPrimaryBalance ) { this.logger = logger; this.allocation = allocation; @@ -81,9 +87,13 @@ public LocalShardsBalancer( this.routingNodes = allocation.routingNodes(); this.metadata = allocation.metadata(); avgShardsPerNode = ((float) metadata.getTotalNumberOfShards()) / routingNodes.size(); + avgPrimaryShardsPerNode = (float) (StreamSupport.stream(metadata.spliterator(), false) + .mapToInt(IndexMetadata::getNumberOfShards) + .sum()) / routingNodes.size(); nodes = Collections.unmodifiableMap(buildModelFromAssigned()); sorter = newNodeSorter(); inEligibleTargetNode = new HashSet<>(); + this.preferPrimaryBalance = preferPrimaryBalance; } /** @@ -101,6 +111,16 @@ public float avgShardsPerNode(String index) { return ((float) metadata.index(index).getTotalNumberOfShards()) / nodes.size(); } + @Override + public float avgPrimaryShardsPerNode(String index) { + return ((float) metadata.index(index).getNumberOfShards()) / nodes.size(); + } + + @Override + public float avgPrimaryShardsPerNode() { + return avgPrimaryShardsPerNode; + } + /** * Returns the global average of shards per node */ @@ -223,7 +243,7 @@ MoveDecision decideRebalance(final ShardRouting shard) { // this is a comparison of the number of shards on this node to the number of shards // that should be on each node on average (both taking the cluster as a whole into account // as well as shards per index) - final float nodeWeight = weight.weight(this, node, idxName); + final float nodeWeight = weight.weightWithRebalanceConstraints(this, node, idxName); // if the node we are examining has a worse (higher) weight than the node the shard is // assigned to, then there is no way moving the shard to the node with the worse weight // can make the balance of the cluster better, so we check for that here @@ -959,6 +979,7 @@ AllocateUnassignedDecision decideAllocateUnassigned(final ShardRouting shard) { } private static final Comparator BY_DESCENDING_SHARD_ID = Comparator.comparing(ShardRouting::shardId).reversed(); + private static final Comparator PRIMARY_FIRST = Comparator.comparing(ShardRouting::primary).reversed(); /** * Tries to find a relocation from the max node to the minimal node for an arbitrary shard of the given index on the @@ -969,12 +990,16 @@ private boolean tryRelocateShard(BalancedShardsAllocator.ModelNode minNode, Bala final BalancedShardsAllocator.ModelIndex index = maxNode.getIndex(idx); if (index != null) { logger.trace("Try relocating shard of [{}] from [{}] to [{}]", idx, maxNode.getNodeId(), minNode.getNodeId()); - final Iterable shardRoutings = StreamSupport.stream(index.spliterator(), false) + Stream routingStream = StreamSupport.stream(index.spliterator(), false) .filter(ShardRouting::started) // cannot rebalance unassigned, initializing or relocating shards anyway - .filter(maxNode::containsShard) - .sorted(BY_DESCENDING_SHARD_ID) // check in descending order of shard id so that the decision is deterministic - ::iterator; + .filter(maxNode::containsShard) // check shards which are present on heaviest node + .sorted(BY_DESCENDING_SHARD_ID); // check in descending order of shard id so that the decision is deterministic + // If primary balance is preferred then prioritize moving primaries first + if (preferPrimaryBalance == true) { + 
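// PRIMARY_FIRST is Comparator.comparing(ShardRouting::primary).reversed(); since false sorts
+                    // before true for booleans, reversing the order visits primary shards before replicas.
+ 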
routingStream = routingStream.sorted(PRIMARY_FIRST);
+            }
+            final Iterable<ShardRouting> shardRoutings = routingStream::iterator;
             final AllocationDeciders deciders = allocation.deciders();
             for (ShardRouting shard : shardRoutings) {
                 final Decision rebalanceDecision = deciders.canRebalance(shard, allocation);
@@ -985,9 +1010,15 @@ private boolean tryRelocateShard(BalancedShardsAllocator.ModelNode minNode, Bala
                 if (allocationDecision.type() == Decision.Type.NO) {
                     continue;
                 }
+                // This is a safety net which prevents unnecessary primary shard relocations from maxNode to minNode when
+                // such a relocation would not improve primary balance.
+                if (preferPrimaryBalance == true
+                    && shard.primary()
+                    && maxNode.numPrimaryShards(shard.getIndexName()) - minNode.numPrimaryShards(shard.getIndexName()) < 2) {
+                    continue;
+                }
                 final Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
-                maxNode.removeShard(shard);
                 long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsBalancer.java
index 593e6998141fb..ef2dbd34644a7 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsBalancer.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsBalancer.java
@@ -72,4 +72,19 @@ public float avgShardsPerNode() {
     public float avgShardsPerNode(String index) {
         return Float.MAX_VALUE;
     }
+
+    /**
+     * Returns the average number of primary shards per node for the given index
+     */
+    public float avgPrimaryShardsPerNode(String index) {
+        return Float.MAX_VALUE;
+    }
+
+    /**
+     * Returns the average number of primary shards per node
+     */
+    public float avgPrimaryShardsPerNode() {
+        return Float.MAX_VALUE;
+    }
+
 }
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDecider.java
index c87f7d16079e9..c11f5823cf3a7 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDecider.java
@@ -12,6 +12,7 @@
 import org.apache.logging.log4j.Logger;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNodeRole;
 import org.opensearch.cluster.routing.RoutingNode;
 import org.opensearch.cluster.routing.RoutingPool;
 import org.opensearch.cluster.routing.ShardRouting;
@@ -35,7 +36,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
         if (RoutingPool.REMOTE_CAPABLE.equals(shardPool) && RoutingPool.LOCAL_ONLY.equals(targetNodePool)) {
             logger.debug(
                 "Shard: [{}] has target pool: [{}]. Cannot allocate on node: [{}] with target pool: [{}]",
-                shardRouting.shortSummary(),
+                shardRouting,
                 shardPool,
                 node.node(),
                 targetNodePool
@@ -47,7 +48,25 @@
                 shardPool,
                 targetNodePool
             );
-        }
+        } else if (RoutingPool.LOCAL_ONLY.equals(shardPool)
+            && RoutingPool.REMOTE_CAPABLE.equals(targetNodePool)
+            && !node.node().getRoles().contains(DiscoveryNodeRole.DATA_ROLE)) {
+                logger.debug(
+                    "Shard: [{}] has target pool: [{}]. 
Cannot allocate on node: [{}] without the [{}] node role", + shardRouting, + shardPool, + node.node(), + DiscoveryNodeRole.DATA_ROLE + ); + return allocation.decision( + Decision.NO, + NAME, + "Routing pools are incompatible. Shard pool: [{}], Node Pool: [{}] without [{}] role", + shardPool, + targetNodePool, + DiscoveryNodeRole.DATA_ROLE + ); + } return allocation.decision( Decision.YES, NAME, @@ -91,7 +110,25 @@ private Decision canAllocateInTargetPool(IndexMetadata indexMetadata, DiscoveryN indexPool, targetNodePool ); - } + } else if (RoutingPool.LOCAL_ONLY.equals(indexPool) + && RoutingPool.REMOTE_CAPABLE.equals(targetNodePool) + && !node.getRoles().contains(DiscoveryNodeRole.DATA_ROLE)) { + logger.debug( + "Index: [{}] has target pool: [{}]. Cannot allocate on node: [{}] without the [{}] node role", + indexMetadata.getIndex().getName(), + indexPool, + node, + DiscoveryNodeRole.DATA_ROLE + ); + return allocation.decision( + Decision.NO, + NAME, + "Routing pools are incompatible. Index pool: [{}], Node Pool: [{}] without [{}] role", + indexPool, + targetNodePool, + DiscoveryNodeRole.DATA_ROLE + ); + } return allocation.decision( Decision.YES, NAME, diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index 780f12c12f134..03bc3219ac8f8 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -905,11 +905,9 @@ private ClusterTasksResult executeTasks(TaskInputs taskInputs, ClusterSt ); if (Assertions.ENABLED) { ClusterTasksResult finalClusterTasksResult = clusterTasksResult; - taskInputs.updateTasks.forEach( - updateTask -> { - assert finalClusterTasksResult.executionResults.containsKey(updateTask.task) : "missing task result for " + updateTask; - } - ); + taskInputs.updateTasks.forEach(updateTask -> { + assert finalClusterTasksResult.executionResults.containsKey(updateTask.task) : "missing task result for " + updateTask; + }); } return clusterTasksResult; diff --git a/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java b/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java index b5710bab41172..ed744f4f42d07 100644 --- a/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java +++ b/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java @@ -46,6 +46,7 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.stream.Collectors; @@ -61,7 +62,7 @@ public abstract class TaskBatcher { private final Logger logger; private final PrioritizedOpenSearchThreadPoolExecutor threadExecutor; // package visible for tests - final Map> tasksPerBatchingKey = new HashMap<>(); + final Map> tasksPerBatchingKey = new ConcurrentHashMap<>(); private final TaskBatcherListener taskBatcherListener; public TaskBatcher(Logger logger, PrioritizedOpenSearchThreadPoolExecutor threadExecutor, TaskBatcherListener taskBatcherListener) { @@ -85,20 +86,11 @@ public void submitTasks(List tasks, @Nullable TimeValue t try { // convert to an identity map to check for dups based on task identity final Map tasksIdentity = tasks.stream() - .collect( - Collectors.toMap( - BatchedTask::getTask, - Function.identity(), - (a, b) -> { throw new IllegalStateException("cannot add duplicate task: " + a); }, 
- IdentityHashMap::new - ) - ); - - synchronized (tasksPerBatchingKey) { - LinkedHashSet existingTasks = tasksPerBatchingKey.computeIfAbsent( - firstTask.batchingKey, - k -> new LinkedHashSet<>(tasks.size()) - ); + .collect(Collectors.toMap(BatchedTask::getTask, Function.identity(), (a, b) -> { + throw new IllegalStateException("cannot add duplicate task: " + a); + }, IdentityHashMap::new)); + LinkedHashSet newTasks = new LinkedHashSet<>(tasks); + tasksPerBatchingKey.merge(firstTask.batchingKey, newTasks, (existingTasks, updatedTasks) -> { for (BatchedTask existing : existingTasks) { // check that there won't be two tasks with the same identity for the same batching key BatchedTask duplicateTask = tasksIdentity.get(existing.getTask()); @@ -112,8 +104,9 @@ public void submitTasks(List tasks, @Nullable TimeValue t ); } } - existingTasks.addAll(tasks); - } + existingTasks.addAll(updatedTasks); + return existingTasks; + }); } catch (Exception e) { taskBatcherListener.onSubmitFailure(tasks); throw e; @@ -139,15 +132,13 @@ private void onTimeoutInternal(List tasks, TimeValue time Object batchingKey = firstTask.batchingKey; assert tasks.stream().allMatch(t -> t.batchingKey == batchingKey) : "tasks submitted in a batch should share the same batching key: " + tasks; - synchronized (tasksPerBatchingKey) { - LinkedHashSet existingTasks = tasksPerBatchingKey.get(batchingKey); - if (existingTasks != null) { - existingTasks.removeAll(toRemove); - if (existingTasks.isEmpty()) { - tasksPerBatchingKey.remove(batchingKey); - } + tasksPerBatchingKey.computeIfPresent(batchingKey, (tasksKey, existingTasks) -> { + existingTasks.removeAll(toRemove); + if (existingTasks.isEmpty()) { + return null; } - } + return existingTasks; + }); taskBatcherListener.onTimeout(toRemove); onTimeout(toRemove, timeout); } @@ -165,17 +156,15 @@ void runIfNotProcessed(BatchedTask updateTask) { if (updateTask.processed.get() == false) { final List toExecute = new ArrayList<>(); final Map> processTasksBySource = new HashMap<>(); - synchronized (tasksPerBatchingKey) { - LinkedHashSet pending = tasksPerBatchingKey.remove(updateTask.batchingKey); - if (pending != null) { - for (BatchedTask task : pending) { - if (task.processed.getAndSet(true) == false) { - logger.trace("will process {}", task); - toExecute.add(task); - processTasksBySource.computeIfAbsent(task.source, s -> new ArrayList<>()).add(task); - } else { - logger.trace("skipping {}, already processed", task); - } + LinkedHashSet pending = tasksPerBatchingKey.remove(updateTask.batchingKey); + if (pending != null) { + for (BatchedTask task : pending) { + if (task.processed.getAndSet(true) == false) { + logger.trace("will process {}", task); + toExecute.add(task); + processTasksBySource.computeIfAbsent(task.source, s -> new ArrayList<>()).add(task); + } else { + logger.trace("skipping {}, already processed", task); } } } diff --git a/server/src/main/java/org/opensearch/common/logging/OpenSearchJsonLayout.java b/server/src/main/java/org/opensearch/common/logging/OpenSearchJsonLayout.java index a1b014a81be64..dac9e299e9b7e 100644 --- a/server/src/main/java/org/opensearch/common/logging/OpenSearchJsonLayout.java +++ b/server/src/main/java/org/opensearch/common/logging/OpenSearchJsonLayout.java @@ -83,6 +83,9 @@ *

* The value taken from %OpenSearchMessageField{message} has to be a simple escaped JSON value and is populated in subclasses of * OpenSearchLogMessage + *

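+ * As noted below, the message field is truncated by default. A hypothetical appender
+ * configuration raising the limit (property names follow the new {@code maxmessagelength}
+ * attribute added in this change; illustrative only):
+ * <pre>
+ * appender.rolling.layout.type = OpenSearchJsonLayout
+ * appender.rolling.layout.type_name = server
+ * appender.rolling.layout.maxmessagelength = 20000
+ * </pre>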
+ * The message field is truncated at 10000 characters by default. This limit can be controlled by setting the
+ * appender.logger.layout.maxmessagelength to the desired limit or to 0 to disable truncation.
 *
 * @opensearch.internal
 */
@@ -91,18 +94,26 @@ public class OpenSearchJsonLayout extends AbstractStringLayout {
 
     private final PatternLayout patternLayout;
 
-    protected OpenSearchJsonLayout(String typeName, Charset charset, String[] opensearchMessageFields) {
+    protected OpenSearchJsonLayout(String typeName, Charset charset, String[] opensearchMessageFields, int maxMessageLength) {
         super(charset);
         this.patternLayout = PatternLayout.newBuilder()
-            .withPattern(pattern(typeName, opensearchMessageFields))
+            .withPattern(pattern(typeName, opensearchMessageFields, maxMessageLength))
             .withAlwaysWriteExceptions(false)
             .build();
     }
 
-    private String pattern(String type, String[] opensearchMessageFields) {
+    private String pattern(String type, String[] opensearchMessageFields, int maxMessageLength) {
         if (Strings.isEmpty(type)) {
             throw new IllegalArgumentException("layout parameter 'type_name' cannot be empty");
         }
+
+        String messageFormat = "%m";
+        if (maxMessageLength < 0) {
+            throw new IllegalArgumentException("layout parameter 'maxmessagelength' cannot be a negative number");
+        } else if (maxMessageLength > 0) {
+            messageFormat = "%.-" + Integer.toString(maxMessageLength) + "m";
+        }
+
         Map<String, Object> map = new LinkedHashMap<>();
         map.put("type", inQuotes(type));
         map.put("timestamp", inQuotes("%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}"));
@@ -110,7 +121,7 @@ private String pattern(String type, String[] opensearchMessageFields) {
         map.put("component", inQuotes("%c{1.}"));
         map.put("cluster.name", inQuotes("${sys:opensearch.logs.cluster_name}"));
         map.put("node.name", inQuotes("%node_name"));
-        map.put("message", inQuotes("%notEmpty{%enc{%marker}{JSON} }%enc{%.-10000m}{JSON}"));
+        map.put("message", inQuotes("%notEmpty{%enc{%marker}{JSON} }%enc{" + messageFormat + "}{JSON}"));
 
         for (String key : opensearchMessageFields) {
             map.put(key, inQuotes("%OpenSearchMessageField{" + key + "}"));
@@ -162,8 +173,8 @@ private String inQuotes(String s) {
     }
 
     @PluginFactory
-    public static OpenSearchJsonLayout createLayout(String type, Charset charset, String[] opensearchmessagefields) {
-        return new OpenSearchJsonLayout(type, charset, opensearchmessagefields);
+    public static OpenSearchJsonLayout createLayout(String type, Charset charset, String[] opensearchmessagefields, int maxMessageLength) {
+        return new OpenSearchJsonLayout(type, charset, opensearchmessagefields, maxMessageLength);
     }
 
     PatternLayout getPatternLayout() {
@@ -188,14 +199,18 @@ public static class Builder> extends A
         @PluginAttribute("opensearchmessagefields")
         private String opensearchMessageFields;
 
+        @PluginAttribute(value = "maxmessagelength", defaultInt = 10000)
+        private int maxMessageLength;
+
         public Builder() {
             setCharset(StandardCharsets.UTF_8);
+            setMaxMessageLength(10000);
         }
 
         @Override
         public OpenSearchJsonLayout build() {
             String[] split = Strings.isNullOrEmpty(opensearchMessageFields) ? 
new String[] {} : opensearchMessageFields.split(","); - return OpenSearchJsonLayout.createLayout(type, charset, split); + return OpenSearchJsonLayout.createLayout(type, charset, split, maxMessageLength); } public Charset getCharset() { @@ -224,6 +239,15 @@ public B setOpenSearchMessageFields(String opensearchMessageFields) { this.opensearchMessageFields = opensearchMessageFields; return asBuilder(); } + + public int getMaxMessageLength() { + return maxMessageLength; + } + + public B setMaxMessageLength(final int maxMessageLength) { + this.maxMessageLength = maxMessageLength; + return asBuilder(); + } } @PluginBuilderFactory diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index cd062b630a8e6..a2c145fb7e8b7 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -40,6 +40,7 @@ import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexingPressure; +import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.ShardIndexingPressureMemoryManager; import org.opensearch.index.ShardIndexingPressureSettings; import org.opensearch.index.ShardIndexingPressureStore; @@ -233,6 +234,7 @@ public void apply(Settings value, Settings current, Settings previous) { AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING, BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE, BalancedShardsAllocator.SHARD_MOVE_PRIMARY_FIRST_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING, BreakerSettings.CIRCUIT_BREAKER_LIMIT_SETTING, @@ -625,7 +627,11 @@ public void apply(Settings value, Settings current, Settings previous) { SearchShardTaskSettings.SETTING_TOTAL_HEAP_PERCENT_THRESHOLD, SearchBackpressureSettings.SETTING_CANCELLATION_RATIO, // deprecated SearchBackpressureSettings.SETTING_CANCELLATION_RATE, // deprecated - SearchBackpressureSettings.SETTING_CANCELLATION_BURST // deprecated + SearchBackpressureSettings.SETTING_CANCELLATION_BURST, // deprecated + SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED, + SegmentReplicationPressureService.MAX_INDEXING_CHECKPOINTS, + SegmentReplicationPressureService.MAX_REPLICATION_TIME_SETTING, + SegmentReplicationPressureService.MAX_ALLOWED_STALE_SHARDS ) ) ); diff --git a/server/src/main/java/org/opensearch/common/settings/Setting.java b/server/src/main/java/org/opensearch/common/settings/Setting.java index 26a60e24b86b2..35063d89a68e1 100644 --- a/server/src/main/java/org/opensearch/common/settings/Setting.java +++ b/server/src/main/java/org/opensearch/common/settings/Setting.java @@ -40,6 +40,9 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; import org.opensearch.common.regex.Regex; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.MemorySizeValue; @@ -166,8 +169,8 @@ public enum Property { protected final Function defaultValue; @Nullable protected final Setting fallbackSetting; - private final Function parser; - private final Validator validator; + 
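// Note: widened from private so that WriteableSetting (in this package) can inspect and
+    // serialize the parser and validator when transporting a Setting across nodes.
+ 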
protected final Function parser; + protected final Validator validator; private final EnumSet properties; private static final EnumSet EMPTY_PROPERTIES = EnumSet.noneOf(Property.class); @@ -1247,6 +1250,40 @@ private static boolean isFiltered(Property[] properties) { return properties != null && Arrays.asList(properties).contains(Property.Filtered); } + /** + * A writeable validator able to check the value of string type custom setting by using regular expression + */ + public static class RegexValidator implements Writeable, Validator { + private Pattern pattern; + + /** + * @param regex A regular expression containing the only valid input for this setting. + */ + public RegexValidator(String regex) { + this.pattern = Pattern.compile(regex); + } + + public RegexValidator(StreamInput in) throws IOException { + this.pattern = Pattern.compile(in.readString()); + } + + Pattern getPattern() { + return pattern; + } + + @Override + public void validate(String value) { + if (!pattern.matcher(value).matches()) { + throw new IllegalArgumentException("Setting [" + value + "] does not match regex [" + pattern.pattern() + "]"); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(pattern.pattern()); + } + } + // Float private static float parseFloat(String s, float minValue, float maxValue, String key, boolean isFiltered) { @@ -1287,12 +1324,80 @@ public static Setting floatSetting( return new Setting<>( key, Float.toString(defaultValue), - (s) -> parseFloat(s, minValue, maxValue, key, isFiltered(properties)), + new FloatParser(minValue, maxValue, key, isFiltered(properties)), validator, properties ); } + /** + * A writeable parser for float + * + */ + public static class FloatParser implements Function, Writeable { + private float minValue; + private float maxValue; + private String key; + private boolean isFiltered; + + public FloatParser(float minValue, float maxValue, String key, boolean isFiltered) { + this.minValue = minValue; + this.maxValue = maxValue; + this.key = key; + this.isFiltered = isFiltered; + } + + public FloatParser(StreamInput in) throws IOException { + minValue = in.readFloat(); + maxValue = in.readFloat(); + key = in.readString(); + isFiltered = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeFloat(minValue); + out.writeFloat(maxValue); + out.writeString(key); + out.writeBoolean(isFiltered); + } + + public float getMin() { + return minValue; + } + + public float getMax() { + return maxValue; + } + + public String getKey() { + return key; + } + + public boolean getFilterStatus() { + return isFiltered; + } + + @Override + public Float apply(String s) { + return parseFloat(s, minValue, maxValue, key, isFiltered); + } + + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + FloatParser that = (FloatParser) obj; + return Objects.equals(key, that.key) + && Objects.equals(minValue, that.minValue) + && Objects.equals(maxValue, that.maxValue) + && Objects.equals(isFiltered, that.isFiltered); + } + + public int hashCode() { + return Objects.hash(minValue, maxValue, key, isFiltered); + } + } + // Setting with fallback public static Setting floatSetting(String key, Setting fallbackSetting, Property... 
properties) { @@ -1389,12 +1494,80 @@ public static Setting intSetting( return new Setting<>( key, Integer.toString(defaultValue), - (s) -> parseInt(s, minValue, maxValue, key, isFiltered(properties)), + new IntegerParser(minValue, maxValue, key, isFiltered(properties)), validator, properties ); } + /** + * A writeable parser for integer + * + */ + public static class IntegerParser implements Function, Writeable { + private String key; + private int minValue; + private int maxValue; + private boolean isFiltered; + + public IntegerParser(int minValue, int maxValue, String key, boolean isFiltered) { + this.minValue = minValue; + this.maxValue = maxValue; + this.key = key; + this.isFiltered = isFiltered; + } + + public IntegerParser(StreamInput in) throws IOException { + minValue = in.readInt(); + maxValue = in.readInt(); + key = in.readString(); + isFiltered = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(minValue); + out.writeInt(maxValue); + out.writeString(key); + out.writeBoolean(isFiltered); + } + + public int getMin() { + return minValue; + } + + public int getMax() { + return maxValue; + } + + public String getKey() { + return key; + } + + public boolean getFilterStatus() { + return isFiltered; + } + + @Override + public Integer apply(String s) { + return parseInt(s, minValue, maxValue, key, isFiltered); + } + + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + IntegerParser that = (IntegerParser) obj; + return Objects.equals(key, that.key) + && Objects.equals(minValue, that.minValue) + && Objects.equals(maxValue, that.maxValue) + && Objects.equals(isFiltered, that.isFiltered); + } + + public int hashCode() { + return Objects.hash(minValue, maxValue, key, isFiltered); + } + } + // Setting with fallback public static Setting intSetting(String key, Setting fallbackSetting, Property... 
properties) { @@ -1483,12 +1656,80 @@ public static Setting longSetting( return new Setting<>( key, Long.toString(defaultValue), - (s) -> parseLong(s, minValue, maxValue, key, isFiltered(properties)), + new LongParser(minValue, maxValue, key, isFiltered(properties)), validator, properties ); } + /** + * A writeable parser for long + * + */ + public static class LongParser implements Function, Writeable { + private String key; + private long minValue; + private long maxValue; + private boolean isFiltered; + + public LongParser(long minValue, long maxValue, String key, boolean isFiltered) { + this.minValue = minValue; + this.maxValue = maxValue; + this.key = key; + this.isFiltered = isFiltered; + } + + public LongParser(StreamInput in) throws IOException { + minValue = in.readLong(); + maxValue = in.readLong(); + key = in.readString(); + isFiltered = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(minValue); + out.writeLong(maxValue); + out.writeString(key); + out.writeBoolean(isFiltered); + } + + public long getMin() { + return minValue; + } + + public long getMax() { + return maxValue; + } + + public String getKey() { + return key; + } + + public boolean getFilterStatus() { + return isFiltered; + } + + @Override + public Long apply(String s) { + return parseLong(s, minValue, maxValue, key, isFiltered); + } + + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + LongParser that = (LongParser) obj; + return Objects.equals(key, that.key) + && Objects.equals(minValue, that.minValue) + && Objects.equals(maxValue, that.maxValue) + && Objects.equals(isFiltered, that.isFiltered); + } + + public int hashCode() { + return Objects.hash(minValue, maxValue, key, isFiltered); + } + } + // Setting with fallback public static Setting longSetting(String key, Setting fallbackSetting, Property... 
properties) { @@ -1577,12 +1818,80 @@ public static Setting doubleSetting( return new Setting<>( key, Double.toString(defaultValue), - (s) -> parseDouble(s, minValue, maxValue, key, isFiltered(properties)), + new DoubleParser(minValue, maxValue, key, isFiltered(properties)), validator, properties ); } + /** + * A writeable parser for double + * + */ + public static class DoubleParser implements Function, Writeable { + private double minValue; + private double maxValue; + private String key; + private boolean isFiltered; + + public DoubleParser(double minValue, double maxValue, String key, boolean isFiltered) { + this.minValue = minValue; + this.maxValue = maxValue; + this.key = key; + this.isFiltered = isFiltered; + } + + public DoubleParser(StreamInput in) throws IOException { + minValue = in.readDouble(); + maxValue = in.readDouble(); + key = in.readString(); + isFiltered = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeDouble(minValue); + out.writeDouble(maxValue); + out.writeString(key); + out.writeBoolean(isFiltered); + } + + public double getMin() { + return minValue; + } + + public double getMax() { + return maxValue; + } + + public String getKey() { + return key; + } + + public boolean getFilterStatus() { + return isFiltered; + } + + @Override + public Double apply(String s) { + return parseDouble(s, minValue, maxValue, key, isFiltered); + } + + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + DoubleParser that = (DoubleParser) obj; + return Objects.equals(key, that.key) + && Objects.equals(minValue, that.minValue) + && Objects.equals(maxValue, that.maxValue) + && Objects.equals(isFiltered, that.isFiltered); + } + + public int hashCode() { + return Objects.hash(minValue, maxValue, key, isFiltered); + } + } + // Setting with fallback public static Setting doubleSetting(String key, Setting fallbackSetting, Property... properties) { @@ -1722,11 +2031,11 @@ public static Setting byteSizeSetting(String key, ByteSizeValue v } public static Setting byteSizeSetting(String key, Setting fallbackSetting, Property... properties) { - return new Setting<>(key, fallbackSetting, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), properties); + return new Setting<>(key, fallbackSetting, new ByteSizeValueParser(key), properties); } public static Setting byteSizeSetting(String key, Function defaultValue, Property... properties) { - return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), properties); + return new Setting<>(key, defaultValue, new ByteSizeValueParser(key), properties); } public static Setting byteSizeSetting( @@ -1746,7 +2055,68 @@ public static Setting byteSizeSetting( ByteSizeValue maxValue, Property... 
properties ) { - return new Setting<>(key, defaultValue, (s) -> parseByteSize(s, minValue, maxValue, key), properties); + return new Setting<>(key, defaultValue, new ByteSizeValueParser(minValue, maxValue, key), properties); + } + + /** + * A writeable parser for bytesizevalue + * + */ + public static class ByteSizeValueParser implements Function, Writeable { + private ByteSizeValue minValue; + private ByteSizeValue maxValue; + private String key; + + public ByteSizeValueParser(ByteSizeValue minValue, ByteSizeValue maxValue, String key) { + this.minValue = minValue; + this.maxValue = maxValue; + this.key = key; + } + + public ByteSizeValueParser(String key) { + this(new ByteSizeValue(-1), new ByteSizeValue(Long.MAX_VALUE), key); + } + + public ByteSizeValueParser(StreamInput in) throws IOException { + minValue = new ByteSizeValue(in); + maxValue = new ByteSizeValue(in); + key = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + ((ByteSizeValue) minValue).writeTo(out); + ((ByteSizeValue) maxValue).writeTo(out); + out.writeString(key); + } + + public ByteSizeValue getMin() { + return minValue; + } + + public ByteSizeValue getMax() { + return maxValue; + } + + public String getKey() { + return key; + } + + @Override + public ByteSizeValue apply(String s) { + return parseByteSize(s, minValue, maxValue, key); + } + + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + ByteSizeValueParser that = (ByteSizeValueParser) obj; + return Objects.equals(key, that.key) && Objects.equals(minValue, that.minValue) && Objects.equals(maxValue, that.maxValue); + } + + public int hashCode() { + return Objects.hash(key, minValue, maxValue); + } } public static ByteSizeValue parseByteSize(String s, ByteSizeValue minValue, ByteSizeValue maxValue, String key) { @@ -1799,7 +2169,7 @@ public static Setting memorySizeSetting(String key, ByteSizeValue * @return the setting object */ public static Setting memorySizeSetting(String key, Function defaultValue, Property... properties) { - return new Setting<>(key, defaultValue, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties); + return new Setting<>(key, defaultValue, new MemorySizeValueParser(key), properties); } /** @@ -1813,7 +2183,47 @@ public static Setting memorySizeSetting(String key, Function memorySizeSetting(String key, String defaultPercentage, Property... 
properties) { - return new Setting<>(key, (s) -> defaultPercentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties); + return new Setting<>(key, (s) -> defaultPercentage, new MemorySizeValueParser(key), properties); + } + + /** + * A writeable parser for memory size value + */ + public static class MemorySizeValueParser implements Function, Writeable { + private String key; + + public MemorySizeValueParser(String key) { + this.key = key; + } + + public MemorySizeValueParser(StreamInput in) throws IOException { + key = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(key); + } + + public String getKey() { + return key; + } + + @Override + public ByteSizeValue apply(String s) { + return MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key); + } + + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + MemorySizeValueParser that = (MemorySizeValueParser) obj; + return Objects.equals(key, that.key); + } + + public int hashCode() { + return Objects.hash(key); + } } /** @@ -2073,6 +2483,63 @@ public static Setting timeSetting( ); } + /** + * A writeable parser for time value only has min value + * + */ + public static class MinTimeValueParser implements Function, Writeable { + private String key; + private TimeValue minValue; + private boolean isFiltered; + + public MinTimeValueParser(String key, TimeValue minValue, boolean isFiltered) { + this.key = key; + this.minValue = minValue; + this.isFiltered = isFiltered; + } + + public MinTimeValueParser(StreamInput in) throws IOException { + key = in.readString(); + minValue = in.readTimeValue(); + isFiltered = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(key); + out.writeTimeValue(minValue); + out.writeBoolean(isFiltered); + } + + public TimeValue getMin() { + return minValue; + } + + public String getKey() { + return key; + } + + public boolean getFilterStatus() { + return isFiltered; + } + + @Override + public TimeValue apply(String s) { + return minTimeValueParser(key, minValue, isFiltered).apply(s); + } + + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + MinTimeValueParser that = (MinTimeValueParser) obj; + return Objects.equals(key, that.key) && Objects.equals(minValue, that.minValue) && Objects.equals(isFiltered, that.isFiltered); + } + + public int hashCode() { + return Objects.hash(key, minValue, isFiltered); + } + } + public static Setting timeSetting( final String key, Function defaultValue, @@ -2083,7 +2550,7 @@ public static Setting timeSetting( return new Setting<>( simpleKey, s -> defaultValue.apply(s).getStringRep(), - minTimeValueParser(key, minValue, isFiltered(properties)), + new MinTimeValueParser(key, minValue, isFiltered(properties)), properties ); } @@ -2099,11 +2566,79 @@ public static Setting timeSetting( return new Setting<>( simpleKey, s -> defaultValue.getStringRep(), - minMaxTimeValueParser(key, minValue, maxValue, isFiltered(properties)), + new MinMaxTimeValueParser(key, minValue, maxValue, isFiltered(properties)), properties ); } + /** + * A writeable parser for time value have min and max value + * + */ + public static class MinMaxTimeValueParser implements Function, Writeable { + private String key; + private TimeValue minValue; + private TimeValue maxValue; + private boolean isFiltered; + + 
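// Captures the same bounds that minMaxTimeValueParser(...) closes over, so the parser can
+        // be written to a stream and reconstructed on the receiving node.
+ 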
public MinMaxTimeValueParser(String key, TimeValue minValue, TimeValue maxValue, boolean isFiltered) { + this.key = key; + this.minValue = minValue; + this.maxValue = maxValue; + this.isFiltered = isFiltered; + } + + public MinMaxTimeValueParser(StreamInput in) throws IOException { + key = in.readString(); + minValue = in.readTimeValue(); + maxValue = in.readTimeValue(); + isFiltered = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(key); + out.writeTimeValue(minValue); + out.writeTimeValue(maxValue); + out.writeBoolean(isFiltered); + } + + public TimeValue getMin() { + return minValue; + } + + public TimeValue getMax() { + return maxValue; + } + + public String getKey() { + return key; + } + + public boolean getFilterStatus() { + return isFiltered; + } + + @Override + public TimeValue apply(String s) { + return minMaxTimeValueParser(key, minValue, maxValue, isFiltered).apply(s); + } + + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + MinMaxTimeValueParser that = (MinMaxTimeValueParser) obj; + return Objects.equals(key, that.key) + && Objects.equals(minValue, that.minValue) + && Objects.equals(maxValue, that.maxValue) + && Objects.equals(isFiltered, that.isFiltered); + } + + public int hashCode() { + return Objects.hash(key, minValue, maxValue, isFiltered); + } + } + private static Function minTimeValueParser(final String key, final TimeValue minValue, boolean isFiltered) { return s -> { TimeValue value; diff --git a/server/src/main/java/org/opensearch/common/settings/WriteableSetting.java b/server/src/main/java/org/opensearch/common/settings/WriteableSetting.java index 2dd0d85ab0cd7..1225202612129 100644 --- a/server/src/main/java/org/opensearch/common/settings/WriteableSetting.java +++ b/server/src/main/java/org/opensearch/common/settings/WriteableSetting.java @@ -9,10 +9,20 @@ package org.opensearch.common.settings; import org.opensearch.Version; +import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.settings.Setting.ByteSizeValueParser; +import org.opensearch.common.settings.Setting.DoubleParser; +import org.opensearch.common.settings.Setting.FloatParser; +import org.opensearch.common.settings.Setting.IntegerParser; +import org.opensearch.common.settings.Setting.LongParser; +import org.opensearch.common.settings.Setting.MemorySizeValueParser; +import org.opensearch.common.settings.Setting.MinMaxTimeValueParser; +import org.opensearch.common.settings.Setting.MinTimeValueParser; import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Setting.RegexValidator; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import java.io.IOException; @@ -85,12 +95,22 @@ public WriteableSetting(StreamInput in) throws IOException { if (hasFallback) { fallback = new WriteableSetting(in); } - // We are using known types so don't need the parser - // We are not using validator + // Read the parser + Object parser = null; + boolean isParserWriteable = in.readBoolean(); + if (isParserWriteable) { + parser = readParser(in, parser); + } + // Read the Validator + Object validator = new Object(); + boolean isValidatorWriteable = in.readBoolean(); + if (isValidatorWriteable) { + validator = readValidator(in); + } // 
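Note: only parsers and validators that implement Writeable are transported; otherwise
+        // createSetting() below falls back to the default parser for the setting's type.
+        // 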
Read properties EnumSet propSet = in.readEnumSet(Property.class); // Put it all in a setting object - this.setting = createSetting(type, key, defaultValue, fallback, propSet.toArray(Property[]::new)); + this.setting = createSetting(type, key, defaultValue, parser, validator, fallback, propSet.toArray(Property[]::new)); } /** @@ -138,6 +158,8 @@ private Setting createSetting( SettingType type, String key, Object defaultValue, + @Nullable Object parser, + Object validator, WriteableSetting fallback, Property[] propertyArray ) { @@ -147,33 +169,96 @@ private Setting createSetting( ? Setting.boolSetting(key, (boolean) defaultValue, propertyArray) : Setting.boolSetting(key, (Setting) fallback.getSetting(), propertyArray); case Integer: - return fallback == null - ? Setting.intSetting(key, (int) defaultValue, propertyArray) - : Setting.intSetting(key, (Setting) fallback.getSetting(), propertyArray); + if (fallback == null && parser instanceof Writeable) { + return Setting.intSetting( + key, + (int) defaultValue, + ((IntegerParser) parser).getMin(), + ((IntegerParser) parser).getMax(), + propertyArray + ); + } else if (fallback == null) { + return Setting.intSetting(key, (int) defaultValue, propertyArray); + } + return Setting.intSetting(key, (Setting) fallback.getSetting(), propertyArray); case Long: - return fallback == null - ? Setting.longSetting(key, (long) defaultValue, propertyArray) - : Setting.longSetting(key, (Setting) fallback.getSetting(), propertyArray); + if (fallback == null && parser instanceof Writeable) { + return Setting.longSetting( + key, + (long) defaultValue, + ((LongParser) parser).getMin(), + ((LongParser) parser).getMax(), + propertyArray + ); + } else if (fallback == null) { + return Setting.longSetting(key, (long) defaultValue, propertyArray); + } + return Setting.longSetting(key, (Setting) fallback.getSetting(), propertyArray); case Float: - return fallback == null - ? Setting.floatSetting(key, (float) defaultValue, propertyArray) - : Setting.floatSetting(key, (Setting) fallback.getSetting(), propertyArray); + if (fallback == null && parser instanceof Writeable) { + return Setting.floatSetting( + key, + (float) defaultValue, + ((FloatParser) parser).getMin(), + ((FloatParser) parser).getMax(), + propertyArray + ); + } else if (fallback == null) { + return Setting.floatSetting(key, (float) defaultValue, propertyArray); + } + return Setting.floatSetting(key, (Setting) fallback.getSetting(), propertyArray); case Double: - return fallback == null - ? Setting.doubleSetting(key, (double) defaultValue, propertyArray) - : Setting.doubleSetting(key, (Setting) fallback.getSetting(), propertyArray); + if (fallback == null && parser instanceof Writeable) { + return Setting.doubleSetting( + key, + (double) defaultValue, + ((DoubleParser) parser).getMin(), + ((DoubleParser) parser).getMax(), + propertyArray + ); + } else if (fallback == null) { + return Setting.doubleSetting(key, (double) defaultValue, propertyArray); + } + return Setting.doubleSetting(key, (Setting) fallback.getSetting(), propertyArray); case String: return fallback == null ? Setting.simpleString(key, (String) defaultValue, propertyArray) : Setting.simpleString(key, (Setting) fallback.getSetting(), propertyArray); case TimeValue: - return fallback == null - ? 
Setting.timeSetting(key, (TimeValue) defaultValue, propertyArray) - : Setting.timeSetting(key, (Setting) fallback.getSetting(), propertyArray); + if (fallback == null && parser instanceof Writeable) { + if (parser instanceof MinMaxTimeValueParser) { + return Setting.timeSetting( + key, + (TimeValue) defaultValue, + ((MinMaxTimeValueParser) parser).getMin(), + ((MinMaxTimeValueParser) parser).getMax(), + propertyArray + ); + } else { + return Setting.timeSetting(key, (TimeValue) defaultValue, ((MinTimeValueParser) parser).getMin(), propertyArray); + } + } else if (fallback == null) { + return Setting.timeSetting(key, (TimeValue) defaultValue, propertyArray); + } + return Setting.timeSetting(key, (Setting) fallback.getSetting(), propertyArray); case ByteSizeValue: - return fallback == null - ? Setting.byteSizeSetting(key, (ByteSizeValue) defaultValue, propertyArray) - : Setting.byteSizeSetting(key, (Setting) fallback.getSetting(), propertyArray); + if (fallback == null && parser instanceof Writeable) { + if (parser instanceof MemorySizeValueParser) { + return Setting.memorySizeSetting(key, (ByteSizeValue) defaultValue, propertyArray); + } else { + ByteSizeValueParser byteSizeValueParser = (ByteSizeValueParser) parser; + return Setting.byteSizeSetting( + key, + (ByteSizeValue) defaultValue, + byteSizeValueParser.getMin(), + byteSizeValueParser.getMax(), + propertyArray + ); + } + } else if (fallback == null) { + return Setting.byteSizeSetting(key, (ByteSizeValue) defaultValue, propertyArray); + } + return Setting.byteSizeSetting(key, (Setting) fallback.getSetting(), propertyArray); case Version: // No fallback option on this method return Setting.versionSetting(key, (Version) defaultValue, propertyArray); @@ -197,12 +282,63 @@ public void writeTo(StreamOutput out) throws IOException { if (hasFallback) { new WriteableSetting(setting.fallbackSetting, type).writeTo(out); } - // We are using known types so don't need the parser - // We are not using validator + // Write a boolean specifying whether the parser is an instanceof writeable + boolean isParserWriteable = setting.parser instanceof Writeable; + out.writeBoolean(isParserWriteable); + if (isParserWriteable) { + writeParser(out, setting.parser); + } + // Write the validator + boolean isValidatorWriteable = setting.validator instanceof Writeable; + out.writeBoolean(isValidatorWriteable); + if (isValidatorWriteable) { + writeValidator(out, setting.validator); + } // Write properties out.writeEnumSet(setting.getProperties()); } + private void writeParser(StreamOutput out, Object parser) throws IOException { + switch (type) { + case Integer: + ((IntegerParser) parser).writeTo(out); + break; + case Long: + ((LongParser) parser).writeTo(out); + break; + case Float: + ((FloatParser) parser).writeTo(out); + break; + case Double: + ((DoubleParser) parser).writeTo(out); + break; + case TimeValue: + if (parser instanceof MinMaxTimeValueParser) { + out.writeBoolean(true); + ((MinMaxTimeValueParser) parser).writeTo(out); + } else if (parser instanceof MinTimeValueParser) { + out.writeBoolean(false); + ((MinTimeValueParser) parser).writeTo(out); + } + break; + case ByteSizeValue: + if (parser instanceof ByteSizeValueParser) { + out.writeBoolean(true); + ((ByteSizeValueParser) parser).writeTo(out); + } else if (parser instanceof MemorySizeValueParser) { + out.writeBoolean(false); + ((MemorySizeValueParser) parser).writeTo(out); + } + break; + default: + throw new IllegalArgumentException("A SettingType has been added to the enum and not handled here."); + 
}
+    }
+
+    private void writeValidator(StreamOutput out, Object validator) throws IOException {
+        ((Writeable) validator).writeTo(out);
+    }
+
     private void writeDefaultValue(StreamOutput out, Object defaultValue) throws IOException {
         switch (type) {
             case Boolean:
@@ -240,6 +376,37 @@ private void writeDefaultValue(StreamOutput out, Object defaultValue) throws IOE
         }
     }
 
+    private Object readParser(StreamInput in) throws IOException {
+        switch (type) {
+            case Integer:
+                return new IntegerParser(in);
+            case Long:
+                return new LongParser(in);
+            case Float:
+                return new FloatParser(in);
+            case Double:
+                return new DoubleParser(in);
+            case TimeValue:
+                // A leading boolean flags which TimeValue parser variant was written.
+                if (in.readBoolean()) {
+                    return new MinMaxTimeValueParser(in);
+                } else {
+                    return new MinTimeValueParser(in);
+                }
+            case ByteSizeValue:
+                if (in.readBoolean()) {
+                    return new ByteSizeValueParser(in);
+                } else {
+                    return new MemorySizeValueParser(in);
+                }
+            default:
+                throw new IllegalArgumentException("A SettingType has been added to the enum and not handled here.");
+        }
+    }
+
+    private Object readValidator(StreamInput in) throws IOException {
+        // RegexValidator is currently the only validator written to the stream.
+        return new RegexValidator(in);
+    }
+
     private Object readDefaultValue(StreamInput in) throws IOException {
         switch (type) {
             case Boolean:
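Note (not part of the patch): a minimal round-trip sketch of what serializing the parser buys. The two-argument WriteableSetting constructor and the SettingType enum come from the code above; BytesStreamOutput and the exact enum qualifier are assumptions based on standard OpenSearch stream utilities.

    // Round-trip a bounded integer setting; previously the [1, 10] bounds
    // were dropped on the wire, with this patch they survive.
    Setting<Integer> original = Setting.intSetting("my.plugin.size", 5, 1, 10, Setting.Property.NodeScope);
    WriteableSetting ws = new WriteableSetting(original, WriteableSetting.SettingType.Integer);
    BytesStreamOutput out = new BytesStreamOutput();
    ws.writeTo(out); // writes the parser flag plus the IntegerParser bounds
    WriteableSetting copy = new WriteableSetting(out.bytes().streamInput());
    // The reconstructed setting now enforces the bounds again:
    // copy.getSetting() rejects a configured value of 42 because max is 10.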
diff --git a/server/src/main/java/org/opensearch/common/unit/Fuzziness.java b/server/src/main/java/org/opensearch/common/unit/Fuzziness.java
index 28947b3936843..813f7e2a43c0f 100644
--- a/server/src/main/java/org/opensearch/common/unit/Fuzziness.java
+++ b/server/src/main/java/org/opensearch/common/unit/Fuzziness.java
@@ -189,7 +189,13 @@ public static Fuzziness parse(XContentParser parser) throws IOException {
                 return build(fuzziness);
             }
         } catch (NumberFormatException ex) {
-            return build(fuzziness);
+            // Validate that the fuzziness value is a correctly formatted numeric value.
+            try {
+                Float.parseFloat(fuzziness);
+                return build(fuzziness);
+            } catch (NumberFormatException e) {
+                throw new IllegalArgumentException("Invalid fuzziness value: " + fuzziness);
+            }
         }
 
         default:
@@ -225,7 +231,7 @@ public float asFloat() {
         if (this.equals(AUTO) || isAutoWithCustomValues()) {
             return 1f;
         }
-        return Float.parseFloat(fuzziness.toString());
+        return Float.parseFloat(fuzziness);
     }
 
     private int termLen(String text) {
@@ -234,9 +240,9 @@ public String asString() {
         if (isAutoWithCustomValues()) {
-            return fuzziness.toString() + ":" + lowDistance + "," + highDistance;
+            return fuzziness + ":" + lowDistance + "," + highDistance;
         }
-        return fuzziness.toString();
+        return fuzziness;
     }
 
     private boolean isAutoWithCustomValues() {
diff --git a/server/src/main/java/org/opensearch/discovery/InitializeExtensionResponse.java b/server/src/main/java/org/opensearch/discovery/InitializeExtensionResponse.java
index 3be97816dc541..1fc0ee7dd325c 100644
--- a/server/src/main/java/org/opensearch/discovery/InitializeExtensionResponse.java
+++ b/server/src/main/java/org/opensearch/discovery/InitializeExtensionResponse.java
@@ -37,7 +37,9 @@
 import org.opensearch.transport.TransportResponse;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Objects;
+import java.util.List;
 
 /**
  * PluginResponse to initialize plugin
  *
  * @opensearch.internal
  */
 public class InitializeExtensionResponse extends TransportResponse {
     private String name;
+    private List<String> implementedInterfaces;
 
-    public InitializeExtensionResponse(String name) {
+    public InitializeExtensionResponse(String name, List<String> implementedInterfaces) {
         this.name = name;
+        this.implementedInterfaces = implementedInterfaces;
     }
 
     public InitializeExtensionResponse(StreamInput in) throws IOException {
         name = in.readString();
+        this.implementedInterfaces = Arrays.asList(in.readStringArray());
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(name);
+        out.writeStringArray(implementedInterfaces.toArray(new String[0]));
     }
 
     /**
@@ -68,9 +74,17 @@ public String getName() {
         return this.name;
     }
 
+    /**
+     * @return interfaces implemented by an extension
+     */
+    public List<String> getImplementedInterfaces() {
+        return implementedInterfaces;
+    }
+
     @Override
     public String toString() {
-        return "PluginResponse{" + "name" + name + "}";
+        return "InitializeExtensionResponse{" + "name = " + name + " , " + "implementedInterfaces = " + implementedInterfaces + "}";
     }
 
     @Override
@@ -78,11 +92,11 @@ public boolean equals(Object o) {
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
         InitializeExtensionResponse that = (InitializeExtensionResponse) o;
-        return Objects.equals(name, that.name);
+        return Objects.equals(name, that.name) && Objects.equals(implementedInterfaces, that.implementedInterfaces);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(name);
+        return Objects.hash(name, implementedInterfaces);
     }
 }
diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java
index 679d0ef3d7cff..75f855cb330ab 100644
--- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java
+++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java
@@ -410,7 +410,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce
      * If the user doesn't configure the cache size, it fails if the node is a data + search node.
* Else it configures the size to 80% of available capacity for a dedicated search node, if not explicitly defined. */ - private void initializeFileCache(Settings settings) { + private void initializeFileCache(Settings settings) throws IOException { if (DiscoveryNode.isSearchNode(settings)) { long capacity = NODE_SEARCH_CACHE_SIZE_SETTING.get(settings).getBytes(); FsInfo.Path info = ExceptionsHelper.catchAsRuntimeException(() -> FsProbe.getFSInfo(this.fileCacheNodePath)); @@ -435,6 +435,8 @@ private void initializeFileCache(Settings settings) { capacity = Math.min(capacity, availableCapacity); fileCacheNodePath.fileCacheReservedSize = new ByteSizeValue(capacity, ByteSizeUnit.BYTES); this.fileCache = FileCacheFactory.createConcurrentLRUFileCache(capacity); + List fileCacheDataPaths = collectFileCacheDataPath(this.fileCacheNodePath); + this.fileCache.restoreFromDirectory(fileCacheDataPaths); } } @@ -1305,7 +1307,9 @@ static List collectFileCacheDataPath(NodePath fileCacheNodePath) throws IO for (Path indexPath : indexStream) { if (Files.isDirectory(indexPath)) { try (Stream shardStream = Files.list(indexPath)) { - shardStream.map(Path::toAbsolutePath).forEach(indexSubPaths::add); + shardStream.filter(NodeEnvironment::isShardPath) + .map(Path::toAbsolutePath) + .forEach(indexSubPaths::add); } } } @@ -1346,6 +1350,18 @@ private static Path resolveIndexCustomLocation(String customDataPath, String ind return resolveBaseCustomLocation(customDataPath, sharedDataPath, nodeLockId).resolve(indexUUID); } + /** + * Resolve the file cache path for remote shards. + * + * @param fileCachePath the file cache path + * @param shardId shard to resolve the path to + */ + public Path resolveFileCacheLocation(final Path fileCachePath, final ShardId shardId) { + return fileCachePath.resolve(Integer.toString(nodeLockId)) + .resolve(shardId.getIndex().getUUID()) + .resolve(Integer.toString(shardId.id())); + } + /** * Resolve the custom path for a index's shard. 
* Uses the {@code IndexMetadata.SETTING_DATA_PATH} setting to determine diff --git a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java index 58d6f33292273..35f2eead82159 100644 --- a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java +++ b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java @@ -49,6 +49,7 @@ import java.io.IOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -57,6 +58,7 @@ import java.util.stream.Stream; import java.util.stream.StreamSupport; +import static org.opensearch.env.NodeEnvironment.CACHE_FOLDER; import static org.opensearch.env.NodeEnvironment.INDICES_FOLDER; /** @@ -68,12 +70,14 @@ public class NodeRepurposeCommand extends OpenSearchNodeCommand { static final String ABORTED_BY_USER_MSG = OpenSearchNodeCommand.ABORTED_BY_USER_MSG; static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = OpenSearchNodeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG; - static final String NO_CLEANUP = "Node has node.data=true -> no clean up necessary"; + static final String NO_CLEANUP = "Node has node.data=true and node.search=true -> no clean up necessary"; static final String NO_DATA_TO_CLEAN_UP_FOUND = "No data to clean-up found"; static final String NO_SHARD_DATA_TO_CLEAN_UP_FOUND = "No shard data to clean-up found"; + static final String NO_FILE_CACHE_DATA_TO_CLEAN_UP_FOUND = "No file cache to clean-up found"; + private static final int FILE_CACHE_NODE_PATH_LOCATION = 0; public NodeRepurposeCommand() { - super("Repurpose this node to another cluster-manager/data role, cleaning up any excess persisted data"); + super("Repurpose this node to another cluster-manager/data/search role, cleaning up any excess persisted data"); } void testExecute(Terminal terminal, OptionSet options, Environment env) throws Exception { @@ -83,7 +87,7 @@ void testExecute(Terminal terminal, OptionSet options, Environment env) throws E @Override protected boolean validateBeforeLock(Terminal terminal, Environment env) { Settings settings = env.settings(); - if (DiscoveryNode.isDataNode(settings)) { + if (DiscoveryNode.isDataNode(settings) && DiscoveryNode.isSearchNode(settings)) { terminal.println(Terminal.Verbosity.NORMAL, NO_CLEANUP); return false; } @@ -94,85 +98,186 @@ protected boolean validateBeforeLock(Terminal terminal, Environment env) { @Override protected void processNodePaths(Terminal terminal, Path[] dataPaths, int nodeLockId, OptionSet options, Environment env) throws IOException { - assert DiscoveryNode.isDataNode(env.settings()) == false; + assert DiscoveryNode.isDataNode(env.settings()) == false || DiscoveryNode.isSearchNode(env.settings()) == false; + + boolean repurposeData = DiscoveryNode.isDataNode(env.settings()) == false; + boolean repurposeSearch = DiscoveryNode.isSearchNode(env.settings()) == false; if (DiscoveryNode.isClusterManagerNode(env.settings()) == false) { - processNoClusterManagerNoDataNode(terminal, dataPaths, env); + processNoClusterManagerRepurposeNode(terminal, dataPaths, env, repurposeData, repurposeSearch); } else { - processClusterManagerNoDataNode(terminal, dataPaths, env); + processClusterManagerRepurposeNode(terminal, dataPaths, env, repurposeData, repurposeSearch); } } - private void processNoClusterManagerNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { + private void processNoClusterManagerRepurposeNode( + Terminal terminal, + 
Path[] dataPaths, + Environment env, + boolean repurposeData, + boolean repurposeSearch + ) throws IOException { NodeEnvironment.NodePath[] nodePaths = toNodePaths(dataPaths); + NodeEnvironment.NodePath fileCacheNodePath = toNodePaths(dataPaths)[FILE_CACHE_NODE_PATH_LOCATION]; + final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths); + final Metadata metadata = loadClusterState(terminal, env, persistedClusterStateService).metadata(); - terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths"); - List shardDataPaths = NodeEnvironment.collectShardDataPaths(nodePaths); + Set indexPaths = Set.of(); + List shardDataPaths = List.of(); + Set fileCachePaths = Set.of(); + List fileCacheDataPaths = List.of(); terminal.println(Terminal.Verbosity.VERBOSE, "Collecting index metadata paths"); List indexMetadataPaths = NodeEnvironment.collectIndexMetadataPaths(nodePaths); - Set indexPaths = uniqueParentPaths(shardDataPaths, indexMetadataPaths); + if (repurposeData) { + terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths"); + shardDataPaths = NodeEnvironment.collectShardDataPaths(nodePaths); + indexPaths = uniqueParentPaths(shardDataPaths, indexMetadataPaths); + } - final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths); + if (repurposeSearch) { + terminal.println(Terminal.Verbosity.VERBOSE, "Collecting file cache data paths"); + fileCacheDataPaths = NodeEnvironment.collectFileCacheDataPath(fileCacheNodePath); + fileCachePaths = uniqueParentPaths(fileCacheDataPaths, indexMetadataPaths); + } - final Metadata metadata = loadClusterState(terminal, env, persistedClusterStateService).metadata(); - if (indexPaths.isEmpty() && metadata.indices().isEmpty()) { + if (repurposeData && repurposeSearch && fileCacheDataPaths.isEmpty() && indexPaths.isEmpty() && metadata.indices().isEmpty()) { terminal.println(Terminal.Verbosity.NORMAL, NO_DATA_TO_CLEAN_UP_FOUND); return; + } else if (repurposeData && !repurposeSearch && indexPaths.isEmpty() && metadata.indices().isEmpty()) { + terminal.println(Terminal.Verbosity.NORMAL, NO_DATA_TO_CLEAN_UP_FOUND); + return; + } else if (!repurposeData && repurposeSearch && fileCacheDataPaths.isEmpty() && metadata.indices().isEmpty()) { + terminal.println(NO_FILE_CACHE_DATA_TO_CLEAN_UP_FOUND); + return; } final Set indexUUIDs = Sets.union( - indexUUIDsFor(indexPaths), - StreamSupport.stream(metadata.indices().values().spliterator(), false) - .map(imd -> imd.value.getIndexUUID()) - .collect(Collectors.toSet()) + indexUUIDsFor(fileCachePaths), + Sets.union( + indexUUIDsFor(indexPaths), + StreamSupport.stream(metadata.indices().values().spliterator(), false) + .map(imd -> imd.value.getIndexUUID()) + .collect(Collectors.toSet()) + ) ); - outputVerboseInformation(terminal, indexPaths, indexUUIDs, metadata); - - terminal.println(noClusterManagerMessage(indexUUIDs.size(), shardDataPaths.size(), indexMetadataPaths.size())); + List cleanUpPaths = new ArrayList<>(shardDataPaths); + cleanUpPaths.addAll(fileCacheDataPaths); + outputVerboseInformation(terminal, cleanUpPaths, indexUUIDs, metadata); + terminal.println(noClusterManagerMessage(indexUUIDs.size(), cleanUpPaths.size(), indexMetadataPaths.size())); outputHowToSeeVerboseInformation(terminal); - terminal.println("Node is being re-purposed as no-cluster-manager and no-data. 
Clean-up of index data will be performed."); + if (repurposeData && repurposeSearch) { + terminal.println( + "Node is being re-purposed as no-cluster-manager, no-data and no-search. Clean-up of index data and file cache will be performed." + ); + } else if (repurposeData) { + terminal.println("Node is being re-purposed as no-cluster-manager and no-data. Clean-up of index data will be performed."); + } else if (repurposeSearch) { + terminal.println( + "Node is being re-purposed as no-cluster-manager and no-search. Clean-up of file cache and corresponding index metadata will be performed." + ); + } confirm(terminal, "Do you want to proceed?"); - removePaths(terminal, indexPaths); // clean-up shard dirs // clean-up all metadata dirs MetadataStateFormat.deleteMetaState(dataPaths); - IOUtils.rm(Stream.of(dataPaths).map(path -> path.resolve(INDICES_FOLDER)).toArray(Path[]::new)); + if (repurposeData) { + removePaths(terminal, indexPaths); // clean-up shard dirs + IOUtils.rm(Stream.of(dataPaths).map(path -> path.resolve(INDICES_FOLDER)).toArray(Path[]::new)); + } + + if (repurposeSearch) { + removePaths(terminal, fileCachePaths); // clean-up file cache dirs + IOUtils.rm(dataPaths[FILE_CACHE_NODE_PATH_LOCATION].resolve(CACHE_FOLDER)); + } - terminal.println("Node successfully repurposed to no-cluster-manager and no-data."); + if (repurposeData && repurposeSearch) { + terminal.println("Node successfully repurposed to no-cluster-manager, no-data and no-search."); + } else if (repurposeData) { + terminal.println("Node successfully repurposed to no-cluster-manager and no-data."); + } else if (repurposeSearch) { + terminal.println("Node successfully repurposed to no-cluster-manager and no-search."); + } } - private void processClusterManagerNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { + private void processClusterManagerRepurposeNode( + Terminal terminal, + Path[] dataPaths, + Environment env, + boolean repurposeData, + boolean repurposeSearch + ) throws IOException { NodeEnvironment.NodePath[] nodePaths = toNodePaths(dataPaths); + NodeEnvironment.NodePath fileCacheNodePath = toNodePaths(dataPaths)[FILE_CACHE_NODE_PATH_LOCATION]; + final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths); + final Metadata metadata = loadClusterState(terminal, env, persistedClusterStateService).metadata(); - terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths"); - List shardDataPaths = NodeEnvironment.collectShardDataPaths(nodePaths); - if (shardDataPaths.isEmpty()) { - terminal.println(NO_SHARD_DATA_TO_CLEAN_UP_FOUND); - return; - } + Set indexPaths = Set.of(); + List shardDataPaths = List.of(); + Set fileCachePaths = Set.of(); + List fileCacheDataPaths = List.of(); - final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths); + if (repurposeData) { + terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths"); + shardDataPaths = NodeEnvironment.collectShardDataPaths(nodePaths); + indexPaths = uniqueParentPaths(shardDataPaths); + } - final Metadata metadata = loadClusterState(terminal, env, persistedClusterStateService).metadata(); + if (repurposeSearch) { + terminal.println(Terminal.Verbosity.VERBOSE, "Collecting file cache data paths"); + fileCacheDataPaths = NodeEnvironment.collectFileCacheDataPath(fileCacheNodePath); + fileCachePaths = uniqueParentPaths(fileCacheDataPaths); + } - final Set indexPaths 
= uniqueParentPaths(shardDataPaths); - final Set indexUUIDs = indexUUIDsFor(indexPaths); + if (repurposeData && repurposeSearch && shardDataPaths.isEmpty() && fileCacheDataPaths.isEmpty()) { + terminal.println(NO_SHARD_DATA_TO_CLEAN_UP_FOUND); + return; + } else if (repurposeData && !repurposeSearch && shardDataPaths.isEmpty()) { + terminal.println(NO_SHARD_DATA_TO_CLEAN_UP_FOUND); + return; + } else if (!repurposeData && repurposeSearch && fileCacheDataPaths.isEmpty()) { + terminal.println(NO_FILE_CACHE_DATA_TO_CLEAN_UP_FOUND); + return; + } - outputVerboseInformation(terminal, shardDataPaths, indexUUIDs, metadata); + final Set indexUUIDs = Sets.union(indexUUIDsFor(indexPaths), indexUUIDsFor(fileCachePaths)); - terminal.println(shardMessage(shardDataPaths.size(), indexUUIDs.size())); + List cleanUpPaths = new ArrayList<>(shardDataPaths); + cleanUpPaths.addAll(fileCacheDataPaths); + outputVerboseInformation(terminal, cleanUpPaths, indexUUIDs, metadata); + terminal.println(shardMessage(cleanUpPaths.size(), indexUUIDs.size())); outputHowToSeeVerboseInformation(terminal); - terminal.println("Node is being re-purposed as cluster-manager and no-data. Clean-up of shard data will be performed."); + if (repurposeData && repurposeSearch) { + terminal.println( + "Node is being re-purposed as cluster-manager, no-data and no-search. Clean-up of shard data and file cache data will be performed." + ); + } else if (repurposeData) { + terminal.println("Node is being re-purposed as cluster-manager and no-data. Clean-up of shard data will be performed."); + } else if (repurposeSearch) { + terminal.println("Node is being re-purposed as cluster-manager and no-search. Clean-up of file cache data will be performed."); + } + confirm(terminal, "Do you want to proceed?"); - removePaths(terminal, shardDataPaths); // clean-up shard dirs + if (repurposeData) { + removePaths(terminal, shardDataPaths); // clean-up shard dirs + } + + if (repurposeSearch) { + removePaths(terminal, fileCacheDataPaths); // clean-up file cache dirs + } - terminal.println("Node successfully repurposed to cluster-manager and no-data."); + if (repurposeData && repurposeSearch) { + terminal.println("Node successfully repurposed to cluster-manager, no-data and no-search."); + } else if (repurposeData) { + terminal.println("Node successfully repurposed to cluster-manager and no-data."); + } else if (repurposeSearch) { + terminal.println("Node successfully repurposed to cluster-manager and no-search."); + } } private ClusterState loadClusterState(Terminal terminal, Environment env, PersistedClusterStateService psf) throws IOException { @@ -211,11 +316,17 @@ private Set indexUUIDsFor(Set indexPaths) { } static String noClusterManagerMessage(int indexes, int shards, int indexMetadata) { - return "Found " + indexes + " indices (" + shards + " shards and " + indexMetadata + " index meta data) to clean up"; + return "Found " + + indexes + + " indices (" + + shards + + " shards/file cache folders and " + + indexMetadata + + " index meta data) to clean up"; } static String shardMessage(int shards, int indices) { - return "Found " + shards + " shards in " + indices + " indices to clean up"; + return "Found " + shards + " shards/file cache folders in " + indices + " indices to clean up"; } private void removePaths(Terminal terminal, Collection paths) { diff --git a/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java b/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java index 7a15be49d86ee..f52e62ef2cbec 100644 
--- a/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java
+++ b/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java
@@ -34,6 +34,7 @@ public class DiscoveryExtensionNode extends DiscoveryNode implements Writeable,
 
     private Version minimumCompatibleVersion;
     private List<ExtensionDependency> dependencies = Collections.emptyList();
+    private List<String> implementedInterfaces = Collections.emptyList();
 
     public DiscoveryExtensionNode(
         String name,
@@ -84,6 +85,14 @@ public Version getMinimumCompatibleVersion() {
         return minimumCompatibleVersion;
     }
 
+    public List<String> getImplementedInterfaces() {
+        return implementedInterfaces;
+    }
+
+    public void setImplementedInterfaces(List<String> implementedInterfaces) {
+        this.implementedInterfaces = implementedInterfaces;
+    }
+
     public boolean dependenciesContain(ExtensionDependency dependency) {
         for (ExtensionDependency extensiondependency : this.dependencies) {
             if (dependency.getUniqueId().equals(extensiondependency.getUniqueId())
diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java
index 323e0c7acd715..4f852ca944966 100644
--- a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java
+++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java
@@ -67,7 +67,6 @@
 import org.opensearch.transport.TransportService;
 import org.yaml.snakeyaml.Yaml;
 import org.opensearch.env.EnvironmentSettingsResponse;
-import org.yaml.snakeyaml.constructor.SafeConstructor;
 
 /**
  * The main class for managing Extension communication with the OpenSearch Node.
@@ -366,6 +365,7 @@ public InitializeExtensionResponse read(StreamInput in) throws IOException {
             public void handleResponse(InitializeExtensionResponse response) {
                 for (DiscoveryExtensionNode extension : extensionIdMap.values()) {
                     if (extension.getName().equals(response.getName())) {
+                        extension.setImplementedInterfaces(response.getImplementedInterfaces());
                         extensions.add(extension);
                         logger.info("Initialized extension: " + extension.getName());
                         break;
@@ -558,7 +558,7 @@ public String executor() {
     }
 
     private ExtensionsSettings readFromExtensionsYml(Path filePath) throws IOException {
-        Yaml yaml = new Yaml(new SafeConstructor());
+        Yaml yaml = new Yaml();
         try (InputStream inputStream = Files.newInputStream(filePath)) {
             Map<String, Object> obj = yaml.load(inputStream);
             if (obj == null) {
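Note (not part of the patch): the extended handshake above is easier to see as a wire round-trip. A minimal sketch; the extension name and interface list are made up, and BytesStreamOutput is assumed from OpenSearch's stream utilities.

    // The extension now reports which interfaces it implements, and the
    // response carries that list back across the transport layer.
    InitializeExtensionResponse response = new InitializeExtensionResponse(
        "hello-world",
        List.of("Extension", "ActionExtension")
    );
    BytesStreamOutput out = new BytesStreamOutput();
    response.writeTo(out); // name, then the interface names as a string array
    InitializeExtensionResponse copy = new InitializeExtensionResponse(out.bytes().streamInput());
    assert copy.getImplementedInterfaces().equals(response.getImplementedInterfaces());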
diff --git a/server/src/main/java/org/opensearch/extensions/RegisterTransportActionsRequest.java b/server/src/main/java/org/opensearch/extensions/RegisterTransportActionsRequest.java
index 47061f94dee83..94b15e2192722 100644
--- a/server/src/main/java/org/opensearch/extensions/RegisterTransportActionsRequest.java
+++ b/server/src/main/java/org/opensearch/extensions/RegisterTransportActionsRequest.java
@@ -8,18 +8,14 @@
 package org.opensearch.extensions;
 
-import org.opensearch.action.ActionRequest;
-import org.opensearch.action.ActionResponse;
-import org.opensearch.action.support.TransportAction;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
 import org.opensearch.transport.TransportRequest;
 
 import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.HashSet;
 import java.util.Objects;
-import java.util.Map.Entry;
+import java.util.Set;
 
 /**
  * Request to register extension Transport actions
  *
  * @opensearch.internal
  */
 public class RegisterTransportActionsRequest extends TransportRequest {
+    // The uniqueId defining the extension which runs this action
     private String uniqueId;
-    private Map<String, Class<? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>>> transportActions;
+    // The action names to register
+    private Set<String> transportActions;
 
-    public RegisterTransportActionsRequest(
-        String uniqueId,
-        Map<String, Class<? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>>> transportActions
-    ) {
+    public RegisterTransportActionsRequest(String uniqueId, Set<String> transportActions) {
         this.uniqueId = uniqueId;
-        this.transportActions = new HashMap<>(transportActions);
+        this.transportActions = transportActions;
     }
 
     public RegisterTransportActionsRequest(StreamInput in) throws IOException {
         super(in);
         this.uniqueId = in.readString();
-        Map<String, Class<? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>>> actions = new HashMap<>();
-        int actionCount = in.readVInt();
-        for (int i = 0; i < actionCount; i++) {
-            try {
-                String actionName = in.readString();
-                @SuppressWarnings("unchecked")
-                Class<? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>> transportAction = (Class<
-                    ? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>>) Class.forName(in.readString());
-                actions.put(actionName, transportAction);
-            } catch (ClassNotFoundException e) {
-                throw new IllegalArgumentException("Could not read transport action");
-            }
-        }
-        this.transportActions = actions;
+        this.transportActions = new HashSet<>(in.readStringList());
     }
 
     public String getUniqueId() {
         return uniqueId;
     }
 
-    public Map<String, Class<? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>>> getTransportActions() {
+    public Set<String> getTransportActions() {
         return transportActions;
     }
 
@@ -69,12 +51,7 @@ public String getUniqueId() {
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeString(uniqueId);
-        out.writeVInt(this.transportActions.size());
-        for (Entry<String, Class<? extends TransportAction<? extends ActionRequest, ? extends ActionResponse>>> action : transportActions
-            .entrySet()) {
-            out.writeString(action.getKey());
-            out.writeString(action.getValue().getName());
-        }
+        out.writeStringCollection(transportActions);
     }
 
     @Override
diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java
index f76fe794b2f84..1f2b58c2bd524 100644
--- a/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java
+++ b/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java
@@ -93,7 +93,7 @@ public TransportResponse handleRegisterTransportActionsRequest(RegisterTransport
         logger.debug("Register Transport Actions request received {}", transportActionsRequest);
         DiscoveryExtensionNode extension = extensionIdMap.get(transportActionsRequest.getUniqueId());
         try {
-            for (String action : transportActionsRequest.getTransportActions().keySet()) {
+            for (String action : transportActionsRequest.getTransportActions()) {
                 registerAction(action, extension);
             }
         } catch (Exception e) {
diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java
index 357be3a9fc2fe..247269e2f1f17 100644
--- a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java
+++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java
@@ -198,10 +198,9 @@ public String executor() {
             restExecuteOnExtensionResponse.getContent()
         );
         // No constructor that includes headers so we roll our own
-        restExecuteOnExtensionResponse.getHeaders()
-            .entrySet()
-            .stream()
-            .forEach(e -> { e.getValue().stream().forEach(v -> restResponse.addHeader(e.getKey(), v)); });
+        restExecuteOnExtensionResponse.getHeaders().entrySet().stream().forEach(e -> {
+            e.getValue().stream().forEach(v -> restResponse.addHeader(e.getKey(), v));
+        });
         return channel ->
channel.sendResponse(restResponse); } diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 57b9a7c52c2d0..69bc5ef8c37c0 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -635,7 +635,9 @@ public MapperService newIndexMapperService( xContentRegistry, new SimilarityService(indexSettings, scriptService, similarities), mapperRegistry, - () -> { throw new UnsupportedOperationException("no index query shard context available"); }, + () -> { + throw new UnsupportedOperationException("no index query shard context available"); + }, () -> false, scriptService ); diff --git a/server/src/main/java/org/opensearch/index/IndexSortConfig.java b/server/src/main/java/org/opensearch/index/IndexSortConfig.java index 179d79f90cc8c..f73f96df4f9ad 100644 --- a/server/src/main/java/org/opensearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/opensearch/index/IndexSortConfig.java @@ -221,10 +221,9 @@ public Sort buildIndexSort( } IndexFieldData fieldData; try { - fieldData = fieldDataLookup.apply( - ft, - () -> { throw new UnsupportedOperationException("index sorting not supported on runtime field [" + ft.name() + "]"); } - ); + fieldData = fieldDataLookup.apply(ft, () -> { + throw new UnsupportedOperationException("index sorting not supported on runtime field [" + ft.name() + "]"); + }); } catch (Exception e) { throw new IllegalArgumentException("docvalues not found for index sort field:[" + sortSpec.field + "]", e); } diff --git a/server/src/main/java/org/opensearch/index/IndexWarmer.java b/server/src/main/java/org/opensearch/index/IndexWarmer.java index 4b49a6c92dd9f..b62afe6b6dcfc 100644 --- a/server/src/main/java/org/opensearch/index/IndexWarmer.java +++ b/server/src/main/java/org/opensearch/index/IndexWarmer.java @@ -166,7 +166,9 @@ public TerminationHandle warmReader(final IndexShard indexShard, final OpenSearc IndexFieldData.Global ifd = indexFieldDataService.getForField( fieldType, indexFieldDataService.index().getName(), - () -> { throw new UnsupportedOperationException("search lookup not available when warming an index"); } + () -> { + throw new UnsupportedOperationException("search lookup not available when warming an index"); + } ); IndexFieldData global = ifd.loadGlobal(reader); if (reader.leaves().isEmpty() == false) { diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPerGroupStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPerGroupStats.java new file mode 100644 index 0000000000000..23beef419e7b6 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPerGroupStats.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.index.shard.ShardId; + +import java.io.IOException; +import java.util.Set; + +/** + * Return Segment Replication stats for a Replication Group. 
+ * + * @opensearch.internal + */ +public class SegmentReplicationPerGroupStats implements Writeable, ToXContentFragment { + + private final ShardId shardId; + private final Set replicaStats; + private final long rejectedRequestCount; + + public SegmentReplicationPerGroupStats(ShardId shardId, Set replicaStats, long rejectedRequestCount) { + this.shardId = shardId; + this.replicaStats = replicaStats; + this.rejectedRequestCount = rejectedRequestCount; + } + + public SegmentReplicationPerGroupStats(StreamInput in) throws IOException { + this.shardId = new ShardId(in); + this.replicaStats = in.readSet(SegmentReplicationShardStats::new); + this.rejectedRequestCount = in.readVLong(); + } + + public Set getReplicaStats() { + return replicaStats; + } + + public long getRejectedRequestCount() { + return rejectedRequestCount; + } + + public ShardId getShardId() { + return shardId; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("rejected_requests", rejectedRequestCount); + builder.startArray("replicas"); + for (SegmentReplicationShardStats stats : replicaStats) { + stats.toXContent(builder, params); + } + builder.endArray(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + shardId.writeTo(out); + out.writeCollection(replicaStats); + out.writeVLong(rejectedRequestCount); + } + + @Override + public String toString() { + return "SegmentReplicationPerGroupStats{" + "replicaStats=" + replicaStats + ", rejectedRequestCount=" + rejectedRequestCount + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java new file mode 100644 index 0000000000000..f31e236fb6184 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java @@ -0,0 +1,157 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.IndicesService; + +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Service responsible for applying backpressure for lagging behind replicas when Segment Replication is enabled. 
+ * + * @opensearch.internal + */ +public class SegmentReplicationPressureService { + + private volatile boolean isSegmentReplicationBackpressureEnabled; + private volatile int maxCheckpointsBehind; + private volatile double maxAllowedStaleReplicas; + private volatile TimeValue maxReplicationTime; + + private static final Logger logger = LogManager.getLogger(SegmentReplicationPressureService.class); + + public static final Setting SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED = Setting.boolSetting( + "segrep.pressure.enabled", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting MAX_INDEXING_CHECKPOINTS = Setting.intSetting( + "segrep.pressure.checkpoint.limit", + 4, + 1, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting MAX_REPLICATION_TIME_SETTING = Setting.positiveTimeSetting( + "segrep.pressure.time.limit", + TimeValue.timeValueMinutes(5), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting MAX_ALLOWED_STALE_SHARDS = Setting.doubleSetting( + "segrep.pressure.replica.stale.limit", + .5, + 0, + 1, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private final IndicesService indicesService; + private final SegmentReplicationStatsTracker tracker; + + @Inject + public SegmentReplicationPressureService(Settings settings, ClusterService clusterService, IndicesService indicesService) { + this.indicesService = indicesService; + this.tracker = new SegmentReplicationStatsTracker(this.indicesService); + + final ClusterSettings clusterSettings = clusterService.getClusterSettings(); + this.isSegmentReplicationBackpressureEnabled = SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.get(settings); + clusterSettings.addSettingsUpdateConsumer( + SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED, + this::setSegmentReplicationBackpressureEnabled + ); + + this.maxCheckpointsBehind = MAX_INDEXING_CHECKPOINTS.get(settings); + clusterSettings.addSettingsUpdateConsumer(MAX_INDEXING_CHECKPOINTS, this::setMaxCheckpointsBehind); + + this.maxReplicationTime = MAX_REPLICATION_TIME_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(MAX_REPLICATION_TIME_SETTING, this::setMaxReplicationTime); + + this.maxAllowedStaleReplicas = MAX_ALLOWED_STALE_SHARDS.get(settings); + clusterSettings.addSettingsUpdateConsumer(MAX_ALLOWED_STALE_SHARDS, this::setMaxAllowedStaleReplicas); + } + + public void isSegrepLimitBreached(ShardId shardId) { + final IndexService indexService = indicesService.indexService(shardId.getIndex()); + final IndexShard shard = indexService.getShard(shardId.id()); + if (isSegmentReplicationBackpressureEnabled && shard.indexSettings().isSegRepEnabled() && shard.routingEntry().primary()) { + validateReplicationGroup(shard); + } + } + + private void validateReplicationGroup(IndexShard shard) { + final Set replicaStats = shard.getReplicationStats(); + final Set staleReplicas = getStaleReplicas(replicaStats); + if (staleReplicas.isEmpty() == false) { + // inSyncIds always considers the primary id, so filter it out. 
+ final float percentStale = staleReplicas.size() * 100f / (shard.getReplicationGroup().getInSyncAllocationIds().size() - 1); + final double maxStaleLimit = maxAllowedStaleReplicas * 100f; + if (percentStale >= maxStaleLimit) { + tracker.incrementRejectionCount(shard.shardId()); + logger.warn("Rejecting write requests for shard, stale shards [{}%] shards: {}", percentStale, staleReplicas); + throw new OpenSearchRejectedExecutionException( + "rejected execution on primary shard: " + shard.shardId() + " Stale Replicas: " + staleReplicas + "]", + false + ); + } + } + } + + private Set getStaleReplicas(final Set replicas) { + return replicas.stream() + .filter(entry -> entry.getCheckpointsBehindCount() > maxCheckpointsBehind) + .filter(entry -> entry.getCurrentReplicationTimeMillis() > maxReplicationTime.millis()) + .collect(Collectors.toSet()); + } + + public SegmentReplicationStats nodeStats() { + return tracker.getStats(); + } + + public SegmentReplicationPerGroupStats getStatsForShard(IndexShard indexShard) { + return tracker.getStatsForShard(indexShard); + } + + public boolean isSegmentReplicationBackpressureEnabled() { + return isSegmentReplicationBackpressureEnabled; + } + + public void setSegmentReplicationBackpressureEnabled(boolean segmentReplicationBackpressureEnabled) { + isSegmentReplicationBackpressureEnabled = segmentReplicationBackpressureEnabled; + } + + public void setMaxCheckpointsBehind(int maxCheckpointsBehind) { + this.maxCheckpointsBehind = maxCheckpointsBehind; + } + + public void setMaxAllowedStaleReplicas(double maxAllowedStaleReplicas) { + this.maxAllowedStaleReplicas = maxAllowedStaleReplicas; + } + + public void setMaxReplicationTime(TimeValue maxReplicationTime) { + this.maxReplicationTime = maxReplicationTime; + } +} diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java new file mode 100644 index 0000000000000..b68c49453222b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java @@ -0,0 +1,132 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.common.Nullable; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.indices.replication.SegmentReplicationState; + +import java.io.IOException; + +/** + * SegRep stats for a single shard. 
+ * + * @opensearch.internal + */ +public class SegmentReplicationShardStats implements Writeable, ToXContentFragment { + private final String allocationId; + private final long checkpointsBehindCount; + private final long bytesBehindCount; + private final long currentReplicationTimeMillis; + private final long lastCompletedReplicationTimeMillis; + + @Nullable + private SegmentReplicationState currentReplicationState; + + public SegmentReplicationShardStats( + String allocationId, + long checkpointsBehindCount, + long bytesBehindCount, + long currentReplicationTimeMillis, + long lastCompletedReplicationTime + ) { + this.allocationId = allocationId; + this.checkpointsBehindCount = checkpointsBehindCount; + this.bytesBehindCount = bytesBehindCount; + this.currentReplicationTimeMillis = currentReplicationTimeMillis; + this.lastCompletedReplicationTimeMillis = lastCompletedReplicationTime; + } + + public SegmentReplicationShardStats(StreamInput in) throws IOException { + this.allocationId = in.readString(); + this.checkpointsBehindCount = in.readVLong(); + this.bytesBehindCount = in.readVLong(); + this.currentReplicationTimeMillis = in.readVLong(); + this.lastCompletedReplicationTimeMillis = in.readVLong(); + } + + public String getAllocationId() { + return allocationId; + } + + public long getCheckpointsBehindCount() { + return checkpointsBehindCount; + } + + public long getBytesBehindCount() { + return bytesBehindCount; + } + + public long getCurrentReplicationTimeMillis() { + return currentReplicationTimeMillis; + } + + public long getLastCompletedReplicationTimeMillis() { + return lastCompletedReplicationTimeMillis; + } + + public void setCurrentReplicationState(SegmentReplicationState currentReplicationState) { + this.currentReplicationState = currentReplicationState; + } + + @Nullable + public SegmentReplicationState getCurrentReplicationState() { + return currentReplicationState; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("allocation_id", allocationId); + builder.field("checkpoints_behind", checkpointsBehindCount); + builder.field("bytes_behind", new ByteSizeValue(bytesBehindCount).toString()); + builder.field("current_replication_time", new TimeValue(currentReplicationTimeMillis)); + builder.field("last_completed_replication_time", new TimeValue(lastCompletedReplicationTimeMillis)); + if (currentReplicationState != null) { + builder.startObject(); + currentReplicationState.toXContent(builder, params); + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(allocationId); + out.writeVLong(checkpointsBehindCount); + out.writeVLong(bytesBehindCount); + out.writeVLong(currentReplicationTimeMillis); + out.writeVLong(lastCompletedReplicationTimeMillis); + } + + @Override + public String toString() { + return "SegmentReplicationShardStats{" + + "allocationId=" + + allocationId + + ", checkpointsBehindCount=" + + checkpointsBehindCount + + ", bytesBehindCount=" + + bytesBehindCount + + ", currentReplicationTimeMillis=" + + currentReplicationTimeMillis + + ", lastCompletedReplicationTimeMillis=" + + lastCompletedReplicationTimeMillis + + ", currentReplicationState=" + + currentReplicationState + + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationStats.java new file mode 
100644 index 0000000000000..10975e48443d8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationStats.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.index.shard.ShardId; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * Segment Replication Stats. + * + * @opensearch.internal + */ +public class SegmentReplicationStats implements Writeable, ToXContentFragment { + + private final Map shardStats; + + public SegmentReplicationStats(final Map shardStats) { + this.shardStats = shardStats; + } + + public SegmentReplicationStats(StreamInput in) throws IOException { + int shardEntries = in.readInt(); + shardStats = new HashMap<>(); + for (int i = 0; i < shardEntries; i++) { + ShardId shardId = new ShardId(in); + SegmentReplicationPerGroupStats groupStats = new SegmentReplicationPerGroupStats(in); + shardStats.put(shardId, groupStats); + } + } + + public Map getShardStats() { + return shardStats; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("segment_replication"); + for (Map.Entry entry : shardStats.entrySet()) { + builder.startObject(entry.getKey().toString()); + entry.getValue().toXContent(builder, params); + builder.endObject(); + } + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(shardStats.size()); + for (Map.Entry entry : shardStats.entrySet()) { + entry.getKey().writeTo(out); + entry.getValue().writeTo(out); + } + } + + @Override + public String toString() { + return "SegmentReplicationStats{" + "shardStats=" + shardStats + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java new file mode 100644 index 0000000000000..d7176127615d5 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.IndicesService; + +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Tracker responsible for computing SegmentReplicationStats. 
+ * + * @opensearch.internal + */ +public class SegmentReplicationStatsTracker { + + private final IndicesService indicesService; + private final Map rejectionCount; + + public SegmentReplicationStatsTracker(IndicesService indicesService) { + this.indicesService = indicesService; + rejectionCount = ConcurrentCollections.newConcurrentMap(); + } + + public SegmentReplicationStats getStats() { + Map stats = new HashMap<>(); + for (IndexService indexService : indicesService) { + for (IndexShard indexShard : indexService) { + if (indexShard.indexSettings().isSegRepEnabled() && indexShard.routingEntry().primary()) { + stats.putIfAbsent(indexShard.shardId(), getStatsForShard(indexShard)); + } + } + } + return new SegmentReplicationStats(stats); + } + + public void incrementRejectionCount(ShardId shardId) { + rejectionCount.compute(shardId, (k, v) -> { + if (v == null) { + return new AtomicInteger(1); + } else { + v.incrementAndGet(); + return v; + } + }); + } + + public SegmentReplicationPerGroupStats getStatsForShard(IndexShard indexShard) { + return new SegmentReplicationPerGroupStats( + indexShard.shardId(), + indexShard.getReplicationStats(), + Optional.ofNullable(rejectionCount.get(indexShard.shardId())).map(AtomicInteger::get).orElse(0) + ); + } +} diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java index 8ec2b70001fc9..cb686dba1ab0f 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java @@ -648,7 +648,9 @@ public IndexAnalyzers build( charFilterFactoryFactories, tokenizerFactoryFactories ), - (k, v) -> { throw new IllegalStateException("already registered analyzer with name: " + entry.getKey()); } + (k, v) -> { + throw new IllegalStateException("already registered analyzer with name: " + entry.getKey()); + } ); } for (Map.Entry> entry : normalizerProviders.entrySet()) { diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 76461783fb724..8071e94d1426d 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -42,6 +42,8 @@ import java.util.function.BiFunction; import java.util.stream.Stream; +import static org.opensearch.index.seqno.SequenceNumbers.MAX_SEQ_NO; + /** * This is an {@link Engine} implementation intended for replica shards when Segment Replication * is enabled. This Engine does not create an IndexWriter, rather it refreshes a {@link NRTReplicationReaderManager} @@ -123,10 +125,11 @@ public TranslogManager translogManager() { return translogManager; } - public synchronized void updateSegments(final SegmentInfos infos, long seqNo) throws IOException { + public synchronized void updateSegments(final SegmentInfos infos) throws IOException { // Update the current infos reference on the Engine's reader. ensureOpen(); try (ReleasableLock lock = writeLock.acquire()) { + final long maxSeqNo = Long.parseLong(infos.userData.get(MAX_SEQ_NO)); final long incomingGeneration = infos.getGeneration(); readerManager.updateSegments(infos); @@ -134,11 +137,11 @@ public synchronized void updateSegments(final SegmentInfos infos, long seqNo) th // lower/higher gens are possible from a new primary that was just elected. 
if (incomingGeneration != lastReceivedGen) { commitSegmentInfos(); - translogManager.getDeletionPolicy().setLocalCheckpointOfSafeCommit(seqNo); + translogManager.getDeletionPolicy().setLocalCheckpointOfSafeCommit(maxSeqNo); translogManager.rollTranslogGeneration(); } lastReceivedGen = incomingGeneration; - localCheckpointTracker.fastForwardProcessedSeqNo(seqNo); + localCheckpointTracker.fastForwardProcessedSeqNo(maxSeqNo); } } @@ -343,11 +346,22 @@ public List segments(boolean verbose) { } @Override - public void refresh(String source) throws EngineException {} + public void refresh(String source) throws EngineException { + maybeRefresh(source); + } @Override public boolean maybeRefresh(String source) throws EngineException { - return false; + try { + return readerManager.maybeRefresh(); + } catch (IOException e) { + try { + failEngine("refresh failed source[" + source + "]", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw new RefreshFailedEngineException(shardId, e); + } } @Override diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java index 8fbb24720aedc..00748acb1d76d 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java @@ -51,6 +51,10 @@ public class NRTReplicationReaderManager extends OpenSearchReaderManager { @Override protected OpenSearchDirectoryReader refreshIfNeeded(OpenSearchDirectoryReader referenceToRefresh) throws IOException { Objects.requireNonNull(referenceToRefresh); + // checks if an actual refresh (change in segments) happened + if (unwrapStandardReader(referenceToRefresh).getSegmentInfos().version == currentInfos.version) { + return null; + } final List subs = new ArrayList<>(); final StandardDirectoryReader standardDirectoryReader = unwrapStandardReader(referenceToRefresh); for (LeafReaderContext ctx : standardDirectoryReader.leaves()) { diff --git a/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java index 936d6b4fb7f8b..f5dc34ab8ac5d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java @@ -74,7 +74,7 @@ public List fetchValues(SourceLookup lookup) { for (String path : sourcePaths) { Object sourceValue = lookup.extractValue(path, nullValue); if (sourceValue == null) { - return org.opensearch.common.collect.List.of(); + return List.of(); } values.addAll((List) parseSourceValue(sourceValue)); } diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java index a346d57924199..4b2f02ac3828a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java @@ -425,34 +425,40 @@ private static void innerParseObject( String currentFieldName, XContentParser.Token token ) throws IOException { - assert token == XContentParser.Token.FIELD_NAME || token == XContentParser.Token.END_OBJECT; - String[] paths = null; - while (token != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - paths = 
splitAndValidatePath(currentFieldName); - if (containsDisabledObjectMapper(mapper, paths)) { - parser.nextToken(); - parser.skipChildren(); + try { + assert token == XContentParser.Token.FIELD_NAME || token == XContentParser.Token.END_OBJECT; + String[] paths = null; + context.incrementFieldCurrentDepth(); + context.checkFieldDepthLimit(); + while (token != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + paths = splitAndValidatePath(currentFieldName); + if (containsDisabledObjectMapper(mapper, paths)) { + parser.nextToken(); + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_OBJECT) { + parseObject(context, mapper, currentFieldName, paths); + } else if (token == XContentParser.Token.START_ARRAY) { + parseArray(context, mapper, currentFieldName, paths); + } else if (token == XContentParser.Token.VALUE_NULL) { + parseNullValue(context, mapper, currentFieldName, paths); + } else if (token == null) { + throw new MapperParsingException( + "object mapping for [" + + mapper.name() + + "] tried to parse field [" + + currentFieldName + + "] as object, but got EOF, has a concrete value been provided to it?" + ); + } else if (token.isValue()) { + parseValue(context, mapper, currentFieldName, token, paths); } - } else if (token == XContentParser.Token.START_OBJECT) { - parseObject(context, mapper, currentFieldName, paths); - } else if (token == XContentParser.Token.START_ARRAY) { - parseArray(context, mapper, currentFieldName, paths); - } else if (token == XContentParser.Token.VALUE_NULL) { - parseNullValue(context, mapper, currentFieldName, paths); - } else if (token == null) { - throw new MapperParsingException( - "object mapping for [" - + mapper.name() - + "] tried to parse field [" - + currentFieldName - + "] as object, but got EOF, has a concrete value been provided to it?" - ); - } else if (token.isValue()) { - parseValue(context, mapper, currentFieldName, token, paths); + token = parser.nextToken(); } - token = parser.nextToken(); + } finally { + context.decrementFieldCurrentDepth(); } } @@ -563,50 +569,59 @@ private static void parseObject(final ParseContext context, ObjectMapper mapper, private static void parseArray(ParseContext context, ObjectMapper parentMapper, String lastFieldName, String[] paths) throws IOException { - String arrayFieldName = lastFieldName; - - Mapper mapper = getMapper(context, parentMapper, lastFieldName, paths); - if (mapper != null) { - // There is a concrete mapper for this field already. 
Need to check if the mapper - // expects an array, if so we pass the context straight to the mapper and if not - // we serialize the array components - if (parsesArrayValue(mapper)) { - parseObjectOrField(context, mapper); - } else { - parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); - } - } else { - arrayFieldName = paths[paths.length - 1]; - lastFieldName = arrayFieldName; - Tuple parentMapperTuple = getDynamicParentMapper(context, paths, parentMapper); - parentMapper = parentMapperTuple.v2(); - ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context); - if (dynamic == ObjectMapper.Dynamic.STRICT) { - throw new StrictDynamicMappingException(parentMapper.fullPath(), arrayFieldName); - } else if (dynamic == ObjectMapper.Dynamic.TRUE) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, arrayFieldName, XContentFieldType.OBJECT); - if (builder == null) { - parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + try { + String arrayFieldName = lastFieldName; + context.incrementFieldArrayDepth(); + context.checkFieldArrayDepthLimit(); + + Mapper mapper = getMapper(context, parentMapper, lastFieldName, paths); + if (mapper != null) { + // There is a concrete mapper for this field already. Need to check if the mapper + // expects an array, if so we pass the context straight to the mapper and if not + // we serialize the array components + if (parsesArrayValue(mapper)) { + parseObjectOrField(context, mapper); } else { - Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); - mapper = builder.build(builderContext); - assert mapper != null; - if (parsesArrayValue(mapper)) { - context.addDynamicMapper(mapper); - context.path().add(arrayFieldName); - parseObjectOrField(context, mapper); - context.path().remove(); - } else { + parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } + } else { + arrayFieldName = paths[paths.length - 1]; + lastFieldName = arrayFieldName; + Tuple parentMapperTuple = getDynamicParentMapper(context, paths, parentMapper); + parentMapper = parentMapperTuple.v2(); + ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context); + if (dynamic == ObjectMapper.Dynamic.STRICT) { + throw new StrictDynamicMappingException(parentMapper.fullPath(), arrayFieldName); + } else if (dynamic == ObjectMapper.Dynamic.TRUE) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, arrayFieldName, XContentFieldType.OBJECT); + if (builder == null) { parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } else { + Mapper.BuilderContext builderContext = new Mapper.BuilderContext( + context.indexSettings().getSettings(), + context.path() + ); + mapper = builder.build(builderContext); + assert mapper != null; + if (parsesArrayValue(mapper)) { + context.addDynamicMapper(mapper); + context.path().add(arrayFieldName); + parseObjectOrField(context, mapper); + context.path().remove(); + } else { + parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } } + } else { + // TODO: shouldn't this skip, not parse? + parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } + for (int i = 0; i < parentMapperTuple.v1(); i++) { + context.path().remove(); } - } else { - // TODO: shouldn't this skip, not parse? 
- parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); - } - for (int i = 0; i < parentMapperTuple.v1(); i++) { - context.path().remove(); } + } finally { + context.decrementFieldArrayDepth(); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java index bede143ed5e92..8e1b6f2a3c08b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java @@ -153,9 +153,7 @@ public Set sourcePaths(String field) { } } - return fieldToCopiedFields.containsKey(resolvedField) - ? fieldToCopiedFields.get(resolvedField) - : org.opensearch.common.collect.Set.of(resolvedField); + return fieldToCopiedFields.containsKey(resolvedField) ? fieldToCopiedFields.get(resolvedField) : Set.of(resolvedField); } @Override diff --git a/server/src/main/java/org/opensearch/index/mapper/ParseContext.java b/server/src/main/java/org/opensearch/index/mapper/ParseContext.java index 24f27139f6f4c..12d551edcd3d6 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ParseContext.java +++ b/server/src/main/java/org/opensearch/index/mapper/ParseContext.java @@ -38,6 +38,7 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.OpenSearchParseException; import org.opensearch.index.IndexSettings; import java.util.ArrayList; @@ -312,6 +313,36 @@ public void addIgnoredField(String field) { public Collection getIgnoredFields() { return in.getIgnoredFields(); } + + @Override + public void incrementFieldCurrentDepth() { + in.incrementFieldCurrentDepth(); + } + + @Override + public void decrementFieldCurrentDepth() { + in.decrementFieldCurrentDepth(); + } + + @Override + public void checkFieldDepthLimit() { + in.checkFieldDepthLimit(); + } + + @Override + public void incrementFieldArrayDepth() { + in.incrementFieldArrayDepth(); + } + + @Override + public void decrementFieldArrayDepth() { + in.decrementFieldArrayDepth(); + } + + @Override + public void checkFieldArrayDepthLimit() { + in.checkFieldArrayDepthLimit(); + } } /** @@ -345,6 +376,14 @@ public static class InternalParseContext extends ParseContext { private long numNestedDocs; + private long currentFieldDepth; + + private final long maxAllowedFieldDepth; + + private long currentArrayDepth; + + private final long maxAllowedArrayDepth; + private final List dynamicMappers; private boolean docsReversed = false; @@ -371,6 +410,10 @@ public InternalParseContext( this.dynamicMappers = new ArrayList<>(); this.maxAllowedNumNestedDocs = indexSettings.getMappingNestedDocsLimit(); this.numNestedDocs = 0L; + this.currentFieldDepth = 0L; + this.currentArrayDepth = 0L; + this.maxAllowedFieldDepth = indexSettings.getMappingDepthLimit(); + this.maxAllowedArrayDepth = indexSettings.getMappingDepthLimit(); } @Override @@ -522,6 +565,60 @@ public void addIgnoredField(String field) { public Collection getIgnoredFields() { return Collections.unmodifiableCollection(ignoredFields); } + + @Override + public void incrementFieldCurrentDepth() { + this.currentFieldDepth++; + } + + @Override + public void decrementFieldCurrentDepth() { + if (this.currentFieldDepth > 0) { + this.currentFieldDepth--; + } + } + + @Override + public void checkFieldDepthLimit() { + if (this.currentFieldDepth > maxAllowedFieldDepth) { + this.currentFieldDepth = 0; + throw new 
OpenSearchParseException( + "The depth of the field has exceeded the allowed limit of [" + + maxAllowedFieldDepth + + "]." + + " This limit can be set by changing the [" + + MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getKey() + + "] index level setting." + ); + } + } + + @Override + public void incrementFieldArrayDepth() { + this.currentArrayDepth++; + } + + @Override + public void decrementFieldArrayDepth() { + if (this.currentArrayDepth > 0) { + this.currentArrayDepth--; + } + } + + @Override + public void checkFieldArrayDepthLimit() { + if (this.currentArrayDepth > maxAllowedArrayDepth) { + this.currentArrayDepth = 0; + throw new OpenSearchParseException( + "The depth of the nested array field has exceeded the allowed limit of [" + + maxAllowedArrayDepth + + "]." + + " This limit can be set by changing the [" + + MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getKey() + + "] index level setting." + ); + } + } } /** @@ -687,4 +784,17 @@ public final T parseExternalValue(Class clazz) { * Get dynamic mappers created while parsing. */ public abstract List getDynamicMappers(); + + public abstract void incrementFieldCurrentDepth(); + + public abstract void decrementFieldCurrentDepth(); + + public abstract void checkFieldDepthLimit(); + + public abstract void incrementFieldArrayDepth(); + + public abstract void decrementFieldArrayDepth(); + + public abstract void checkFieldArrayDepthLimit(); + } diff --git a/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java index d602fceeed041..69f53ba126790 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java @@ -75,7 +75,7 @@ public List fetchValues(SourceLookup lookup) { for (String path : sourcePaths) { Object sourceValue = lookup.extractValue(path, nullValue); if (sourceValue == null) { - return org.opensearch.common.collect.List.of(); + return List.of(); } // We allow source values to contain multiple levels of arrays, such as `"field": [[1, 2]]`. 
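The new ParseContext counters follow an increment, check, always-decrement-in-finally discipline, so a failed parse cannot leak depth into the next document parsed on the same context. A self-contained sketch of the same guard over a generic nested structure; IllegalArgumentException stands in for OpenSearchParseException:

```java
import java.util.List;
import java.util.Map;

/**
 * Depth guard in the style of ParseContext: increment on entry, check against
 * the limit, and always decrement in a finally block so failures don't leak depth.
 */
public class DepthGuard {
    private final long maxDepth;
    private long currentDepth;

    public DepthGuard(long maxDepth) {
        this.maxDepth = maxDepth;
    }

    void walk(Object node) {
        currentDepth++; // mirrors incrementFieldCurrentDepth()
        try {
            if (currentDepth > maxDepth) { // mirrors checkFieldDepthLimit()
                throw new IllegalArgumentException(
                    "The depth of the field has exceeded the allowed limit of [" + maxDepth + "]"
                );
            }
            if (node instanceof Map) {
                for (Object child : ((Map<?, ?>) node).values()) {
                    walk(child);
                }
            } else if (node instanceof List) {
                for (Object child : (List<?>) node) {
                    walk(child);
                }
            }
            // leaf values need no recursion
        } finally {
            currentDepth--; // mirrors decrementFieldCurrentDepth()
        }
    }

    public static void main(String[] args) {
        DepthGuard guard = new DepthGuard(20);
        guard.walk(Map.of("a", Map.of("b", List.of(Map.of("c", 1)))));
        System.out.println("document accepted");
    }
}
```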
diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 13ff1b91727eb..6edf008fc35b3 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -34,6 +34,7 @@ import com.carrotsearch.hppc.ObjectLongHashMap; import com.carrotsearch.hppc.ObjectLongMap; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionListener; @@ -48,14 +49,18 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.gateway.WriteStateException; import org.opensearch.index.IndexSettings; +import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.engine.SafeCommitInfo; import org.opensearch.index.shard.AbstractIndexShardComponent; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ReplicationGroup; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationTimer; import java.io.IOException; import java.nio.file.Path; @@ -68,6 +73,7 @@ import java.util.Objects; import java.util.OptionalLong; import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; @@ -243,6 +249,8 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L private final Consumer onReplicationGroupUpdated; + private volatile ReplicationCheckpoint lastPublishedReplicationCheckpoint; + /** * Get all retention leases tracked on this shard. * @@ -699,12 +707,29 @@ public static class CheckpointState implements Writeable { */ boolean replicated; + /** + * The currently searchable replication checkpoint. + */ + ReplicationCheckpoint visibleReplicationCheckpoint; + + /** + * Map of ReplicationCheckpoints to ReplicationTimers. Timers are added as new checkpoints are published, and removed when + * the replica is caught up. + */ + Map checkpointTimers; + + /** + * The time it took to complete the most recent replication event. + */ + long lastCompletedReplicationLag; + public CheckpointState(long localCheckpoint, long globalCheckpoint, boolean inSync, boolean tracked, boolean replicated) { this.localCheckpoint = localCheckpoint; this.globalCheckpoint = globalCheckpoint; this.inSync = inSync; this.tracked = tracked; this.replicated = replicated; + this.checkpointTimers = ConcurrentCollections.newConcurrentMap(); } public CheckpointState(StreamInput in) throws IOException { @@ -1137,6 +1162,116 @@ public synchronized void updateGlobalCheckpointForShard(final String allocationI assert invariant(); } + /** + * Update the local knowledge of the visible checkpoint for the specified allocation ID. + * + * This method will also stop timers for each shard and compute replication lag metrics. 
+ * + * @param allocationId the allocation ID to update the global checkpoint for + * @param visibleCheckpoint the visible checkpoint + */ + public synchronized void updateVisibleCheckpointForShard(final String allocationId, final ReplicationCheckpoint visibleCheckpoint) { + assert indexSettings.isSegRepEnabled(); + assert primaryMode; + assert handoffInProgress == false; + assert invariant(); + final CheckpointState cps = checkpoints.get(allocationId); + assert !this.shardAllocationId.equals(allocationId) && cps != null; + if (cps.checkpointTimers.isEmpty() == false) { + // stop any timers for checkpoints up to the received cp and remove from cps.checkpointTimers. + // Compute the max lag from the set of completed timers. + final AtomicLong lastFinished = new AtomicLong(0L); + cps.checkpointTimers.entrySet().removeIf((entry) -> { + boolean result = visibleCheckpoint.equals(entry.getKey()) || visibleCheckpoint.isAheadOf(entry.getKey()); + if (result) { + final ReplicationTimer timer = entry.getValue(); + timer.stop(); + lastFinished.set(Math.max(lastFinished.get(), timer.time())); + } + return result; + }); + cps.lastCompletedReplicationLag = lastFinished.get(); + } + logger.trace( + () -> new ParameterizedMessage( + "updated local knowledge for [{}] on the primary of the visible checkpoint from [{}] to [{}], active timers {}", + allocationId, + cps.visibleReplicationCheckpoint, + visibleCheckpoint, + cps.checkpointTimers.keySet() + ) + ); + cps.visibleReplicationCheckpoint = visibleCheckpoint; + assert invariant(); + } + + /** + * After a new checkpoint is published, start a timer for each replica to the checkpoint. + * @param checkpoint {@link ReplicationCheckpoint} + */ + public synchronized void setLatestReplicationCheckpoint(ReplicationCheckpoint checkpoint) { + assert indexSettings.isSegRepEnabled(); + assert primaryMode; + assert handoffInProgress == false; + if (checkpoint.equals(lastPublishedReplicationCheckpoint) == false) { + this.lastPublishedReplicationCheckpoint = checkpoint; + for (Map.Entry entry : checkpoints.entrySet()) { + if (entry.getKey().equals(this.shardAllocationId) == false) { + final CheckpointState cps = entry.getValue(); + if (cps.inSync) { + cps.checkpointTimers.computeIfAbsent(checkpoint, ignored -> { + final ReplicationTimer replicationTimer = new ReplicationTimer(); + replicationTimer.start(); + return replicationTimer; + }); + logger.trace( + () -> new ParameterizedMessage( + "updated last published checkpoint to {} - timers [{}]", + checkpoint, + cps.checkpointTimers.keySet() + ) + ); + } + } + } + } + } + + /** + * Fetch stats on segment replication. + * @return {@link Tuple} V1 - TimeValue in ms - mean replication lag for this primary to its entire group, + * V2 - Set of {@link SegmentReplicationShardStats} per shard in this primary's replication group. 
+ */ + public synchronized Set getSegmentReplicationStats() { + assert indexSettings.isSegRepEnabled(); + final ReplicationCheckpoint lastPublishedCheckpoint = this.lastPublishedReplicationCheckpoint; + if (primaryMode && lastPublishedCheckpoint != null) { + return this.checkpoints.entrySet() + .stream() + .filter(entry -> entry.getKey().equals(this.shardAllocationId) == false && entry.getValue().inSync) + .map(entry -> buildShardStats(lastPublishedCheckpoint.getLength(), entry.getKey(), entry.getValue())) + .collect(Collectors.toUnmodifiableSet()); + } + return Collections.emptySet(); + } + + private SegmentReplicationShardStats buildShardStats( + final long latestCheckpointLength, + final String allocationId, + final CheckpointState checkpointState + ) { + final Map checkpointTimers = checkpointState.checkpointTimers; + return new SegmentReplicationShardStats( + allocationId, + checkpointTimers.size(), + checkpointState.visibleReplicationCheckpoint == null + ? latestCheckpointLength + : Math.max(latestCheckpointLength - checkpointState.visibleReplicationCheckpoint.getLength(), 0), + checkpointTimers.values().stream().mapToLong(ReplicationTimer::time).max().orElse(0), + checkpointState.lastCompletedReplicationLag + ); + } + /** * Initializes the global checkpoint tracker in primary mode (see {@link #primaryMode}. Called on primary activation or promotion. */ diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java index fb046e2310d93..66d095878d123 100644 --- a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -40,8 +40,8 @@ public void beforeRefresh() throws IOException { @Override public void afterRefresh(boolean didRefresh) throws IOException { - if (didRefresh && shard.state() != IndexShardState.CLOSED && shard.getReplicationTracker().isPrimaryMode()) { - publisher.publish(shard); + if (didRefresh && shard.state() == IndexShardState.STARTED && shard.getReplicationTracker().isPrimaryMode()) { + publisher.publish(shard, shard.getLatestReplicationCheckpoint()); } } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index dd180b95e6b96..b95af8621493b 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -107,6 +107,7 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.VersionType; import org.opensearch.index.cache.IndexCache; import org.opensearch.index.cache.bitset.ShardBitsetFilterCache; @@ -568,6 +569,12 @@ public void updateShardState( : "a primary relocation is completed by the cluster-managerr, but primary mode is not active " + currentRouting; changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]"); + + // Flush here after relocation of primary, so that replica get all changes from new primary rather than waiting for more + // docs to get indexed. 
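Taken together, setLatestReplicationCheckpoint and updateVisibleCheckpointForShard implement one timer per published checkpoint per replica: publication starts the timer, and a replica reporting a visible checkpoint stops every timer at or behind it, recording the slowest as the last completed replication lag. A simplified model, assuming checkpoints can be ordered by a plain long version (the real code compares ReplicationCheckpoint instances via isAheadOf):

```java
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Simplified model of the per-replica checkpoint timers kept in CheckpointState.
 * Checkpoints are modeled as monotonically increasing longs here.
 */
public class CheckpointLagTracker {
    private final Map<Long, Long> startTimeByCheckpoint = new ConcurrentHashMap<>();
    private volatile long lastCompletedLagMillis;

    /** Primary published a new checkpoint: start timing it (at most once). */
    public void onCheckpointPublished(long checkpointVersion) {
        startTimeByCheckpoint.putIfAbsent(checkpointVersion, System.currentTimeMillis());
    }

    /** Replica reported the checkpoint it can now serve reads from. */
    public void onVisibleCheckpoint(long visibleVersion) {
        long now = System.currentTimeMillis();
        long maxLag = 0L;
        Iterator<Map.Entry<Long, Long>> it = startTimeByCheckpoint.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<Long, Long> entry = it.next();
            if (entry.getKey() <= visibleVersion) { // replica has caught up to this checkpoint
                maxLag = Math.max(maxLag, now - entry.getValue());
                it.remove();
            }
        }
        if (maxLag > 0) {
            lastCompletedLagMillis = maxLag; // lag of the slowest completed timer
        }
    }

    /** Current lag: the oldest still-outstanding checkpoint timer. */
    public long currentLagMillis() {
        long now = System.currentTimeMillis();
        return startTimeByCheckpoint.values().stream().mapToLong(start -> now - start).max().orElse(0L);
    }
}
```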
+ if (indexSettings.isSegRepEnabled()) { + flush(new FlushRequest().waitIfOngoing(true).force(true)); + } } else if (currentRouting.primary() && currentRouting.relocating() && replicationTracker.isRelocated() @@ -1423,9 +1430,17 @@ public GatedCloseable acquireLastIndexCommit(boolean flushFirst) th } } - public void finalizeReplication(SegmentInfos infos, long seqNo) throws IOException { + public Optional getReplicationEngine() { + if (getEngine() instanceof NRTReplicationEngine) { + return Optional.of((NRTReplicationEngine) getEngine()); + } else { + return Optional.empty(); + } + } + + public void finalizeReplication(SegmentInfos infos) throws IOException { if (getReplicationEngine().isPresent()) { - getReplicationEngine().get().updateSegments(infos, seqNo); + getReplicationEngine().get().updateSegments(infos); } } @@ -1486,8 +1501,12 @@ public Tuple, ReplicationCheckpoint> getLatestSegme this.shardId, getOperationPrimaryTerm(), segmentInfos.getGeneration(), - shardRouting.primary() ? getEngine().getMaxSeqNoFromSegmentInfos(segmentInfos) : getProcessedLocalCheckpoint(), - segmentInfos.getVersion() + segmentInfos.getVersion(), + // TODO: Update replicas to compute length from SegmentInfos. Replicas do not yet incref segments with + // getSegmentInfosSnapshot, so computing length from SegmentInfos can cause issues. + shardRouting.primary() + ? store.getSegmentMetadataMap(segmentInfos).values().stream().mapToLong(StoreFileMetadata::length).sum() + : store.stats(StoreStats.UNKNOWN_RESERVED_BYTES).getSizeInBytes() ) ); } catch (IOException e) { @@ -1510,7 +1529,7 @@ public boolean isSegmentReplicationAllowed() { return false; } if (this.routingEntry().primary()) { - logger.warn("Shard is marked as primary and cannot perform segment replication as a replica"); + logger.warn("Shard routing is marked primary thus cannot perform segment replication as replica"); return false; } if (state().equals(IndexShardState.STARTED) == false @@ -1734,6 +1753,10 @@ public void resetToWriteableEngine() throws IOException, InterruptedException, T indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { resetEngineToGlobalCheckpoint(); }); } + public void onCheckpointPublished(ReplicationCheckpoint checkpoint) { + replicationTracker.setLatestReplicationCheckpoint(checkpoint); + } + /** * Wrapper for a non-closing reader * @@ -2698,6 +2721,29 @@ public void updateGlobalCheckpointForShard(final String allocationId, final long replicationTracker.updateGlobalCheckpointForShard(allocationId, globalCheckpoint); } + /** + * Update the local knowledge of the visible global checkpoint for the specified allocation ID. + * + * @param allocationId the allocation ID to update the global checkpoint for + * @param visibleCheckpoint the visible checkpoint + */ + public void updateVisibleCheckpointForShard(final String allocationId, final ReplicationCheckpoint visibleCheckpoint) { + // Update target replication checkpoint only when in active primary mode + if (shardRouting.primary() && replicationTracker.isPrimaryMode()) { + verifyNotClosed(); + replicationTracker.updateVisibleCheckpointForShard(allocationId, visibleCheckpoint); + } + } + + /** + * Fetch stats on segment replication. + * @return {@link Tuple} V1 - TimeValue in ms - mean replication lag for this primary to its entire group, + * V2 - Set of {@link SegmentReplicationShardStats} per shard in this primary's replication group. 
+ */ + public Set getReplicationStats() { + return replicationTracker.getSegmentReplicationStats(); + } + /** * Add a global checkpoint listener. If the global checkpoint is equal to or above the global checkpoint the listener is waiting for, * then the listener will be notified immediately via an executor (so possibly not on the current thread). If the specified timeout @@ -2935,14 +2981,6 @@ public long getProcessedLocalCheckpoint() { }); } - private Optional getReplicationEngine() { - if (getEngine() instanceof NRTReplicationEngine) { - return Optional.of((NRTReplicationEngine) getEngine()); - } else { - return Optional.empty(); - } - } - /** * Returns the global checkpoint for the shard. * diff --git a/server/src/main/java/org/opensearch/index/shard/ShardPath.java b/server/src/main/java/org/opensearch/index/shard/ShardPath.java index e67931844b773..6de0ffc88e0b7 100644 --- a/server/src/main/java/org/opensearch/index/shard/ShardPath.java +++ b/server/src/main/java/org/opensearch/index/shard/ShardPath.java @@ -135,7 +135,7 @@ public boolean isCustomDataPath() { */ public static ShardPath loadFileCachePath(NodeEnvironment env, ShardId shardId) { NodeEnvironment.NodePath path = env.fileCacheNodePath(); - final Path dataPath = env.resolveCustomLocation(path.fileCachePath.toString(), shardId); + final Path dataPath = env.resolveFileCacheLocation(path.fileCachePath, shardId); final Path statePath = path.resolve(shardId); return new ShardPath(true, dataPath, statePath, shardId); } diff --git a/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java index d39b823df4d3c..2c7e66b9a121d 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java @@ -73,7 +73,8 @@ private Future createRemoteSnapshotDirectoryFromSnapsho ShardPath localShardPath, BlobStoreRepository blobStoreRepository ) throws IOException { - final BlobPath blobPath = new BlobPath().add("indices") + final BlobPath blobPath = blobStoreRepository.basePath() + .add("indices") .add(IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.get(indexSettings.getSettings())) .add(Integer.toString(localShardPath.getShardId().getId())); final SnapshotId snapshotId = new SnapshotId( diff --git a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java index e97f093bb7703..7319a5324777a 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java @@ -63,20 +63,13 @@ abstract class OnDemandBlockIndexInput extends IndexInput implements RandomAcces */ protected final boolean isClone; - // Variables needed for block calculation and fetching logic /** - * Block size shift (default value is 13 = 8KB) + * Variables used for block calculation and fetching. blockSize must be a + * power of two, and is defined as 2^blockShiftSize. blockMask is defined + * as blockSize - 1 and is used to calculate the offset within a block. 
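A quick worked example of the arithmetic described in this comment, using the new default shift of 23 (2^23 = 8 MiB blocks): the high bits of a file position select the block and the low bits, extracted with the mask, locate the byte inside it.

```java
public class BlockMath {
    public static void main(String[] args) {
        final int blockSizeShift = 23;             // default in this diff: 8 MiB blocks
        final int blockSize = 1 << blockSizeShift; // 8_388_608
        final int blockMask = blockSize - 1;

        long pos = 20_000_000L;                    // absolute position in the file
        long blockId = pos >>> blockSizeShift;     // which block holds this position -> 2
        long offsetInBlock = pos & blockMask;      // position within that block -> 3_222_784

        System.out.println("blockSize=" + blockSize);
        System.out.println("pos=" + pos + " -> blockId=" + blockId + ", offset=" + offsetInBlock);
        // sanity check: recompose the absolute position
        assert pos == (blockId << blockSizeShift) + offsetInBlock;
    }
}
```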
*/ protected final int blockSizeShift; - - /** - * Fixed block size - */ protected final int blockSize; - - /** - * Block mask - */ protected final int blockMask; /** @@ -380,8 +373,8 @@ public static Builder builder() { } public static class Builder { - // Block size shift (default value is 13 = 8KB) - public static final int DEFAULT_BLOCK_SIZE_SHIFT = 13; + // Block size shift (default value is 23 == 2^23 == 8MiB) + public static final int DEFAULT_BLOCK_SIZE_SHIFT = 23; public static final int DEFAULT_BLOCK_SIZE = 1 << DEFAULT_BLOCK_SIZE_SHIFT;; private String resourceDescription; diff --git a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java index 6a873d4531939..b3f8ee9c1817e 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java @@ -144,13 +144,7 @@ protected IndexInput fetchBlock(int blockId) throws IOException { .directory(directory) .fileName(blockFileName) .build(); - try { - return transferManager.fetchBlob(blobFetchRequest); - } catch (InterruptedException e) { - logger.error("Interrupted while fetching [{}]", blobFetchRequest); - Thread.currentThread().interrupt(); - throw new IllegalStateException(e); - } + return transferManager.fetchBlob(blobFetchRequest); } @Override diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java index 6b8816f5f3374..2f5693415216b 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java @@ -12,10 +12,16 @@ import org.opensearch.index.store.remote.utils.cache.RefCountedCache; import org.opensearch.index.store.remote.utils.cache.SegmentedCache; import org.opensearch.index.store.remote.utils.cache.stats.CacheStats; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; import java.nio.file.Path; -import java.util.Map; +import java.util.List; import java.util.function.BiFunction; +import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory.LOCAL_STORE_LOCATION; + /** * File Cache (FC) is introduced to solve the problem that the local disk cannot hold * the entire dataset on remote store. 
It maintains a node level view of index files with priorities, @@ -45,21 +51,17 @@ public long capacity() { return theCache.capacity(); } + @Override public CachedIndexInput put(Path filePath, CachedIndexInput indexInput) { return theCache.put(filePath, indexInput); } @Override - public void putAll(Map m) { - theCache.putAll(m); - } - - @Override - public CachedIndexInput computeIfPresent( + public CachedIndexInput compute( Path key, BiFunction remappingFunction ) { - return theCache.computeIfPresent(key, remappingFunction); + return theCache.compute(key, remappingFunction); } /** @@ -84,11 +86,6 @@ public void remove(final Path filePath) { theCache.remove(filePath); } - @Override - public void removeAll(Iterable keys) { - theCache.removeAll(keys); - } - @Override public void clear() { theCache.clear(); @@ -123,4 +120,37 @@ public CacheUsage usage() { public CacheStats stats() { return theCache.stats(); } + + /** + * Restores the file cache instance performing a folder scan of the + * {@link org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory#LOCAL_STORE_LOCATION} + * directory within the provided file cache path. + */ + public void restoreFromDirectory(List fileCacheDataPaths) { + fileCacheDataPaths.stream() + .filter(Files::isDirectory) + .map(path -> path.resolve(LOCAL_STORE_LOCATION)) + .filter(Files::isDirectory) + .flatMap(dir -> { + try { + return Files.list(dir); + } catch (IOException e) { + throw new UncheckedIOException( + "Unable to process file cache directory. Please clear the file cache for node startup.", + e + ); + } + }) + .filter(Files::isRegularFile) + .forEach(path -> { + try { + put(path.toAbsolutePath(), new FileCachedIndexInput.ClosedIndexInput(Files.size(path))); + } catch (IOException e) { + throw new UncheckedIOException( + "Unable to retrieve cache file details. Please clear the file cache for node startup.", + e + ); + } + }); + } } diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCachedIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCachedIndexInput.java index a062254736878..a75ed001f0db3 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCachedIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCachedIndexInput.java @@ -161,4 +161,61 @@ public void close() throws IOException { public boolean isClosed() { return closed; } + + /** + * IndexInput instance which is utilized to fetch length for the input without opening the IndexInput. 
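The restore walk above can be exercised in isolation. In this sketch "RemoteLocalStore" is a hypothetical directory name standing in for the LOCAL_STORE_LOCATION constant, whose value is not shown in this diff:

```java
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Stream;

/**
 * Stand-alone sketch of the restore-from-disk scan: walk a set of cache data
 * paths and record each surviving regular file with its on-disk length.
 */
public class CacheRestoreScan {
    public static void main(String[] args) {
        List<Path> dataPaths = List.of(Path.of(args[0]));
        dataPaths.stream()
            .filter(Files::isDirectory)
            .map(p -> p.resolve("RemoteLocalStore")) // assumed constant value
            .filter(Files::isDirectory)
            // flatMap closes each per-directory stream after it is consumed
            .flatMap(CacheRestoreScan::listFiles)
            .filter(Files::isRegularFile)
            .forEach(p -> {
                try {
                    System.out.println(p.toAbsolutePath() + " -> " + Files.size(p) + " bytes");
                } catch (IOException e) {
                    throw new UncheckedIOException("Unable to read cache file details", e);
                }
            });
    }

    private static Stream<Path> listFiles(Path dir) {
        try {
            return Files.list(dir);
        } catch (IOException e) {
            throw new UncheckedIOException("Unable to list cache directory", e);
        }
    }
}
```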
+ */ + public static class ClosedIndexInput extends CachedIndexInput { + private final long length; + + public ClosedIndexInput(long length) { + super("ClosedIndexInput"); + this.length = length; + } + + @Override + public void close() throws IOException { + // No-Op + } + + @Override + public long getFilePointer() { + throw new UnsupportedOperationException("ClosedIndexInput doesn't support getFilePointer()."); + } + + @Override + public void seek(long pos) throws IOException { + throw new UnsupportedOperationException("ClosedIndexInput doesn't support seek()."); + } + + @Override + public long length() { + return length; + } + + @Override + public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + throw new UnsupportedOperationException("ClosedIndexInput couldn't be sliced."); + } + + @Override + public byte readByte() throws IOException { + throw new UnsupportedOperationException("ClosedIndexInput doesn't support read."); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + throw new UnsupportedOperationException("ClosedIndexInput doesn't support read."); + } + + @Override + public IndexInput clone() { + throw new UnsupportedOperationException("ClosedIndexInput cannot be cloned."); + } + + @Override + public boolean isClosed() { + return true; + } + } } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/ConcurrentInvocationLinearizer.java b/server/src/main/java/org/opensearch/index/store/remote/utils/ConcurrentInvocationLinearizer.java deleted file mode 100644 index 2b55377bc17b9..0000000000000 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/ConcurrentInvocationLinearizer.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.store.remote.utils; - -import java.io.IOException; -import java.util.Collections; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ExecutionException; - -import org.opensearch.common.CheckedFunction; - -/** - * A utility class which can be used to serialize concurrent invocations and to achieve "invoke simultaneously once at any time" - * semantic. This class does not implement any concurrency itself. When there is no concurrent access the work will be performed - * on the calling thread, though the result of that work will be shared with any concurrent requests for the same key. - * - * @param the method parameter type where this method invocation will be linearized - * @param return type of the method - * @opensearch.internal - */ -class ConcurrentInvocationLinearizer { - private final ConcurrentMap> invokeOnceCache = new ConcurrentHashMap<>(); - - /** - * Invokes the given function. If another thread is concurrently invoking the same function, as - * identified by the given input, then this call will block and return the result of that - * computation. Otherwise it will synchronously invoke the given function and return the result. 
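The semantics just described, where concurrent callers for the same key share a single computation performed on the first caller's thread, can be captured with a map of in-flight futures. A minimal sketch of the pattern (not the removed class itself, which also unwraps checked exceptions):

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

/**
 * Minimal "invoke once per key at any time" helper: concurrent callers for the
 * same key share one in-flight computation; the entry is removed on completion.
 */
public class OncePerKey<K, V> {
    private final ConcurrentMap<K, CompletableFuture<V>> inFlight = new ConcurrentHashMap<>();

    public V invoke(K key, Function<K, V> work) {
        CompletableFuture<V> mine = new CompletableFuture<>();
        CompletableFuture<V> existing = inFlight.putIfAbsent(key, mine);
        if (existing != null) {
            return existing.join();     // someone else is computing; wait for their result
        }
        try {
            V result = work.apply(key); // do the work on the calling thread
            mine.complete(result);
            return result;
        } catch (RuntimeException e) {
            mine.completeExceptionally(e);
            throw e;
        } finally {
            inFlight.remove(key);       // allow later invocations to recompute
        }
    }
}
```

The PR can retire this indirection because, as the reworked TransferManager further down shows, the cache's compute method now performs the load atomically under the cache's own locking, giving an equivalent run-once guarantee at the cache layer.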
- * @param input The input to uniquely identify this function - * @param function The function to invoke - * @return The result of the function - * @throws InterruptedException thrown if interrupted while blocking - * @throws IOException thrown from given function - */ - RET_TYPE linearize(METHOD_PARAM_TYPE input, CheckedFunction function) - throws InterruptedException, IOException { - try { - return linearizeInternal(input, function).get(); - } catch (ExecutionException e) { - if (e.getCause() instanceof IOException) { - throw (IOException) e.getCause(); - } else if (e.getCause() instanceof RuntimeException) { - throw (RuntimeException) e.getCause(); - } else if (e.getCause() instanceof Error) { - throw (Error) e.getCause(); - } - throw new RuntimeException("Unknown exception cause", e.getCause()); - } - } - - // Visible for testing - CompletableFuture linearizeInternal( - METHOD_PARAM_TYPE input, - CheckedFunction function - ) { - final CompletableFuture newFuture = new CompletableFuture<>(); - final CompletableFuture existing = invokeOnceCache.putIfAbsent(input, newFuture); - if (existing == null) { - // No concurrent work is happening for this key, so need to do the - // work and complete the future with the result. - try { - newFuture.complete(function.apply(input)); - } catch (Throwable e) { - newFuture.completeExceptionally(e); - } finally { - invokeOnceCache.remove(input); - } - return newFuture; - } else { - // Another thread is doing the work, so return the future to its result - return existing; - } - } - - // Visible for testing - Map> getInvokeOnceCache() { - return Collections.unmodifiableMap(invokeOnceCache); - } -} diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java b/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java index 6f015fe810daf..976827d582745 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java @@ -22,7 +22,6 @@ import java.io.OutputStream; import java.nio.file.Files; import java.nio.file.Path; -import java.util.Objects; /** * This acts as entry point to fetch {@link BlobFetchRequest} and return actual {@link IndexInput}. 
Utilizes the BlobContainer interface to @@ -34,12 +33,10 @@ public class TransferManager { private static final Logger logger = LogManager.getLogger(TransferManager.class); private final BlobContainer blobContainer; - private final ConcurrentInvocationLinearizer invocationLinearizer; private final FileCache fileCache; public TransferManager(final BlobContainer blobContainer, final FileCache fileCache) { this.blobContainer = blobContainer; - this.invocationLinearizer = new ConcurrentInvocationLinearizer<>(); this.fileCache = fileCache; } @@ -48,53 +45,46 @@ public TransferManager(final BlobContainer blobContainer, final FileCache fileCa * @param blobFetchRequest to fetch * @return future of IndexInput augmented with internal caching maintenance tasks */ - public IndexInput fetchBlob(BlobFetchRequest blobFetchRequest) throws InterruptedException, IOException { - final IndexInput indexInput = invocationLinearizer.linearize( - blobFetchRequest.getFilePath(), - p -> fetchOriginBlob(blobFetchRequest) - ); - return indexInput.clone(); - } + public IndexInput fetchBlob(BlobFetchRequest blobFetchRequest) throws IOException { + final Path key = blobFetchRequest.getFilePath(); - /** - * Fetches the "origin" IndexInput from the cache, downloading it first if it is - * not already cached. This instance must be cloned before using. This method is - * accessed through the ConcurrentInvocationLinearizer so read-check-write is - * acceptable here - */ - private IndexInput fetchOriginBlob(BlobFetchRequest blobFetchRequest) throws IOException { - // check if the origin is already in block cache - IndexInput origin = fileCache.computeIfPresent(blobFetchRequest.getFilePath(), (path, cachedIndexInput) -> { - if (cachedIndexInput.isClosed()) { - // if it's already in the file cache, but closed, open it and replace the original one + final IndexInput origin = fileCache.compute(key, (path, cachedIndexInput) -> { + if (cachedIndexInput == null) { try { - IndexInput luceneIndexInput = blobFetchRequest.getDirectory().openInput(blobFetchRequest.getFileName(), IOContext.READ); - return new FileCachedIndexInput(fileCache, blobFetchRequest.getFilePath(), luceneIndexInput); - } catch (IOException ioe) { - logger.warn("Open index input " + blobFetchRequest.getFilePath() + " got error ", ioe); - // open failed so return null to download the file again + return new FileCachedIndexInput(fileCache, blobFetchRequest.getFilePath(), downloadBlockLocally(blobFetchRequest)); + } catch (IOException e) { + logger.warn("Failed to download " + blobFetchRequest.getFilePath(), e); return null; } - + } else { + if (cachedIndexInput.isClosed()) { + // if it's already in the file cache, but closed, open it and replace the original one + try { + final IndexInput luceneIndexInput = blobFetchRequest.getDirectory() + .openInput(blobFetchRequest.getFileName(), IOContext.READ); + return new FileCachedIndexInput(fileCache, blobFetchRequest.getFilePath(), luceneIndexInput); + } catch (IOException e) { + logger.warn("Failed to open existing file for " + blobFetchRequest.getFilePath(), e); + return null; + } + } + // already in the cache and ready to be used (open) + return cachedIndexInput; } - // already in the cache and ready to be used (open) - return cachedIndexInput; }); - if (Objects.isNull(origin)) { - // origin is not in file cache, download origin - - // open new origin - IndexInput downloaded = downloadBlockLocally(blobFetchRequest); - - // refcount = 0 at the beginning - FileCachedIndexInput newOrigin = new FileCachedIndexInput(fileCache, 
blobFetchRequest.getFilePath(), downloaded); + if (origin == null) { + throw new IOException("Failed to create IndexInput for " + blobFetchRequest.getFileName()); + } - // put origin into file cache - fileCache.put(blobFetchRequest.getFilePath(), newOrigin); - origin = newOrigin; + // Origin was either retrieved from the cache or newly added, either + // way the reference count has been incremented by one. We can only + // decrement this reference _after_ creating the clone to be returned. + try { + return origin.clone(); + } finally { + fileCache.decRef(key); } - return origin; } private IndexInput downloadBlockLocally(BlobFetchRequest blobFetchRequest) throws IOException { diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/LRUCache.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/LRUCache.java index b9a9c063fde22..75b28baafe57f 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/LRUCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/LRUCache.java @@ -17,7 +17,6 @@ import org.opensearch.index.store.remote.utils.cache.stats.StatsCounter; import java.util.HashMap; -import java.util.Map; import java.util.Objects; import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiFunction; @@ -137,9 +136,7 @@ public V get(K key) { return null; } // hit - if (node.evictable()) { - lru.moveToBack(node); - } + incRef(key); statsCounter.recordHits(key, 1); return node.value; } finally { @@ -147,46 +144,21 @@ public V get(K key) { } } - /** - * If put a new item to the cache, it's zero referenced. - * Otherwise, just replace the node with new value and new weight. - */ @Override public V put(K key, V value) { Objects.requireNonNull(key); Objects.requireNonNull(value); - final long weight = weigher.weightOf(value); final ReentrantLock lock = this.lock; lock.lock(); try { Node node = data.get(key); if (node != null) { final V oldValue = node.value; - final long oldWeight = node.weight; - // update the value and weight - node.value = value; - node.weight = weight; - // update usage - final long weightDiff = weight - oldWeight; - if (node.refCount > 0) { - activeUsage += weightDiff; - } - if (node.evictable()) { - lru.moveToBack(node); - } - usage += weightDiff; - // call listeners - statsCounter.recordReplacement(); - listener.onRemoval(new RemovalNotification<>(key, oldValue, RemovalReason.REPLACED)); - evict(); + replaceNode(node, value); return oldValue; } else { - Node newNode = new Node<>(key, value, weight); - data.put(key, newNode); - lru.add(newNode); - usage += weight; - evict(); + addNode(key, value); return null; } } finally { @@ -195,63 +167,34 @@ public V put(K key, V value) { } @Override - public void putAll(Map m) { - for (Map.Entry e : m.entrySet()) - put(e.getKey(), e.getValue()); - } - - @Override - public V computeIfPresent(K key, BiFunction remappingFunction) { + public V compute(K key, BiFunction remappingFunction) { Objects.requireNonNull(key); + Objects.requireNonNull(remappingFunction); final ReentrantLock lock = this.lock; lock.lock(); try { - Node node = data.get(key); - if (node != null && node.value != null) { - V v = remappingFunction.apply(key, node.value); - if (v != null) { - final V oldValue = node.value; - final long oldWeight = node.weight; - final long weight = weigher.weightOf(v); - // update the value and weight - node.value = v; - node.weight = weight; - - // update usage - final long weightDiff = weight - oldWeight; - if (node.evictable()) 
{ - lru.moveToBack(node); - } - - if (node.refCount > 0) { - activeUsage += weightDiff; - } - - usage += weightDiff; - statsCounter.recordHits(key, 1); - if (oldValue != node.value) { - statsCounter.recordReplacement(); - listener.onRemoval(new RemovalNotification<>(node.key, oldValue, RemovalReason.REPLACED)); - } - evict(); - return v; + final Node node = data.get(key); + if (node == null) { + final V newValue = remappingFunction.apply(key, null); + if (newValue == null) { + // Remapping function asked for removal, but nothing to remove + return null; + } else { + addNode(key, newValue); + statsCounter.recordMisses(key, 1); + return newValue; + } + } else { + final V newValue = remappingFunction.apply(key, node.value); + if (newValue == null) { + removeNode(key); + return null; } else { - // is v is null, remove the item - data.remove(key); - if (node.refCount > 0) { - activeUsage -= node.weight; - } - usage -= node.weight; - if (node.evictable()) { - lru.remove(node); - } - statsCounter.recordRemoval(node.weight); - listener.onRemoval(new RemovalNotification<>(node.key, node.value, RemovalReason.EXPLICIT)); + statsCounter.recordHits(key, 1); + replaceNode(node, newValue); + return newValue; } } - - statsCounter.recordMisses(key, 1); - return null; } finally { lock.unlock(); } @@ -263,30 +206,12 @@ public void remove(K key) { final ReentrantLock lock = this.lock; lock.lock(); try { - Node node = data.remove(key); - if (node != null) { - if (node.refCount > 0) { - activeUsage -= node.weight; - } - usage -= node.weight; - if (node.evictable()) { - lru.remove(node); - } - statsCounter.recordRemoval(node.weight); - listener.onRemoval(new RemovalNotification<>(node.key, node.value, RemovalReason.EXPLICIT)); - } + removeNode(key); } finally { lock.unlock(); } } - @Override - public void removeAll(Iterable keys) { - for (K key : keys) { - remove(key); - } - } - @Override public void clear() { final ReentrantLock lock = this.lock; @@ -409,11 +334,56 @@ public CacheStats stats() { } } - boolean hasOverflowed() { + private void addNode(K key, V value) { + final long weight = weigher.weightOf(value); + Node newNode = new Node<>(key, value, weight); + data.put(key, newNode); + usage += weight; + incRef(key); + evict(); + } + + private void replaceNode(Node node, V newValue) { + if (node.value != newValue) { // replace if new value is not the same instance as existing value + final V oldValue = node.value; + final long oldWeight = node.weight; + final long newWeight = weigher.weightOf(newValue); + // update the value and weight + node.value = newValue; + node.weight = newWeight; + // update usage + final long weightDiff = newWeight - oldWeight; + if (node.refCount > 0) { + activeUsage += weightDiff; + } + usage += weightDiff; + statsCounter.recordReplacement(); + listener.onRemoval(new RemovalNotification<>(node.key, oldValue, RemovalReason.REPLACED)); + } + incRef(node.key); + evict(); + } + + private void removeNode(K key) { + Node node = data.remove(key); + if (node != null) { + if (node.refCount > 0) { + activeUsage -= node.weight; + } + usage -= node.weight; + if (node.evictable()) { + lru.remove(node); + } + statsCounter.recordRemoval(node.weight); + listener.onRemoval(new RemovalNotification<>(node.key, node.value, RemovalReason.EXPLICIT)); + } + } + + private boolean hasOverflowed() { return usage >= capacity; } - void evict() { + private void evict() { // Attempts to evict entries from the cache if it exceeds the maximum // capacity. 
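The eviction loop that follows only ever considers entries whose reference count has dropped to zero (only those sit on the LRU list), which is why every caller pairs a pinning operation such as put or compute with a later decRef. A toy model of that contract, with generic types in place of Path and CachedIndexInput:

```java
import java.util.HashMap;
import java.util.Map;

/**
 * Toy model of the reference-counting contract: put pins an entry (refCount + 1),
 * callers unpin with decRef, and only entries at refCount == 0 are evictable.
 */
public class RefCountedMap<K, V> {
    private static final class Entry<V> {
        V value;
        int refCount;
        Entry(V value) { this.value = value; }
    }

    private final Map<K, Entry<V>> entries = new HashMap<>();

    /** Insert or replace, pinning the entry for the caller. */
    public synchronized V put(K key, V value) {
        Entry<V> e = entries.computeIfAbsent(key, k -> new Entry<>(value));
        e.value = value;
        e.refCount++;
        return e.value;
    }

    /** Unpin; the entry only becomes evictable once refCount reaches zero. */
    public synchronized void decRef(K key) {
        Entry<V> e = entries.get(key);
        if (e != null && e.refCount > 0) {
            e.refCount--;
        }
    }

    /** Evict every unpinned entry; returns how many were removed. */
    public synchronized int evictUnpinned() {
        int before = entries.size();
        entries.values().removeIf(e -> e.refCount == 0);
        return before - entries.size();
    }
}
```

Usage mirrors fetchBlob above: pin via put or compute, clone or read what you need, then decRef in a finally block.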
while (hasOverflowed()) { diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/RefCountedCache.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/RefCountedCache.java index 7964e41b4f48a..bbb37dc57ae7e 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/RefCountedCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/RefCountedCache.java @@ -10,14 +10,13 @@ import org.opensearch.index.store.remote.utils.cache.stats.CacheStats; -import java.util.Map; import java.util.function.BiFunction; /** * Custom Cache which support typical cache operations (put, get, ...) and it support reference counting per individual key which might * change eviction behavior * @param type of the key - * @param type of th value + * @param type of the value * * @opensearch.internal */ @@ -25,7 +24,8 @@ public interface RefCountedCache { /** * Returns the value associated with {@code key} in this cache, or {@code null} if there is no - * cached value for {@code key}. + * cached value for {@code key}. Retrieving an item automatically increases its reference + * count. */ V get(K key); @@ -35,36 +35,28 @@ public interface RefCountedCache { */ V put(K key, V value); - /** - * Copies all the mappings from the specified map to the cache. The effect of this call is - * equivalent to that of calling {@code put(k, v)} on this map once for each mapping from key - * {@code k} to value {@code v} in the specified map. The behavior of this operation is undefined - * if the specified map is modified while the operation is in progress. - */ - void putAll(Map m); - /** * If the specified key is already associated with a value, attempts to update its value using the given mapping - * function and enters the new value into this map unless null. - * - * If the specified key is NOT already associated with a value, return null without applying the mapping function. - * + * function and enters the new value. If the mapping function returns null the item is removed from the + * cache, regardless of its reference count. If the mapping function returns non-null the value is updated. + * The new entry will have the reference count of the previous entry plus one, as this method automatically + * increases the reference count by one when it returns the newly mapped value. + *
+ * If the specified key is NOT already associated with a value, then the value of the remapping function + * will be associated with the given key, and its reference count will be set to one. If the remapping function + * returns null then nothing is done. + *
* The remappingFunction method for a given key will be invoked at most once. */ - V computeIfPresent(K key, BiFunction remappingFunction); + V compute(K key, BiFunction remappingFunction); /** - * Discards any cached value for key {@code key}. + * Discards any cached value for key {@code key}, regardless of reference count. */ void remove(K key); /** - * Discards any cached values for keys {@code keys}. - */ - void removeAll(Iterable keys); - - /** - * Discards all entries in the cache. + * Discards all entries in the cache, regardless of reference count. */ void clear(); @@ -83,6 +75,11 @@ public interface RefCountedCache { */ void decRef(K key); + /** + * Removes all cache entries with a reference count of zero, regardless of current capacity. + * + * @return The total weight of all removed entries. + */ long prune(); /** diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/SegmentedCache.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/SegmentedCache.java index 04b0581b41ff5..42e44aa5f6a15 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/SegmentedCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/SegmentedCache.java @@ -13,7 +13,6 @@ import org.opensearch.common.cache.Weigher; import org.opensearch.index.store.remote.utils.cache.stats.CacheStats; -import java.util.Map; import java.util.Objects; import java.util.function.BiFunction; @@ -91,15 +90,9 @@ public V put(K key, V value) { } @Override - public void putAll(Map m) { - for (Map.Entry e : m.entrySet()) - put(e.getKey(), e.getValue()); - } - - @Override - public V computeIfPresent(K key, BiFunction remappingFunction) { + public V compute(K key, BiFunction remappingFunction) { if (key == null || remappingFunction == null) throw new NullPointerException(); - return segmentFor(key).computeIfPresent(key, remappingFunction); + return segmentFor(key).compute(key, remappingFunction); } @Override @@ -108,12 +101,6 @@ public void remove(K key) { segmentFor(key).remove(key); } - @Override - public void removeAll(Iterable keys) { - for (K k : keys) - remove(k); - } - @Override public void clear() { for (RefCountedCache cache : table) { diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/StatsCounter.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/StatsCounter.java index 680dc441252dc..b096bb8d652ae 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/StatsCounter.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/StatsCounter.java @@ -43,7 +43,7 @@ public interface StatsCounter { * Records the explicit removal of an entry from the cache. This should only been called when an entry is * removed as a result of manual * {@link RefCountedCache#remove(Object)} - * {@link RefCountedCache#computeIfPresent(Object, BiFunction)} + * {@link RefCountedCache#compute(Object, BiFunction)} * * @param weight the weight of the removed entry */ @@ -53,7 +53,7 @@ public interface StatsCounter { * Records the replacement of an entry from the cache. 
This should only been called when an entry is * replaced as a result of manual * {@link RefCountedCache#put(Object, Object)} - * {@link RefCountedCache#computeIfPresent(Object, BiFunction)} + * {@link RefCountedCache#compute(Object, BiFunction)} */ void recordReplacement(); diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index 3318f6858dc82..f9ddb946b7e59 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -2016,11 +2016,15 @@ public static String createEmptyTranslog( EMPTY_TRANSLOG_BUFFER_SIZE, minTranslogGeneration, initialGlobalCheckpoint, - () -> { throw new UnsupportedOperationException(); }, + () -> { + throw new UnsupportedOperationException(); + }, () -> { throw new UnsupportedOperationException(); }, primaryTerm, new TragicExceptionHolder(), - seqNo -> { throw new UnsupportedOperationException(); }, + seqNo -> { + throw new UnsupportedOperationException(); + }, BigArrays.NON_RECYCLING_INSTANCE ); writer.close(); diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java index 1a97d334df58f..bf9e4607b695a 100644 --- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java +++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java @@ -8,6 +8,9 @@ package org.opensearch.indices.replication; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; @@ -26,6 +29,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -37,6 +41,8 @@ * @opensearch.internal */ class OngoingSegmentReplications { + + private static final Logger logger = LogManager.getLogger(OngoingSegmentReplications.class); private final RecoverySettings recoverySettings; private final IndicesService indicesService; private final Map copyStateMap; @@ -115,6 +121,10 @@ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener { + if (segrepHandler != null) { + logger.warn("Override handler for allocation id {}", request.getTargetAllocationId()); + cancelHandlers(handler -> handler.getAllocationId().equals(request.getTargetAllocationId()), "cancel due to retry"); + } + return createTargetHandler(request.getTargetNode(), copyState, request.getTargetAllocationId(), fileChunkWriter); + }); return copyState; } @@ -163,8 +170,8 @@ synchronized void cancel(IndexShard shard, String reason) { /** * Cancel all Replication events for the given allocation ID, intended to be called when a primary is shutting down. * - * @param allocationId {@link String} - Allocation ID. - * @param reason {@link String} - Reason for the cancel + * @param allocationId {@link String} - Allocation ID. + * @param reason {@link String} - Reason for the cancel */ synchronized void cancel(String allocationId, String reason) { final SegmentReplicationSourceHandler handler = allocationIdToHandlers.remove(allocationId); @@ -195,6 +202,11 @@ int size() { return allocationIdToHandlers.size(); } + // Visible for tests. 
+ Map getHandlers() { + return allocationIdToHandlers; + } + int cachedCopyStateSize() { return copyStateMap.size(); } @@ -254,8 +266,25 @@ private void cancelHandlers(Predicate p .filter(predicate) .map(SegmentReplicationSourceHandler::getAllocationId) .collect(Collectors.toList()); + if (allocationIds.size() == 0) { + return; + } + logger.warn(() -> new ParameterizedMessage("Cancelling replications for allocationIds {}", allocationIds)); for (String allocationId : allocationIds) { cancel(allocationId, reason); } } + + /** + * Clear copy state and target handlers for any allocation IDs that are no longer in-sync. + * @param shardId {@link ShardId} + * @param inSyncAllocationIds {@link Set} of in-sync allocation IDs. + */ + public void clearOutOfSyncIds(ShardId shardId, Set inSyncAllocationIds) { + cancelHandlers( + (handler) -> handler.getCopyState().getShard().shardId().equals(shardId) + && inSyncAllocationIds.contains(handler.getAllocationId()) == false, + "Shard is no longer in-sync with the primary" + ); + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java index f73028bee42f9..b211d81c1c76a 100644 --- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -13,7 +13,6 @@ import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.recovery.RecoverySettings; @@ -42,6 +41,7 @@ public class PrimaryShardReplicationSource implements SegmentReplicationSource { private final DiscoveryNode sourceNode; private final DiscoveryNode targetNode; private final String targetAllocationId; + private final RecoverySettings recoverySettings; public PrimaryShardReplicationSource( DiscoveryNode targetNode, @@ -59,6 +59,7 @@ public PrimaryShardReplicationSource( ); this.sourceNode = sourceNode; this.targetNode = targetNode; + this.recoverySettings = recoverySettings; } @Override @@ -83,17 +84,6 @@ public void getSegmentFiles( ) { final Writeable.Reader reader = GetSegmentFilesResponse::new; final ActionListener responseListener = ActionListener.map(listener, r -> r); - // Few of the below assumptions and calculations are added for experimental release of segment replication feature in 2.3 - // version. These can change in upcoming releases. - // Storing the size of files to fetch in bytes. - final long sizeOfSegmentFiles = filesToFetch.stream().mapToLong(file -> file.length()).sum(); - // Maximum size of files to fetch (segment files) in bytes, that can be processed in 1 minute for a m5.xlarge machine.
- long baseSegmentFilesSize = 100000000; - - // Formula for calculating time needed to process a replication event's files to fetch process - final long timeToGetSegmentFiles = 1 + (sizeOfSegmentFiles / baseSegmentFilesSize); final GetSegmentFilesRequest request = new GetSegmentFilesRequest( replicationId, targetAllocationId, @@ -102,7 +92,7 @@ public void getSegmentFiles( checkpoint ); final TransportRequestOptions options = TransportRequestOptions.builder() - .withTimeout(TimeValue.timeValueMinutes(timeToGetSegmentFiles)) + .withTimeout(recoverySettings.internalActionLongTimeout()) .build(); transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, options, responseListener, reader); } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index 750e7629783e7..f9720991338c2 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -159,6 +159,7 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene sendFileStep.whenComplete(r -> { try { + shard.updateVisibleCheckpointForShard(allocationId, copyState.getCheckpoint()); future.onResponse(new GetSegmentFilesResponse(List.of(storeFileMetadata))); } finally { IOUtils.close(resources); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index 91b8243440ac5..247a0cdc0dc58 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -14,12 +14,14 @@ import org.opensearch.action.support.ChannelActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.component.AbstractLifecycleComponent; import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; @@ -35,6 +37,7 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.Set; import java.util.concurrent.atomic.AtomicLong; /** @@ -162,6 +165,19 @@ public void clusterChanged(ClusterChangedEvent event) { ongoingSegmentReplications.cancelReplication(removedNode); } } + // if a replica for one of the primary shards on this node has closed, + // we need to ensure its state has cleared up in ongoing replications. 
+ if (event.routingTableChanged()) { + for (IndexService indexService : indicesService) { + for (IndexShard indexShard : indexService) { + if (indexShard.routingEntry().primary()) { + final IndexMetadata indexMetadata = indexService.getIndexSettings().getIndexMetadata(); + final Set inSyncAllocationIds = indexMetadata.inSyncAllocationIds(indexShard.shardId().id()); + ongoingSegmentReplications.clearOutOfSyncIds(indexShard.shardId(), inSyncAllocationIds); + } + } + } + } } @Override diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 6f46fe8398388..9edafade883be 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -108,7 +108,7 @@ public String description() { @Override public void notifyListener(ReplicationFailedException e, boolean sendShardFailure) { - // Cancellations still are passed to our SegmentReplicationListner as failures, if we have failed because of cancellation + // Cancellations still are passed to our SegmentReplicationListener as failures, if we have failed because of cancellation // update the stage. final Throwable cancelledException = ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class); if (cancelledException != null) { @@ -184,15 +184,20 @@ private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener { cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); - multiFileWriter.renameAllTempFiles(); - final Store store = store(); - store.incRef(); try { + multiFileWriter.renameAllTempFiles(); + final Store store = store(); + store.incRef(); // Deserialize the new SegmentInfos object sent from the primary. final ReplicationCheckpoint responseCheckpoint = checkpointInfoResponse.getCheckpoint(); SegmentInfos infos = SegmentInfos.readCommit( @@ -220,7 +225,7 @@ private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse, responseCheckpoint.getSegmentsGen() ); cancellableThreads.checkForCancel(); - indexShard.finalizeReplication(infos, responseCheckpoint.getSeqNo()); + indexShard.finalizeReplication(infos); store.cleanupAndPreserveLatestCommitPoint("finalize - clean with in memory infos", infos); } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { // this is a fatal exception at this stage. 
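
Note on the clearOutOfSyncIds/cancelHandlers change above: the cleanup is driven purely by set membership, that is, a handler is cancelled when it belongs to the given shard and its allocation ID is absent from the cluster state's in-sync set. The following is a minimal, self-contained Java sketch of that predicate (not part of the patch; the String-keyed map and all identifiers here are hypothetical stand-ins for the real SegmentReplicationSourceHandler bookkeeping):

    import java.util.List;
    import java.util.Map;
    import java.util.Set;
    import java.util.stream.Collectors;

    public class OutOfSyncFilterSketch {
        public static void main(String[] args) {
            String shardId = "[index-a][0]";
            // Handlers registered on the primary, keyed by target allocation id.
            Map<String, String> handlerShards = Map.of(
                "alloc-1", "[index-a][0]",
                "alloc-2", "[index-a][0]",
                "alloc-3", "[index-a][1]"
            );
            // In-sync allocation ids reported by IndexMetadata for shard 0.
            Set<String> inSyncAllocationIds = Set.of("alloc-1");
            // Same predicate shape as the one passed to cancelHandlers(...):
            // matching shard AND not contained in the in-sync set.
            List<String> toCancel = handlerShards.entrySet()
                .stream()
                .filter(e -> e.getValue().equals(shardId) && inSyncAllocationIds.contains(e.getKey()) == false)
                .map(Map.Entry::getKey)
                .collect(Collectors.toList());
            // Prints [alloc-2]: alloc-1 is in-sync and alloc-3 belongs to another shard.
            System.out.println(toCancel);
        }
    }
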
diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index 9cb10a2c9699a..c507bccd0071d 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -227,9 +227,10 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe public void onReplicationDone(SegmentReplicationState state) { logger.trace( () -> new ParameterizedMessage( - "[shardId {}] [replication id {}] Replication complete, timing data: {}", + "[shardId {}] [replication id {}] Replication complete to {}, timing data: {}", replicaShard.shardId().getId(), state.getReplicationId(), + replicaShard.getLatestReplicationCheckpoint(), state.getTimingData() ) ); @@ -362,7 +363,7 @@ public void onFailure(Exception e) { completedReplications.put(target.shardId(), target); } } else { - onGoingReplications.fail(replicationId, new ReplicationFailedException("Segment Replication failed", e), true); + onGoingReplications.fail(replicationId, new ReplicationFailedException("Segment Replication failed", e), false); } } }); @@ -384,11 +385,21 @@ public void messageReceived(final FileChunkRequest request, TransportChannel cha } } + /** + * Force sync transport handler that forces a round of segment replication. Callers should perform any necessary checks before + * invoking this handler. + */ private class ForceSyncTransportRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final ForceSyncRequest request, TransportChannel channel, Task task) throws Exception { assert indicesService != null; final IndexShard indexShard = indicesService.getShardOrNull(request.getShardId()); + // Proceed with a round of segment replication only when it is allowed + if (indexShard.getReplicationEngine().isEmpty()) { + logger.info("Ignoring force segment replication sync as it is not allowed"); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + return; + } startReplication( ReplicationCheckpoint.empty(request.getShardId()), indexShard, @@ -397,9 +408,10 @@ public void messageReceived(final ForceSyncRequest request, TransportChannel cha public void onReplicationDone(SegmentReplicationState state) { logger.trace( () -> new ParameterizedMessage( - "[shardId {}] [replication id {}] Replication complete, timing data: {}", + "[shardId {}] [replication id {}] Replication complete to {}, timing data: {}", indexShard.shardId().getId(), state.getReplicationId(), + indexShard.getLatestReplicationCheckpoint(), state.getTimingData() ) ); diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 0fd934c31ef7f..9e54b210fea04 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -13,18 +13,12 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionListenerResponseHandler; import
org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.replication.ReplicationMode; -import org.opensearch.action.support.replication.ReplicationOperation; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.action.support.replication.ReplicationTask; import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.cluster.action.shard.ShardStateAction; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.io.stream.StreamInput; @@ -33,18 +27,22 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; +import org.opensearch.index.shard.ShardNotInPrimaryModeException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.List; import java.util.Objects; +import org.opensearch.action.support.replication.ReplicationMode; + /** * Replication action responsible for publishing checkpoint to a replica shard. * @@ -109,34 +107,36 @@ public ReplicationMode getReplicationMode(IndexShard indexShard) { /** * Publish checkpoint request to shard */ - final void publish(IndexShard indexShard) { + final void publish(IndexShard indexShard, ReplicationCheckpoint checkpoint) { + String primaryAllocationId = indexShard.routingEntry().allocationId().getId(); long primaryTerm = indexShard.getPendingPrimaryTerm(); final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we have to execute under the system context so that if security is enabled the sync is authorized threadContext.markAsSystemContext(); - PublishCheckpointRequest request = new PublishCheckpointRequest(indexShard.getLatestReplicationCheckpoint()); - final ReplicationCheckpoint checkpoint = request.getCheckpoint(); + PublishCheckpointRequest request = new PublishCheckpointRequest(checkpoint); + final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "segrep_publish_checkpoint", request); + final ReplicationTimer timer = new ReplicationTimer(); + timer.start(); + transportService.sendChildRequest( + indexShard.recoveryState().getTargetNode(), + transportPrimaryAction, + new ConcreteShardRequest<>(request, primaryAllocationId, primaryTerm), + task, + transportOptions, + new TransportResponseHandler() { + @Override + public ReplicationResponse read(StreamInput in) throws IOException { + return newResponseInstance(in); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } - final List replicationTargets = indexShard.getReplicationGroup().getReplicationTargets(); - for (ShardRouting replicationTarget : replicationTargets) { - if (replicationTarget.primary()) { - continue; - } - final DiscoveryNode node = clusterService.state().nodes().get(replicationTarget.currentNodeId()); - 
final ConcreteReplicaRequest replicaRequest = new ConcreteReplicaRequest<>( - request, - replicationTarget.allocationId().getId(), - primaryTerm, - indexShard.getLastKnownGlobalCheckpoint(), - indexShard.getMaxSeqNoOfUpdatesOrDeletes() - ); - final ReplicationTimer timer = new ReplicationTimer(); - timer.start(); - final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "segrep_publish_checkpoint", request); - ActionListener listener = new ActionListener<>() { @Override - public void onResponse(ReplicationOperation.ReplicaResponse replicaResponse) { + public void handleResponse(ReplicationResponse response) { timer.stop(); logger.trace( () -> new ParameterizedMessage( @@ -151,22 +151,20 @@ public void onResponse(ReplicationOperation.ReplicaResponse replicaResponse) { } @Override - public void onFailure(Exception e) { + public void handleException(TransportException e) { timer.stop(); logger.trace("[shardId {}] Failed to publish checkpoint, timing: {}", indexShard.shardId().getId(), timer.time()); task.setPhase("finished"); taskManager.unregister(task); - if (ExceptionsHelper.unwrap(e, NodeClosedException.class) != null) { - // node shutting down - return; - } if (ExceptionsHelper.unwrap( e, + NodeClosedException.class, IndexNotFoundException.class, AlreadyClosedException.class, - IndexShardClosedException.class + IndexShardClosedException.class, + ShardNotInPrimaryModeException.class ) != null) { - // the index was deleted or the shard is closed + // Node is shutting down or the index was deleted or the shard is closed return; } logger.warn( @@ -174,13 +172,8 @@ public void onFailure(Exception e) { e ); } - }; - final ActionListenerResponseHandler handler = new ActionListenerResponseHandler<>( - listener, - ReplicaResponse::new - ); - transportService.sendChildRequest(node, transportReplicaAction, replicaRequest, task, transportOptions, handler); - } + } + ); logger.trace( () -> new ParameterizedMessage( "[shardId {}] Publishing replication checkpoint [{}]", @@ -197,7 +190,7 @@ protected void shardOperationOnPrimary( IndexShard primary, ActionListener> listener ) { - throw new OpenSearchException("PublishCheckpointAction should not hit primary shards"); + ActionListener.completeWith(listener, () -> new PrimaryResult<>(request, new ReplicationResponse())); } @Override diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java index 6a4e5e449f178..57e667b06a223 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -28,8 +28,8 @@ public class ReplicationCheckpoint implements Writeable, Comparable routes() { - return org.opensearch.common.collect.List.of( - new Route(RestRequest.Method.GET, "/_data_stream"), - new Route(RestRequest.Method.GET, "/_data_stream/{name}") - ); + return List.of(new Route(RestRequest.Method.GET, "/_data_stream"), new Route(RestRequest.Method.GET, "/_data_stream/{name}")); } @Override diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResolveIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResolveIndexAction.java index 687ff554bd8d4..eee9804abec3b 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResolveIndexAction.java +++ 
b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResolveIndexAction.java @@ -57,7 +57,7 @@ public String getName() { @Override public List routes() { - return org.opensearch.common.collect.List.of(new Route(RestRequest.Method.GET, "/_resolve/index/{name}")); + return List.of(new Route(RestRequest.Method.GET, "/_resolve/index/{name}")); } @Override diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java index 6edb806007b5e..edb3f12f339ba 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java @@ -58,7 +58,7 @@ public class RestSimulateIndexTemplateAction extends BaseRestHandler { @Override public List routes() { - return org.opensearch.common.collect.List.of(new Route(POST, "/_index_template/_simulate_index/{name}")); + return List.of(new Route(POST, "/_index_template/_simulate_index/{name}")); } @Override diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationAction.java index 15070c723c2c8..0130f9cd14c36 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationAction.java @@ -15,8 +15,11 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.Strings; import org.opensearch.common.Table; +import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentOpenSearchExtension; +import org.opensearch.index.SegmentReplicationPerGroupStats; +import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestHandler; @@ -24,9 +27,10 @@ import org.opensearch.rest.RestResponse; import org.opensearch.rest.action.RestResponseListener; -import java.util.Comparator; import java.util.List; import java.util.Locale; +import java.util.Map; +import java.util.Set; import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; @@ -67,12 +71,11 @@ public BaseRestHandler.RestChannelConsumer doCatRequest(final RestRequest reques segmentReplicationStatsRequest.detailed(request.paramAsBoolean("detailed", false)); segmentReplicationStatsRequest.shards(Strings.splitStringByCommaToArray(request.param("shards"))); segmentReplicationStatsRequest.activeOnly(request.paramAsBoolean("active_only", false)); - segmentReplicationStatsRequest.completedOnly(request.paramAsBoolean("completed_only", false)); segmentReplicationStatsRequest.indicesOptions(IndicesOptions.fromRequest(request, segmentReplicationStatsRequest.indicesOptions())); return channel -> client.admin() .indices() - .segmentReplicationStats(segmentReplicationStatsRequest, new RestResponseListener(channel) { + .segmentReplicationStats(segmentReplicationStatsRequest, new RestResponseListener<>(channel) { @Override public RestResponse buildResponse(final SegmentReplicationStatsResponse response) throws Exception { return RestTable.buildResponse(buildSegmentReplicationTable(request, response), channel); @@ -90,22 +93,23 @@ protected 
Table getTableWithHeader(RestRequest request) { Table t = new Table(); t.startHeaders() - .addCell("index", "alias:i,idx;desc:index name") .addCell("shardId", "alias:s;desc: shard Id") - .addCell("time", "alias:t,ti;desc:segment replication time") - .addCell("stage", "alias:st;desc:segment replication stage") - .addCell("source_description", "alias:sdesc;desc:source description") - .addCell("target_host", "alias:thost;desc:target host") .addCell("target_node", "alias:tnode;desc:target node name") - .addCell("files_fetched", "alias:ff;desc:files fetched") - .addCell("files_percent", "alias:fp;desc:percent of files fetched") - .addCell("bytes_fetched", "alias:bf;desc:bytes fetched") - .addCell("bytes_percent", "alias:bp;desc:percent of bytes fetched"); + .addCell("target_host", "alias:thost;desc:target host") + .addCell("checkpoints_behind", "alias:cpb;desc:checkpoints behind primary") + .addCell("bytes_behind", "alias:bb;desc:bytes behind primary") + .addCell("current_lag", "alias:clag;desc:ongoing time elapsed waiting for replica to catch up to primary") + .addCell("last_completed_lag", "alias:lcl;desc:time taken for replica to catch up to latest primary refresh") + .addCell("rejected_requests", "alias:rr;desc:count of rejected requests for the replication group"); if (detailed) { - t.addCell("start_time", "alias:start;desc:segment replication start time") - .addCell("start_time_millis", "alias:start_millis;desc:segment replication start time in epoch milliseconds") + t.addCell("stage", "alias:st;desc:segment replication event stage") + .addCell("time", "alias:t,ti;desc:current replication event time") + .addCell("files_fetched", "alias:ff;desc:files fetched") + .addCell("files_percent", "alias:fp;desc:percent of files fetched") + .addCell("bytes_fetched", "alias:bf;desc:bytes fetched") + .addCell("bytes_percent", "alias:bp;desc:percent of bytes fetched") + .addCell("start_time", "alias:start;desc:segment replication start time") .addCell("stop_time", "alias:stop;desc:segment replication stop time") - .addCell("stop_time_millis", "alias:stop_millis;desc:segment replication stop time in epoch milliseconds") .addCell("files", "alias:f;desc:number of files to fetch") .addCell("files_total", "alias:tf;desc:total number of files") .addCell("bytes", "alias:b;desc:number of bytes to fetch") @@ -135,58 +139,61 @@ public Table buildSegmentReplicationTable(RestRequest request, SegmentReplicatio } Table t = getTableWithHeader(request); - for (String index : response.shardSegmentReplicationStates().keySet()) { + for (Map.Entry> entry : response.getReplicationStats().entrySet()) { + final List replicationPerGroupStats = entry.getValue(); - List shardSegmentReplicationStates = response.shardSegmentReplicationStates().get(index); - if (shardSegmentReplicationStates.size() == 0) { + if (replicationPerGroupStats.isEmpty()) { continue; } // Sort ascending by shard id for readability - CollectionUtil.introSort(shardSegmentReplicationStates, new Comparator() { - @Override - public int compare(SegmentReplicationState o1, SegmentReplicationState o2) { - int id1 = o1.getShardRouting().shardId().id(); - int id2 = o2.getShardRouting().shardId().id(); - if (id1 < id2) { - return -1; - } else if (id1 > id2) { - return 1; - } else { - return 0; - } - } + CollectionUtil.introSort(replicationPerGroupStats, (o1, o2) -> { + int id1 = o1.getShardId().id(); + int id2 = o2.getShardId().id(); + return Integer.compare(id1, id2); }); - for (SegmentReplicationState state : shardSegmentReplicationStates) { - t.startRow(); - 
t.addCell(index); - t.addCell(state.getShardRouting().shardId().id()); - t.addCell(new TimeValue(state.getTimer().time())); - t.addCell(state.getStage().toString().toLowerCase(Locale.ROOT)); - t.addCell(state.getSourceDescription()); - t.addCell(state.getTargetNode().getHostName()); - t.addCell(state.getTargetNode().getName()); - t.addCell(state.getIndex().recoveredFileCount()); - t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredFilesPercent())); - t.addCell(state.getIndex().recoveredBytes()); - t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredBytesPercent())); - if (detailed) { - t.addCell(XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().startTime())); - t.addCell(state.getTimer().startTime()); - t.addCell(XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().stopTime())); - t.addCell(state.getTimer().stopTime()); - t.addCell(state.getIndex().totalRecoverFiles()); - t.addCell(state.getIndex().totalFileCount()); - t.addCell(state.getIndex().totalRecoverBytes()); - t.addCell(state.getIndex().totalBytes()); - t.addCell(state.getReplicatingStageTime()); - t.addCell(state.getGetCheckpointInfoStageTime()); - t.addCell(state.getFileDiffStageTime()); - t.addCell(state.getGetFileStageTime()); - t.addCell(state.getFinalizeReplicationStageTime()); + for (SegmentReplicationPerGroupStats perGroupStats : replicationPerGroupStats) { + + final Set replicaShardStats = perGroupStats.getReplicaStats(); + + for (SegmentReplicationShardStats shardStats : replicaShardStats) { + final SegmentReplicationState state = shardStats.getCurrentReplicationState(); + if (state == null) { + continue; + } + + t.startRow(); + t.addCell(perGroupStats.getShardId()); + // these nulls should never happen, here for safety. 
+ t.addCell(state.getTargetNode().getName()); + t.addCell(state.getTargetNode().getHostName()); + t.addCell(shardStats.getCheckpointsBehindCount()); + t.addCell(new ByteSizeValue(shardStats.getBytesBehindCount())); + t.addCell(new TimeValue(shardStats.getCurrentReplicationTimeMillis())); + t.addCell(new TimeValue(shardStats.getLastCompletedReplicationTimeMillis())); + t.addCell(perGroupStats.getRejectedRequestCount()); + if (detailed) { + t.addCell(state.getStage().toString().toLowerCase(Locale.ROOT)); + t.addCell(new TimeValue(state.getTimer().time())); + t.addCell(state.getIndex().recoveredFileCount()); + t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredFilesPercent())); + t.addCell(state.getIndex().recoveredBytes()); + t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredBytesPercent())); + t.addCell(XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().startTime())); + t.addCell(XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().stopTime())); + t.addCell(state.getIndex().totalRecoverFiles()); + t.addCell(state.getIndex().totalFileCount()); + t.addCell(new ByteSizeValue(state.getIndex().totalRecoverBytes())); + t.addCell(new ByteSizeValue(state.getIndex().totalBytes())); + t.addCell(state.getReplicatingStageTime()); + t.addCell(state.getGetCheckpointInfoStageTime()); + t.addCell(state.getFileDiffStageTime()); + t.addCell(state.getGetFileStageTime()); + t.addCell(state.getFinalizeReplicationStageTime()); + } + t.endRow(); } - t.endRow(); } } diff --git a/server/src/main/java/org/opensearch/script/AbstractSortScript.java b/server/src/main/java/org/opensearch/script/AbstractSortScript.java index c72717f8bc927..ac39be1074efa 100644 --- a/server/src/main/java/org/opensearch/script/AbstractSortScript.java +++ b/server/src/main/java/org/opensearch/script/AbstractSortScript.java @@ -54,7 +54,7 @@ abstract class AbstractSortScript implements ScorerAware { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); - private static final Map> PARAMS_FUNCTIONS = org.opensearch.common.collect.Map.of("doc", value -> { + private static final Map> PARAMS_FUNCTIONS = Map.of("doc", value -> { deprecationLogger.deprecate( "sort-script_doc", "Accessing variable [doc] via [params.doc] from within a sort-script " + "is deprecated in favor of directly accessing [doc]."
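
The PARAMS_FUNCTIONS hunk above (and the matching hunks in the script classes that follow) only swaps the shaded factory for java.util.Map.of; the deprecation-on-access pattern itself is unchanged. A minimal, self-contained sketch of that pattern is below (not part of the patch; System.out stands in for DeprecationLogger, and all names are hypothetical):

    import java.util.Map;
    import java.util.function.Function;

    public class ParamsFunctionsSketch {
        // A value-transforming function map consulted when a script reads params;
        // here, reading "doc" emits a one-line warning and passes the value through.
        private static final Map<String, Function<Object, Object>> PARAMS_FUNCTIONS = Map.of("doc", value -> {
            System.out.println("Accessing variable [doc] via [params.doc] is deprecated in favor of directly accessing [doc].");
            return value;
        });

        public static void main(String[] args) {
            Object doc = Map.of("field", 42);
            // A DynamicMap-style wrapper applies the registered function on each access.
            Function<Object, Object> onAccess = PARAMS_FUNCTIONS.getOrDefault("doc", Function.identity());
            System.out.println(onAccess.apply(doc)); // warns, then yields {field=42}
        }
    }
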
diff --git a/server/src/main/java/org/opensearch/script/AggregationScript.java b/server/src/main/java/org/opensearch/script/AggregationScript.java index 4c9d6060ddbfd..ef300e9473d3e 100644 --- a/server/src/main/java/org/opensearch/script/AggregationScript.java +++ b/server/src/main/java/org/opensearch/script/AggregationScript.java @@ -58,7 +58,7 @@ public abstract class AggregationScript implements ScorerAware { public static final ScriptContext CONTEXT = new ScriptContext<>("aggs", Factory.class); private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); - private static final Map> PARAMS_FUNCTIONS = org.opensearch.common.collect.Map.of("doc", value -> { + private static final Map> PARAMS_FUNCTIONS = Map.of("doc", value -> { deprecationLogger.deprecate( "aggregation-script_doc", "Accessing variable [doc] via [params.doc] from within an aggregation-script " diff --git a/server/src/main/java/org/opensearch/script/FieldScript.java b/server/src/main/java/org/opensearch/script/FieldScript.java index 531616a74dce9..82b5c9a088a2c 100644 --- a/server/src/main/java/org/opensearch/script/FieldScript.java +++ b/server/src/main/java/org/opensearch/script/FieldScript.java @@ -54,7 +54,7 @@ public abstract class FieldScript { public static final String[] PARAMETERS = {}; private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); - private static final Map> PARAMS_FUNCTIONS = org.opensearch.common.collect.Map.of("doc", value -> { + private static final Map> PARAMS_FUNCTIONS = Map.of("doc", value -> { deprecationLogger.deprecate( "field-script_doc", "Accessing variable [doc] via [params.doc] from within a field-script " + "is deprecated in favor of directly accessing [doc]." diff --git a/server/src/main/java/org/opensearch/script/ScoreScript.java b/server/src/main/java/org/opensearch/script/ScoreScript.java index dba44995b18f0..5c6553ffc2a28 100644 --- a/server/src/main/java/org/opensearch/script/ScoreScript.java +++ b/server/src/main/java/org/opensearch/script/ScoreScript.java @@ -84,7 +84,7 @@ public Explanation get(double score, Explanation subQueryExplanation) { } private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); - private static final Map> PARAMS_FUNCTIONS = org.opensearch.common.collect.Map.of("doc", value -> { + private static final Map> PARAMS_FUNCTIONS = Map.of("doc", value -> { deprecationLogger.deprecate( "score-script_doc", "Accessing variable [doc] via [params.doc] from within a score-script " + "is deprecated in favor of directly accessing [doc]."
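
These script hunks, like the REST handler and aggregation factory hunks elsewhere in this change, make the same mechanical substitution: the org.opensearch.common.collect.Map/List/Set factories are replaced with the equivalent java.util factories. A minimal sketch of the before/after, assuming the JDK 9+ immutable factories (the route strings below are taken from the REST hunks in this change and are illustrative only):

    import java.util.List;
    import java.util.Map;

    public class ImmutableFactorySketch {
        public static void main(String[] args) {
            // Before: org.opensearch.common.collect.List.of(...) / Map.of(...)
            // After: the java.util equivalents, called directly.
            List<String> routes = List.of("/_data_stream", "/_data_stream/{name}");
            Map<String, Object> params = Map.of("doc", 1);
            System.out.println(routes + " " + params);
            // Both factories return immutable collections; mutation throws.
            try {
                routes.add("/_resolve/index/{name}");
            } catch (UnsupportedOperationException expected) {
                System.out.println("immutable, as expected");
            }
        }
    }
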
diff --git a/server/src/main/java/org/opensearch/script/ScriptedMetricAggContexts.java b/server/src/main/java/org/opensearch/script/ScriptedMetricAggContexts.java index 58d0f7b4f6040..2f61fd75471ad 100644 --- a/server/src/main/java/org/opensearch/script/ScriptedMetricAggContexts.java +++ b/server/src/main/java/org/opensearch/script/ScriptedMetricAggContexts.java @@ -99,7 +99,7 @@ public interface Factory extends ScriptFactory { public abstract static class MapScript { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); - private static final Map> PARAMS_FUNCTIONS = org.opensearch.common.collect.Map.of("doc", value -> { + private static final Map> PARAMS_FUNCTIONS = Map.of("doc", value -> { deprecationLogger.deprecate( "map-script_doc", "Accessing variable [doc] via [params.doc] from within a scripted metric agg map script " diff --git a/server/src/main/java/org/opensearch/script/TermsSetQueryScript.java b/server/src/main/java/org/opensearch/script/TermsSetQueryScript.java index 02e361b0f5415..99fa2d584ebfd 100644 --- a/server/src/main/java/org/opensearch/script/TermsSetQueryScript.java +++ b/server/src/main/java/org/opensearch/script/TermsSetQueryScript.java @@ -55,7 +55,7 @@ public abstract class TermsSetQueryScript { public static final ScriptContext CONTEXT = new ScriptContext<>("terms_set", Factory.class); private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); - private static final Map> PARAMS_FUNCTIONS = org.opensearch.common.collect.Map.of("doc", value -> { + private static final Map> PARAMS_FUNCTIONS = Map.of("doc", value -> { deprecationLogger.deprecate( "terms-set-query-script_doc", "Accessing variable [doc] via [params.doc] from within a terms-set-query-script " diff --git a/server/src/main/java/org/opensearch/search/DocValueFormat.java b/server/src/main/java/org/opensearch/search/DocValueFormat.java index 84c46e400543a..07ff5405211ff 100644 --- a/server/src/main/java/org/opensearch/search/DocValueFormat.java +++ b/server/src/main/java/org/opensearch/search/DocValueFormat.java @@ -302,6 +302,27 @@ public double parseDouble(String value, boolean roundUp, LongSupplier now) { public String toString() { return "DocValueFormat.DateTime(" + formatter + ", " + timeZone + ", " + resolution + ")"; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DateTime that = (DateTime) o; + + return Objects.equals(formatter, that.formatter) + && Objects.equals(timeZone, that.timeZone) + && Objects.equals(resolution, that.resolution); + } + + @Override + public int hashCode() { + return Objects.hash(formatter, timeZone, resolution); + } } DocValueFormat GEOHASH = new DocValueFormat() { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 1d3a96e66885b..2fb3ea094b0ab 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -63,6 +63,7 @@ import java.io.IOException; import java.time.ZoneId; import java.time.ZoneOffset; +import java.util.List; import java.util.Objects; import java.util.function.LongConsumer; @@ -296,7
+297,7 @@ public DateHistogramValuesSourceBuilder offset(long offset) { public static void register(ValuesSourceRegistry.Builder builder) { builder.register( REGISTRY_KEY, - org.opensearch.common.collect.List.of(CoreValuesSourceType.DATE, CoreValuesSourceType.NUMERIC), + List.of(CoreValuesSourceType.DATE, CoreValuesSourceType.NUMERIC), (valuesSourceConfig, rounding, name, hasScript, format, missingBucket, missingOrder, order) -> { ValuesSource.Numeric numeric = (ValuesSource.Numeric) valuesSourceConfig.getValuesSource(); // TODO once composite is plugged in to the values source registry or at least understands Date values source types use it diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DoubleValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DoubleValuesSource.java index d05c78f540641..970f07b6a9d74 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DoubleValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DoubleValuesSource.java @@ -152,11 +152,9 @@ void setAfter(Comparable value) { } else if (value instanceof Number) { afterValue = ((Number) value).doubleValue(); } else { - afterValue = format.parseDouble( - value.toString(), - false, - () -> { throw new IllegalArgumentException("now() is not supported in [after] key"); } - ); + afterValue = format.parseDouble(value.toString(), false, () -> { + throw new IllegalArgumentException("now() is not supported in [after] key"); + }); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java index 7f1393f7c8fb2..05eba71bbdbd5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java @@ -51,6 +51,7 @@ import org.opensearch.search.sort.SortOrder; import java.io.IOException; +import java.util.List; import java.util.Objects; import java.util.function.LongConsumer; @@ -100,7 +101,7 @@ static HistogramValuesSourceBuilder parse(String name, XContentParser parser) th public static void register(ValuesSourceRegistry.Builder builder) { builder.register( REGISTRY_KEY, - org.opensearch.common.collect.List.of(CoreValuesSourceType.DATE, CoreValuesSourceType.NUMERIC), + List.of(CoreValuesSourceType.DATE, CoreValuesSourceType.NUMERIC), (valuesSourceConfig, interval, name, hasScript, format, missingBucket, missingOrder, order) -> { ValuesSource.Numeric numeric = (ValuesSource.Numeric) valuesSourceConfig.getValuesSource(); final HistogramValuesSource vs = new HistogramValuesSource(numeric, interval); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java index ec6410c2a9377..8f0e99b187c2d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -170,11 +170,9 @@ protected void setAfter(Comparable value) { afterValue = null; } else { // parse the value from a string in case it is a date or a formatted unsigned long. 
- afterValue = format.parseLong( - value.toString(), - false, - () -> { throw new IllegalArgumentException("now() is not supported in [after] key"); } - ); + afterValue = format.parseLong(value.toString(), false, () -> { + throw new IllegalArgumentException("now() is not supported in [after] key"); + }); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java index 64b2635ea7fee..c87c5eef157fa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java @@ -52,6 +52,7 @@ import org.opensearch.search.sort.SortOrder; import java.io.IOException; +import java.util.List; import java.util.function.LongConsumer; import java.util.function.LongUnaryOperator; @@ -118,7 +119,7 @@ public String type() { static void register(ValuesSourceRegistry.Builder builder) { builder.register( REGISTRY_KEY, - org.opensearch.common.collect.List.of(CoreValuesSourceType.DATE, CoreValuesSourceType.NUMERIC, CoreValuesSourceType.BOOLEAN), + List.of(CoreValuesSourceType.DATE, CoreValuesSourceType.NUMERIC, CoreValuesSourceType.BOOLEAN), (valuesSourceConfig, name, hasScript, format, missingBucket, missingOrder, order) -> { final DocValueFormat docValueFormat; if (format == null && valuesSourceConfig.valueSourceType() == CoreValuesSourceType.DATE) { @@ -180,7 +181,7 @@ static void register(ValuesSourceRegistry.Builder builder) { builder.register( REGISTRY_KEY, - org.opensearch.common.collect.List.of(CoreValuesSourceType.BYTES, CoreValuesSourceType.IP), + List.of(CoreValuesSourceType.BYTES, CoreValuesSourceType.IP), (valuesSourceConfig, name, hasScript, format, missingBucket, missingOrder, order) -> new CompositeValuesSourceConfig( name, valuesSourceConfig.fieldType(), diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 295dfec639de6..040621ce8ec34 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -162,34 +162,29 @@ public void collect(int doc, long owningBucketOrd) throws IOException { @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - return buildAggregationsForVariableBuckets( - owningBucketOrds, - bucketOrds, - (bucketValue, docCount, subAggregationResults) -> { - return new InternalDateHistogram.Bucket(bucketValue, docCount, keyed, formatter, subAggregationResults); - }, - (owningBucketOrd, buckets) -> { - // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order - CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator()); - - // value source will be null for unmapped fields - // Important: use `rounding` here, not `shardRounding` - InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 - ? 
new InternalDateHistogram.EmptyBucketInfo(rounding.withoutOffset(), buildEmptySubAggregations(), extendedBounds) - : null; - return new InternalDateHistogram( - name, - buckets, - order, - minDocCount, - rounding.offset(), - emptyBucketInfo, - formatter, - keyed, - metadata() - ); - } - ); + return buildAggregationsForVariableBuckets(owningBucketOrds, bucketOrds, (bucketValue, docCount, subAggregationResults) -> { + return new InternalDateHistogram.Bucket(bucketValue, docCount, keyed, formatter, subAggregationResults); + }, (owningBucketOrd, buckets) -> { + // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order + CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator()); + + // value source will be null for unmapped fields + // Important: use `rounding` here, not `shardRounding` + InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 + ? new InternalDateHistogram.EmptyBucketInfo(rounding.withoutOffset(), buildEmptySubAggregations(), extendedBounds) + : null; + return new InternalDateHistogram( + name, + buckets, + order, + minDocCount, + rounding.offset(), + emptyBucketInfo, + formatter, + keyed, + metadata() + ); + }); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index eb22a857643c4..dd74d83c665de 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -33,7 +33,6 @@ package org.opensearch.search.aggregations.bucket.histogram; import org.opensearch.common.Rounding; -import org.opensearch.common.collect.List; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -47,6 +46,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java index 722bf87201631..321c16cdba970 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.histogram; -import org.opensearch.common.collect.List; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -46,6 +45,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java index a91ddf2e64879..fdf047c13480a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -482,10 +482,7 @@ private BucketReduceResult addEmptyBuckets(BucketReduceResult current, ReduceCon Bucket lastBucket = null; ListIterator iter = list.listIterator(); - InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce( - org.opensearch.common.collect.List.of(bucketInfo.emptySubAggregations), - reduceContext - ); + InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(List.of(bucketInfo.emptySubAggregations), reduceContext); // Add the empty buckets within the data, // e.g. if the data series is [1,2,3,7] there're 3 empty buckets that will be created for 4,5,6 diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java index 90ae2e994caf1..bfd7845e7e16f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java @@ -47,6 +47,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -67,7 +68,7 @@ public static void registerAggregators( ) { builder.register( registryKey, - org.opensearch.common.collect.List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), + List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), RangeAggregator::new, true ); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java index f8d3040620a51..41ef823a375c0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java @@ -47,6 +47,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -59,7 +60,7 @@ public class DiversifiedAggregatorFactory extends ValuesSourceAggregatorFactory public static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register( DiversifiedAggregationBuilder.REGISTRY_KEY, - org.opensearch.common.collect.List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), + List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), ( String name, int shardSize, diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java index 8a061c0fbf512..01975381f15a4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java @@ -50,21 +50,14 @@ public class MultiTermsAggregationFactory extends AggregatorFactory { private final boolean showTermDocCountError; public static void registerAggregators(ValuesSourceRegistry.Builder builder) { - builder.register( - 
REGISTRY_KEY, - org.opensearch.common.collect.List.of(CoreValuesSourceType.BYTES, CoreValuesSourceType.IP), - config -> { - final IncludeExclude.StringFilter filter = config.v2() == null - ? null - : config.v2().convertToStringFilter(config.v1().format()); - return MultiTermsAggregator.InternalValuesSourceFactory.bytesValuesSource(config.v1().getValuesSource(), filter); - }, - true - ); + builder.register(REGISTRY_KEY, List.of(CoreValuesSourceType.BYTES, CoreValuesSourceType.IP), config -> { + final IncludeExclude.StringFilter filter = config.v2() == null ? null : config.v2().convertToStringFilter(config.v1().format()); + return MultiTermsAggregator.InternalValuesSourceFactory.bytesValuesSource(config.v1().getValuesSource(), filter); + }, true); builder.register( REGISTRY_KEY, - org.opensearch.common.collect.List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.BOOLEAN, CoreValuesSourceType.DATE), + List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.BOOLEAN, CoreValuesSourceType.DATE), config -> { ValuesSourceConfig valuesSourceConfig = config.v1(); IncludeExclude includeExclude = config.v2(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java index 45898a689a605..c21bfd6fb73b9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java @@ -188,7 +188,7 @@ InternalMultiTerms buildResult(long owningBucketOrd, long otherDocCount, Interna otherDocCount, 0, formats, - org.opensearch.common.collect.List.of(topBuckets) + List.of(topBuckets) ); } @@ -341,7 +341,7 @@ private void apply( List> results ) throws IOException { if (index == collectedValues.size()) { - results.add(org.opensearch.common.collect.List.copyOf(current)); + results.add(List.copyOf(current)); } else if (null != collectedValues.get(index)) { for (Object value : collectedValues.get(index).get()) { current.add(value); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java index 2c4c502b86695..70e98c0d19cd7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java @@ -37,7 +37,6 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.PriorityQueue; -import org.opensearch.common.collect.List; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.LongArray; @@ -62,6 +61,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.List; import java.util.Map; import java.util.function.BiConsumer; import java.util.function.Function; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 50b1ae001db97..ad57a07a4a8f5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -444,7 +444,7 @@ Aggregator create( && ordinalsValuesSource.supportsGlobalOrdinalsMapping() && // we use the static COLLECT_SEGMENT_ORDS to allow tests to force specific optimizations - (COLLECT_SEGMENT_ORDS != null ? COLLECT_SEGMENT_ORDS.booleanValue() : ratio <= 0.5 && maxOrd <= 2048)) { + (COLLECT_SEGMENT_ORDS != null ? COLLECT_SEGMENT_ORDS.booleanValue() : ratio <= 0.5 && maxOrd <= 2048)) { /* * We can use the low cardinality execution mode iff this aggregator: * - has no sub-aggregator AND diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java index 727304f8a5254..75419b7c64b12 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java @@ -44,6 +44,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -67,7 +68,7 @@ class AvgAggregatorFactory extends ValuesSourceAggregatorFactory { static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register( AvgAggregationBuilder.REGISTRY_KEY, - org.opensearch.common.collect.List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), + List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), AvgAggregator::new, true ); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java index 3ec24ad04d9aa..96f1af94f2d07 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java @@ -44,6 +44,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -56,7 +57,7 @@ class MaxAggregatorFactory extends ValuesSourceAggregatorFactory { static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register( MaxAggregationBuilder.REGISTRY_KEY, - org.opensearch.common.collect.List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), + List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), MaxAggregator::new, true ); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java index 1b24b88d6f068..b117f70c81baf 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java @@ -44,6 +44,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -56,7 +57,7 @@ class MinAggregatorFactory extends ValuesSourceAggregatorFactory { static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register( MinAggregationBuilder.REGISTRY_KEY, - org.opensearch.common.collect.List.of(CoreValuesSourceType.NUMERIC, 
CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), + List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), MinAggregator::new, true ); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java index 7b5848fc79197..5c831d60f75a8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java @@ -103,7 +103,7 @@ public Aggregator createInternal( CardinalityUpperBound cardinality, Map metadata ) throws IOException { - Map aggParams = this.aggParams == null ? org.opensearch.common.collect.Map.of() : this.aggParams; + Map aggParams = this.aggParams == null ? Map.of() : this.aggParams; Script reduceScript = deepCopyScript(this.reduceScript, searchContext, aggParams); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java index 6e343ed9a31d1..0c10df174efa0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java @@ -44,6 +44,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -67,7 +68,7 @@ class StatsAggregatorFactory extends ValuesSourceAggregatorFactory { static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register( StatsAggregationBuilder.REGISTRY_KEY, - org.opensearch.common.collect.List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), + List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), StatsAggregator::new, true ); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java index c94949bca09e6..b3506ff958833 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java @@ -44,6 +44,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -67,7 +68,7 @@ class SumAggregatorFactory extends ValuesSourceAggregatorFactory { static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register( SumAggregationBuilder.REGISTRY_KEY, - org.opensearch.common.collect.List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), + List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN), SumAggregator::new, true ); diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregator.java index 3603d1c5a0c58..fd9caf59d9e01 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregator.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregator.java @@ -120,9 +120,9 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext if (xAxisUnits != null) { xDiff = (thisBucketKey.doubleValue() - lastBucketKey.doubleValue()) / xAxisUnits; } - final List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false) - .map((p) -> { return (InternalAggregation) p; }) - .collect(Collectors.toList()); + final List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> { + return (InternalAggregation) p; + }).collect(Collectors.toList()); aggs.add(new InternalDerivative(name(), gradient, xDiff, formatter, metadata())); Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), InternalAggregations.from(aggs)); newBuckets.add(newBucket); diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchFieldsPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchFieldsPhase.java index 0b15124f08cee..9988abb778bce 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchFieldsPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchFieldsPhase.java @@ -96,7 +96,7 @@ public void process(HitContext hitContext) throws IOException { private Set<String> getIgnoredFields(SearchHit hit) { DocumentField field = hit.field(IgnoredFieldMapper.NAME); if (field == null) { - return org.opensearch.common.collect.Set.of(); + return Set.of(); } Set<String> ignoredFields = new HashSet<>(); diff --git a/server/src/main/java/org/opensearch/search/profile/ProfileResult.java b/server/src/main/java/org/opensearch/search/profile/ProfileResult.java index c05c574b77942..82b060fb421f8 100644 --- a/server/src/main/java/org/opensearch/search/profile/ProfileResult.java +++ b/server/src/main/java/org/opensearch/search/profile/ProfileResult.java @@ -91,8 +91,8 @@ public ProfileResult( this.type = type; this.description = description; this.breakdown = Objects.requireNonNull(breakdown, "required breakdown argument missing"); - this.debug = debug == null ? org.opensearch.common.collect.Map.of() : debug; - this.children = children == null ? org.opensearch.common.collect.List.of() : children; + this.debug = debug == null ? Map.of() : debug; + this.children = children == null ?
List.of() : children; this.nodeTime = nodeTime; } @@ -107,7 +107,7 @@ public ProfileResult(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { debug = in.readMap(StreamInput::readString, StreamInput::readGenericValue); } else { - debug = org.opensearch.common.collect.Map.of(); + debug = Map.of(); } children = in.readList(ProfileResult::new); } diff --git a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy index bcf0b704374c9..60d0e9d15215a 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy @@ -123,6 +123,11 @@ grant codeBase "${codebase.junit-rt.jar}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; +grant codeBase "${codebase.idea_rt.jar}" { + // allows IntelliJ IDEA (2022.3.3) JUnit test runner to control number of test iterations + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; +}; + grant codeBase "file:${gradle.dist.lib}/-" { // gradle test worker code needs a slew of permissions, we give full access here since gradle isn't a production // dependency and there's no point in exercising the security policy against it diff --git a/server/src/test/java/org/opensearch/action/ActionListenerTests.java b/server/src/test/java/org/opensearch/action/ActionListenerTests.java index bf64b4ed8a1a5..e56deb6088722 100644 --- a/server/src/test/java/org/opensearch/action/ActionListenerTests.java +++ b/server/src/test/java/org/opensearch/action/ActionListenerTests.java @@ -266,10 +266,9 @@ public void onFailure(Exception e) { assertThat(assertionError.getCause(), instanceOf(IllegalArgumentException.class)); assertNull(exReference.get()); - assertionError = expectThrows( - AssertionError.class, - () -> ActionListener.completeWith(listener, () -> { throw new IllegalArgumentException(); }) - ); + assertionError = expectThrows(AssertionError.class, () -> ActionListener.completeWith(listener, () -> { + throw new IllegalArgumentException(); + })); assertThat(assertionError.getCause(), instanceOf(IllegalArgumentException.class)); assertThat(exReference.get(), instanceOf(IllegalArgumentException.class)); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java index 03c6b5ab822c1..40a041e9b94e3 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java @@ -386,7 +386,9 @@ public void testExcludeAbsentNodesByNodeIds() throws InterruptedException { Strings.EMPTY_ARRAY, TimeValue.timeValueSeconds(30) ), - expectSuccess(e -> { countDownLatch.countDown(); }) + expectSuccess(e -> { + countDownLatch.countDown(); + }) ); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); @@ -430,7 +432,9 @@ public void testExcludeAbsentNodesByNodeNames() throws InterruptedException { localNode, AddVotingConfigExclusionsAction.NAME, new AddVotingConfigExclusionsRequest("absent_node"), - expectSuccess(e -> { countDownLatch.countDown(); }) + expectSuccess(e -> { + countDownLatch.countDown(); + }) ); assertTrue(countDownLatch.await(30, 
TimeUnit.SECONDS)); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/TransportAnalyzeActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/TransportAnalyzeActionTests.java index 17cc539a5d561..f575be74a3e9b 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/TransportAnalyzeActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/TransportAnalyzeActionTests.java @@ -380,10 +380,9 @@ public void testGetIndexAnalyserWithoutIndexAnalyzers() { public void testGetFieldAnalyzerWithoutIndexAnalyzers() { AnalyzeAction.Request req = new AnalyzeAction.Request().field("field").text("text"); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> { TransportAnalyzeAction.analyze(req, registry, null, maxTokenCount); } - ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + TransportAnalyzeAction.analyze(req, registry, null, maxTokenCount); + }); assertEquals(e.getMessage(), "analysis based on a specific field requires an index"); } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java b/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java index 5bb9155a5a84c..89ae48d4cd854 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java @@ -74,10 +74,9 @@ public void tearDown() throws Exception { public void testSetSource() throws IOException { CreateIndexRequestBuilder builder = new CreateIndexRequestBuilder(this.testClient, CreateIndexAction.INSTANCE); - OpenSearchParseException e = expectThrows( - OpenSearchParseException.class, - () -> { builder.setSource("{\"" + KEY + "\" : \"" + VALUE + "\"}", XContentType.JSON); } - ); + OpenSearchParseException e = expectThrows(OpenSearchParseException.class, () -> { + builder.setSource("{\"" + KEY + "\" : \"" + VALUE + "\"}", XContentType.JSON); + }); assertEquals(String.format(Locale.ROOT, "unknown key [%s] for create index", KEY), e.getMessage()); builder.setSource("{\"settings\" : {\"" + KEY + "\" : \"" + VALUE + "\"}}", XContentType.JSON); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamRequestTests.java index b6e143d28b4aa..e33b873a52f19 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamRequestTests.java @@ -95,11 +95,8 @@ public void testValidateRequestWithoutName() { public void testDeleteDataStream() { final String dataStreamName = "my-data-stream"; - final List otherIndices = randomSubsetOf(org.opensearch.common.collect.List.of("foo", "bar", "baz")); - ClusterState cs = getClusterStateWithDataStreams( - org.opensearch.common.collect.List.of(new Tuple<>(dataStreamName, 2)), - otherIndices - ); + final List otherIndices = randomSubsetOf(List.of("foo", "bar", "baz")); + ClusterState cs = getClusterStateWithDataStreams(List.of(new Tuple<>(dataStreamName, 2)), otherIndices); DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName }); ClusterState newState = 
DeleteDataStreamAction.TransportAction.removeDataStream(getMetadataDeleteIndexService(), cs, req); assertThat(newState.metadata().dataStreams().size(), equalTo(0)); @@ -112,13 +109,13 @@ public void testDeleteDataStream() { public void testDeleteMultipleDataStreams() { String[] dataStreamNames = { "foo", "bar", "baz", "eggplant" }; ClusterState cs = getClusterStateWithDataStreams( - org.opensearch.common.collect.List.of( + List.of( new Tuple<>(dataStreamNames[0], randomIntBetween(1, 3)), new Tuple<>(dataStreamNames[1], randomIntBetween(1, 3)), new Tuple<>(dataStreamNames[2], randomIntBetween(1, 3)), new Tuple<>(dataStreamNames[3], randomIntBetween(1, 3)) ), - org.opensearch.common.collect.List.of() + List.of() ); DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { "ba*", "eggplant" }); @@ -182,13 +179,13 @@ public void testDeleteNonexistentDataStream() { final String dataStreamName = "my-data-stream"; String[] dataStreamNames = { "foo", "bar", "baz", "eggplant" }; ClusterState cs = getClusterStateWithDataStreams( - org.opensearch.common.collect.List.of( + List.of( new Tuple<>(dataStreamNames[0], randomIntBetween(1, 3)), new Tuple<>(dataStreamNames[1], randomIntBetween(1, 3)), new Tuple<>(dataStreamNames[2], randomIntBetween(1, 3)), new Tuple<>(dataStreamNames[3], randomIntBetween(1, 3)) ), - org.opensearch.common.collect.List.of() + List.of() ); DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName }); ClusterState newState = DeleteDataStreamAction.TransportAction.removeDataStream(getMetadataDeleteIndexService(), cs, req); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/datastream/GetDataStreamsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/datastream/GetDataStreamsRequestTests.java index 25734a9824c41..54e83bc764cad 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/datastream/GetDataStreamsRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/datastream/GetDataStreamsRequestTests.java @@ -82,10 +82,7 @@ protected Request createTestInstance() { public void testGetDataStream() { final String dataStreamName = "my-data-stream"; - ClusterState cs = getClusterStateWithDataStreams( - org.opensearch.common.collect.List.of(new Tuple<>(dataStreamName, 1)), - org.opensearch.common.collect.List.of() - ); + ClusterState cs = getClusterStateWithDataStreams(List.of(new Tuple<>(dataStreamName, 1)), List.of()); GetDataStreamAction.Request req = new GetDataStreamAction.Request(new String[] { dataStreamName }); List dataStreams = GetDataStreamAction.TransportAction.getDataStreams( cs, @@ -99,8 +96,8 @@ public void testGetDataStream() { public void testGetDataStreamsWithWildcards() { final String[] dataStreamNames = { "my-data-stream", "another-data-stream" }; ClusterState cs = getClusterStateWithDataStreams( - org.opensearch.common.collect.List.of(new Tuple<>(dataStreamNames[0], 1), new Tuple<>(dataStreamNames[1], 1)), - org.opensearch.common.collect.List.of() + List.of(new Tuple<>(dataStreamNames[0], 1), new Tuple<>(dataStreamNames[1], 1)), + List.of() ); GetDataStreamAction.Request req = new GetDataStreamAction.Request(new String[] { dataStreamNames[1].substring(0, 5) + "*" }); @@ -144,8 +141,8 @@ public void testGetDataStreamsWithWildcards() { public void testGetDataStreamsWithoutWildcards() { final String[] dataStreamNames = { "my-data-stream", "another-data-stream" }; ClusterState cs = getClusterStateWithDataStreams( - 
org.opensearch.common.collect.List.of(new Tuple<>(dataStreamNames[0], 1), new Tuple<>(dataStreamNames[1], 1)), - org.opensearch.common.collect.List.of() + List.of(new Tuple<>(dataStreamNames[0], 1), new Tuple<>(dataStreamNames[1], 1)), + List.of() ); GetDataStreamAction.Request req = new GetDataStreamAction.Request(new String[] { dataStreamNames[0], dataStreamNames[1] }); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java index 001efb32c2988..81e73c4889d3b 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java @@ -109,7 +109,9 @@ public void testIncludeDefaults() { "index.refresh_interval should be set as we are including defaults", defaultsResponse.getSetting(indexName, "index.refresh_interval") ), - exception -> { throw new AssertionError(exception); } + exception -> { + throw new AssertionError(exception); + } ) ); } @@ -124,7 +126,9 @@ public void testDoNotIncludeDefaults() { "index.refresh_interval should be null as it was never set", noDefaultsResponse.getSetting(indexName, "index.refresh_interval") ), - exception -> { throw new AssertionError(exception); } + exception -> { + throw new AssertionError(exception); + } ) ); } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestTests.java index e6eba34987a44..3fc57f4e32435 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestTests.java @@ -164,21 +164,18 @@ private static PutMappingRequest createTestItem() throws IOException { public void testResolveIndicesWithWriteIndexOnlyAndDataStreamsAndWriteAliases() { String[] dataStreamNames = { "foo", "bar", "baz" }; - List> dsMetadata = org.opensearch.common.collect.List.of( + List> dsMetadata = List.of( tuple(dataStreamNames[0], randomIntBetween(1, 3)), tuple(dataStreamNames[1], randomIntBetween(1, 3)), tuple(dataStreamNames[2], randomIntBetween(1, 3)) ); - ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams( - dsMetadata, - org.opensearch.common.collect.List.of("index1", "index2", "index3") - ); + ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams(dsMetadata, List.of("index1", "index2", "index3")); cs = addAliases( cs, - org.opensearch.common.collect.List.of( - tuple("alias1", org.opensearch.common.collect.List.of(tuple("index1", false), tuple("index2", true))), - tuple("alias2", org.opensearch.common.collect.List.of(tuple("index2", false), tuple("index3", true))) + List.of( + tuple("alias1", List.of(tuple("index1", false), tuple("index2", true))), + tuple("alias2", List.of(tuple("index2", false), tuple("index3", true))) ) ); PutMappingRequest request = new PutMappingRequest().indices("foo", "alias1", "alias2").writeIndexOnly(true); @@ -195,21 +192,18 @@ public void testResolveIndicesWithWriteIndexOnlyAndDataStreamsAndWriteAliases() public void testResolveIndicesWithoutWriteIndexOnlyAndDataStreamsAndWriteAliases() { String[] dataStreamNames = { "foo", "bar", "baz" }; - List> dsMetadata = org.opensearch.common.collect.List.of( + List> dsMetadata = List.of( tuple(dataStreamNames[0], 
randomIntBetween(1, 3)), tuple(dataStreamNames[1], randomIntBetween(1, 3)), tuple(dataStreamNames[2], randomIntBetween(1, 3)) ); - ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams( - dsMetadata, - org.opensearch.common.collect.List.of("index1", "index2", "index3") - ); + ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams(dsMetadata, List.of("index1", "index2", "index3")); cs = addAliases( cs, - org.opensearch.common.collect.List.of( - tuple("alias1", org.opensearch.common.collect.List.of(tuple("index1", false), tuple("index2", true))), - tuple("alias2", org.opensearch.common.collect.List.of(tuple("index2", false), tuple("index3", true))) + List.of( + tuple("alias1", List.of(tuple("index1", false), tuple("index2", true))), + tuple("alias2", List.of(tuple("index2", false), tuple("index3", true))) ) ); PutMappingRequest request = new PutMappingRequest().indices("foo", "alias1", "alias2"); @@ -221,28 +215,25 @@ public void testResolveIndicesWithoutWriteIndexOnlyAndDataStreamsAndWriteAliases List<String> indexNames = Arrays.stream(indices).map(Index::getName).collect(Collectors.toList()); IndexAbstraction expectedDs = cs.metadata().getIndicesLookup().get("foo"); List<String> expectedIndices = expectedDs.getIndices().stream().map(im -> im.getIndex().getName()).collect(Collectors.toList()); - expectedIndices.addAll(org.opensearch.common.collect.List.of("index1", "index2", "index3")); + expectedIndices.addAll(List.of("index1", "index2", "index3")); // should resolve the data stream and each alias to _all_ their respective indices assertThat(indexNames, containsInAnyOrder(expectedIndices.toArray())); } public void testResolveIndicesWithWriteIndexOnlyAndDataStreamAndIndex() { String[] dataStreamNames = { "foo", "bar", "baz" }; - List<Tuple<String, Integer>> dsMetadata = org.opensearch.common.collect.List.of( + List<Tuple<String, Integer>> dsMetadata = List.of( tuple(dataStreamNames[0], randomIntBetween(1, 3)), tuple(dataStreamNames[1], randomIntBetween(1, 3)), tuple(dataStreamNames[2], randomIntBetween(1, 3)) ); - ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams( - dsMetadata, - org.opensearch.common.collect.List.of("index1", "index2", "index3") - ); + ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams(dsMetadata, List.of("index1", "index2", "index3")); cs = addAliases( cs, - org.opensearch.common.collect.List.of( - tuple("alias1", org.opensearch.common.collect.List.of(tuple("index1", false), tuple("index2", true))), - tuple("alias2", org.opensearch.common.collect.List.of(tuple("index2", false), tuple("index3", true))) + List.of( + tuple("alias1", List.of(tuple("index1", false), tuple("index2", true))), + tuple("alias2", List.of(tuple("index2", false), tuple("index3", true))) ) ); PutMappingRequest request = new PutMappingRequest().indices("foo", "index3").writeIndexOnly(true); @@ -254,28 +245,25 @@ public void testResolveIndicesWithWriteIndexOnlyAndDataStreamAndIndex() { List<String> indexNames = Arrays.stream(indices).map(Index::getName).collect(Collectors.toList()); IndexAbstraction expectedDs = cs.metadata().getIndicesLookup().get("foo"); List<String> expectedIndices = expectedDs.getIndices().stream().map(im -> im.getIndex().getName()).collect(Collectors.toList()); - expectedIndices.addAll(org.opensearch.common.collect.List.of("index1", "index2", "index3")); + expectedIndices.addAll(List.of("index1", "index2", "index3")); // should resolve the data stream and each alias to _all_ their respective indices assertThat(indexNames,
containsInAnyOrder(expectedDs.getWriteIndex().getIndex().getName(), "index3")); } public void testResolveIndicesWithWriteIndexOnlyAndNoSingleWriteIndex() { String[] dataStreamNames = { "foo", "bar", "baz" }; - List> dsMetadata = org.opensearch.common.collect.List.of( + List> dsMetadata = List.of( tuple(dataStreamNames[0], randomIntBetween(1, 3)), tuple(dataStreamNames[1], randomIntBetween(1, 3)), tuple(dataStreamNames[2], randomIntBetween(1, 3)) ); - ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams( - dsMetadata, - org.opensearch.common.collect.List.of("index1", "index2", "index3") - ); + ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams(dsMetadata, List.of("index1", "index2", "index3")); final ClusterState cs2 = addAliases( cs, - org.opensearch.common.collect.List.of( - tuple("alias1", org.opensearch.common.collect.List.of(tuple("index1", false), tuple("index2", true))), - tuple("alias2", org.opensearch.common.collect.List.of(tuple("index2", false), tuple("index3", true))) + List.of( + tuple("alias1", List.of(tuple("index1", false), tuple("index2", true))), + tuple("alias2", List.of(tuple("index2", false), tuple("index3", true))) ) ); PutMappingRequest request = new PutMappingRequest().indices("*").writeIndexOnly(true); @@ -288,21 +276,18 @@ public void testResolveIndicesWithWriteIndexOnlyAndNoSingleWriteIndex() { public void testResolveIndicesWithWriteIndexOnlyAndAliasWithoutWriteIndex() { String[] dataStreamNames = { "foo", "bar", "baz" }; - List> dsMetadata = org.opensearch.common.collect.List.of( + List> dsMetadata = List.of( tuple(dataStreamNames[0], randomIntBetween(1, 3)), tuple(dataStreamNames[1], randomIntBetween(1, 3)), tuple(dataStreamNames[2], randomIntBetween(1, 3)) ); - ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams( - dsMetadata, - org.opensearch.common.collect.List.of("index1", "index2", "index3") - ); + ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams(dsMetadata, List.of("index1", "index2", "index3")); final ClusterState cs2 = addAliases( cs, - org.opensearch.common.collect.List.of( - tuple("alias1", org.opensearch.common.collect.List.of(tuple("index1", false), tuple("index2", false))), - tuple("alias2", org.opensearch.common.collect.List.of(tuple("index2", false), tuple("index3", false))) + List.of( + tuple("alias1", List.of(tuple("index1", false), tuple("index2", false))), + tuple("alias2", List.of(tuple("index2", false), tuple("index3", false))) ) ); PutMappingRequest request = new PutMappingRequest().indices("alias2").writeIndexOnly(true); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/resolve/ResolveIndexResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/resolve/ResolveIndexResponseTests.java index 9588e1272577d..ba1150aef6703 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/resolve/ResolveIndexResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/resolve/ResolveIndexResponseTests.java @@ -92,9 +92,7 @@ protected Response createTestInstance() { private static ResolvedIndex createTestResolvedIndexInstance() { String name = randomAlphaOfLength(6); String[] aliases = randomStringArray(0, 5); - String[] attributes = randomSubsetOf(org.opensearch.common.collect.List.of("open", "hidden", "frozen")).toArray( - Strings.EMPTY_ARRAY - ); + String[] attributes = randomSubsetOf(List.of("open", "hidden", "frozen")).toArray(Strings.EMPTY_ARRAY); String dataStream = 
randomBoolean() ? randomAlphaOfLength(6) : null; return new ResolvedIndex(name, aliases, attributes, dataStream); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java index 98e833b051032..d73ba0c7e9105 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java @@ -71,15 +71,7 @@ protected PutComposableIndexTemplateAction.Request mutateInstance(PutComposableI public void testPutGlobalTemplatesCannotHaveHiddenIndexSetting() { Template template = new Template(Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, true).build(), null, null); - ComposableIndexTemplate globalTemplate = new ComposableIndexTemplate( - org.opensearch.common.collect.List.of("*"), - template, - null, - null, - null, - null, - null - ); + ComposableIndexTemplate globalTemplate = new ComposableIndexTemplate(List.of("*"), template, null, null, null, null, null); PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("test"); request.indexTemplate(globalTemplate); diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java index 32a0b3723f7ae..c51c9f7ea77dc 100644 --- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java @@ -58,49 +58,19 @@ public void testIndexRequest() throws IOException { }, req -> fail(), req -> fail()); assertTrue(parsed.get()); - parser.parse( - request, - "foo", - null, - null, - null, - true, - false, - XContentType.JSON, - indexRequest -> { assertTrue(indexRequest.isRequireAlias()); }, - req -> fail(), - req -> fail() - ); + parser.parse(request, "foo", null, null, null, true, false, XContentType.JSON, indexRequest -> { + assertTrue(indexRequest.isRequireAlias()); + }, req -> fail(), req -> fail()); request = new BytesArray("{ \"index\":{ \"_id\": \"bar\", \"require_alias\": true } }\n{}\n"); - parser.parse( - request, - "foo", - null, - null, - null, - null, - false, - XContentType.JSON, - indexRequest -> { assertTrue(indexRequest.isRequireAlias()); }, - req -> fail(), - req -> fail() - ); + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, indexRequest -> { + assertTrue(indexRequest.isRequireAlias()); + }, req -> fail(), req -> fail()); request = new BytesArray("{ \"index\":{ \"_id\": \"bar\", \"require_alias\": false } }\n{}\n"); - parser.parse( - request, - "foo", - null, - null, - null, - true, - false, - XContentType.JSON, - indexRequest -> { assertFalse(indexRequest.isRequireAlias()); }, - req -> fail(), - req -> fail() - ); + parser.parse(request, "foo", null, null, null, true, false, XContentType.JSON, indexRequest -> { + assertFalse(indexRequest.isRequireAlias()); + }, req -> fail(), req -> fail()); } public void testDeleteRequest() throws IOException { @@ -129,49 +99,19 @@ public void testUpdateRequest() throws IOException { }, req -> fail()); assertTrue(parsed.get()); - parser.parse( - request, - "foo", - null, - null, - null, - true, - false, - XContentType.JSON, - req -> fail(), - updateRequest -> { 
assertTrue(updateRequest.isRequireAlias()); }, - req -> fail() - ); + parser.parse(request, "foo", null, null, null, true, false, XContentType.JSON, req -> fail(), updateRequest -> { + assertTrue(updateRequest.isRequireAlias()); + }, req -> fail()); request = new BytesArray("{ \"update\":{ \"_id\": \"bar\", \"require_alias\": true } }\n{}\n"); - parser.parse( - request, - "foo", - null, - null, - null, - null, - false, - XContentType.JSON, - req -> fail(), - updateRequest -> { assertTrue(updateRequest.isRequireAlias()); }, - req -> fail() - ); + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, req -> fail(), updateRequest -> { + assertTrue(updateRequest.isRequireAlias()); + }, req -> fail()); request = new BytesArray("{ \"update\":{ \"_id\": \"bar\", \"require_alias\": false } }\n{}\n"); - parser.parse( - request, - "foo", - null, - null, - null, - true, - false, - XContentType.JSON, - req -> fail(), - updateRequest -> { assertFalse(updateRequest.isRequireAlias()); }, - req -> fail() - ); + parser.parse(request, "foo", null, null, null, true, false, XContentType.JSON, req -> fail(), updateRequest -> { + assertFalse(updateRequest.isRequireAlias()); + }, req -> fail()); } public void testBarfOnLackOfTrailingNewline() { diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index 32e9dd44008cd..0846a5f8dec5c 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -96,11 +96,9 @@ public void testAllFail() { bulkRequest.add(new IndexRequest("can't")); bulkRequest.add(new DeleteRequest("do").version(0).versionType(VersionType.EXTERNAL)); bulkRequest.add(new UpdateRequest("nothin", randomAlphaOfLength(5))); - indicesThatCannotBeCreatedTestCase( - new HashSet<>(Arrays.asList("no", "can't", "do", "nothin")), - bulkRequest, - index -> { throw new IndexNotFoundException("Can't make it because I say so"); } - ); + indicesThatCannotBeCreatedTestCase(new HashSet<>(Arrays.asList("no", "can't", "do", "nothin")), bulkRequest, index -> { + throw new IndexNotFoundException("Can't make it because I say so"); + }); } public void testSomeFail() { diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index 202f1b7dcb5b4..5b96c2a71dbf8 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -302,11 +302,9 @@ public void testIngestSkipped() throws Exception { public void testSingleItemBulkActionIngestSkipped() throws Exception { IndexRequest indexRequest = new IndexRequest("index").id("id"); indexRequest.source(emptyMap()); - singleItemBulkWriteAction.execute( - null, - indexRequest, - ActionListener.wrap(response -> {}, exception -> { throw new AssertionError(exception); }) - ); + singleItemBulkWriteAction.execute(null, indexRequest, ActionListener.wrap(response -> {}, exception -> { + throw new AssertionError(exception); + })); assertTrue(action.isExecuted); verifyNoInteractions(ingestService); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java 
b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java index 5eb395cb05971..6c23092c789d1 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java @@ -74,6 +74,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.SortedMap; import java.util.TreeMap; import java.util.concurrent.TimeUnit; @@ -343,19 +344,17 @@ public void testIncludesSystem() { ".bar", new Index(IndexMetadata.builder(".bar").settings(settings).system(true).numberOfShards(1).numberOfReplicas(0).build()) ); - SystemIndices systemIndices = new SystemIndices( - org.opensearch.common.collect.Map.of("plugin", org.opensearch.common.collect.List.of(new SystemIndexDescriptor(".test", ""))) - ); - List<String> onlySystem = org.opensearch.common.collect.List.of(".foo", ".bar"); + SystemIndices systemIndices = new SystemIndices(Map.of("plugin", List.of(new SystemIndexDescriptor(".test", "")))); + List<String> onlySystem = List.of(".foo", ".bar"); assertTrue(bulkAction.includesSystem(buildBulkRequest(onlySystem), indicesLookup, systemIndices)); - onlySystem = org.opensearch.common.collect.List.of(".foo", ".bar", ".test"); + onlySystem = List.of(".foo", ".bar", ".test"); assertTrue(bulkAction.includesSystem(buildBulkRequest(onlySystem), indicesLookup, systemIndices)); - List<String> nonSystem = org.opensearch.common.collect.List.of("foo", "bar"); + List<String> nonSystem = List.of("foo", "bar"); assertFalse(bulkAction.includesSystem(buildBulkRequest(nonSystem), indicesLookup, systemIndices)); - List<String> mixed = org.opensearch.common.collect.List.of(".foo", ".test", "other"); + List<String> mixed = List.of(".foo", ".test", "other"); assertTrue(bulkAction.includesSystem(buildBulkRequest(mixed), indicesLookup, systemIndices)); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java index 2aff8f6bfc6ab..1d7828027baa0 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java @@ -69,6 +69,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.VersionType; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.VersionConflictEngineException; @@ -1070,6 +1071,7 @@ public void testHandlePrimaryTermValidationRequestWithDifferentAllocationId() { mock(UpdateHelper.class), mock(ActionFilters.class), mock(IndexingPressureService.class), + mock(SegmentReplicationPressureService.class), mock(SystemIndices.class) ); action.handlePrimaryTermValidationRequest( @@ -1099,6 +1101,7 @@ public void testHandlePrimaryTermValidationRequestWithOlderPrimaryTerm() { mock(UpdateHelper.class), mock(ActionFilters.class), mock(IndexingPressureService.class), + mock(SegmentReplicationPressureService.class), mock(SystemIndices.class) ); action.handlePrimaryTermValidationRequest( @@ -1128,6 +1131,7 @@ public void testHandlePrimaryTermValidationRequestSuccess() { mock(UpdateHelper.class), mock(ActionFilters.class), mock(IndexingPressureService.class), + mock(SegmentReplicationPressureService.class), mock(SystemIndices.class) );
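The TransportShardBulkAction hunks above and below all make the same mechanical change: the production class gained a SegmentReplicationPressureService constructor parameter, so every test-side instantiation now supplies a Mockito stub. A minimal sketch of the pattern, using hypothetical stand-in types rather than the real constructor signature:

```java
import static org.mockito.Mockito.mock;

// Hypothetical stand-ins: the real TransportShardBulkAction constructor
// takes many more collaborators than shown here.
class PressureService {}

class ShardAction {
    private final PressureService pressureService;

    ShardAction(PressureService pressureService) {
        this.pressureService = pressureService;
    }
}

class ShardActionTestSupport {
    // Funnel construction through one helper so the next added constructor
    // parameter touches a single line instead of every test method.
    static ShardAction createAction() {
        return new ShardAction(mock(PressureService.class));
    }
}
```

The last hunk's private createAction() helper already follows that shape; the three inline constructions before it are why one new parameter produced three near-identical edits.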
action.handlePrimaryTermValidationRequest( @@ -1168,6 +1172,7 @@ private TransportShardBulkAction createAction() { mock(UpdateHelper.class), mock(ActionFilters.class), mock(IndexingPressureService.class), + mock(SegmentReplicationPressureService.class), mock(SystemIndices.class) ); } diff --git a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java index d6a0ecd0512dc..277f2f1dee0bf 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java @@ -317,7 +317,9 @@ public void testFanOutAndCollect() throws InterruptedException { AtomicReference response = new AtomicReference<>(); ActionListener responseListener = ActionListener.wrap( searchResponse -> response.set((TestSearchResponse) searchResponse), - (e) -> { throw new AssertionError("unexpected", e); } + (e) -> { + throw new AssertionError("unexpected", e); + } ); DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); diff --git a/server/src/test/java/org/opensearch/action/support/AutoCreateIndexTests.java b/server/src/test/java/org/opensearch/action/support/AutoCreateIndexTests.java index 068414678e860..7263cbe90c1b7 100644 --- a/server/src/test/java/org/opensearch/action/support/AutoCreateIndexTests.java +++ b/server/src/test/java/org/opensearch/action/support/AutoCreateIndexTests.java @@ -231,7 +231,7 @@ public void testUpdate() { settings, clusterSettings, new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), - new SystemIndices(org.opensearch.common.collect.Map.of()) + new SystemIndices(Map.of()) ); assertThat(autoCreateIndex.getAutoCreate().isAutoCreateIndex(), equalTo(value)); @@ -257,12 +257,7 @@ private static ClusterState buildClusterState(String... 
indices) { } private AutoCreateIndex newAutoCreateIndex(Settings settings) { - SystemIndices systemIndices = new SystemIndices( - org.opensearch.common.collect.Map.of( - "plugin", - org.opensearch.common.collect.List.of(new SystemIndexDescriptor(TEST_SYSTEM_INDEX_NAME, "")) - ) - ); + SystemIndices systemIndices = new SystemIndices(Map.of("plugin", List.of(new SystemIndexDescriptor(TEST_SYSTEM_INDEX_NAME, "")))); return new AutoCreateIndex( settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), diff --git a/server/src/test/java/org/opensearch/client/OriginSettingClientTests.java b/server/src/test/java/org/opensearch/client/OriginSettingClientTests.java index 2f24ff3fb1f29..eca49516f42c2 100644 --- a/server/src/test/java/org/opensearch/client/OriginSettingClientTests.java +++ b/server/src/test/java/org/opensearch/client/OriginSettingClientTests.java @@ -77,9 +77,8 @@ protected void } private ActionListener listenerThatAssertsOriginNotSet(ThreadContext threadContext) { - return ActionListener.wrap( - r -> { assertNull(threadContext.getTransient(ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME)); }, - e -> { fail("didn't expect to fail but: " + e); } - ); + return ActionListener.wrap(r -> { assertNull(threadContext.getTransient(ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME)); }, e -> { + fail("didn't expect to fail but: " + e); + }); } } diff --git a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java index 70fdb91ee0632..1cd21482566ef 100644 --- a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -59,6 +59,7 @@ import org.opensearch.test.client.NoOpClient; import org.opensearch.threadpool.ThreadPool; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import static org.opensearch.cluster.InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING; @@ -93,7 +94,9 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { "test", "clusterManagerService", threadPool, - r -> { fail("cluster-manager service should not run any tasks"); } + r -> { + fail("cluster-manager service should not run any tasks"); + } ); final ClusterService clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService); @@ -209,11 +212,7 @@ protected void if (request instanceof NodesStatsRequest || request instanceof IndicesStatsRequest) { requestCount++; // ClusterInfoService handles ClusterBlockExceptions quietly, so we invent such an exception to avoid excess logging - listener.onFailure( - new ClusterBlockException( - org.opensearch.common.collect.Set.of(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ALL) - ) - ); + listener.onFailure(new ClusterBlockException(Set.of(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ALL))); } else { fail("unexpected action: " + action.name()); } diff --git a/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java b/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java index 7325024138500..20fde9fc66c32 100644 --- a/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java @@ -45,7 +45,6 @@ import 
org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.collect.Map; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.index.Index; @@ -57,6 +56,7 @@ import org.opensearch.test.OpenSearchTestCase; import java.util.List; +import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java index df5b6566b503e..a0ac9d94c8c37 100644 --- a/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java @@ -41,6 +41,7 @@ import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; @@ -129,7 +130,7 @@ private ClusterState addClusterManagerNodes(ClusterState clusterState) { private ClusterState addDataNodeForAZone(ClusterState clusterState, String zone, String... nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); - org.opensearch.common.collect.List.of(nodeIds) + List.of(nodeIds) .forEach( nodeId -> nodeBuilder.add( new DiscoveryNode( @@ -148,7 +149,7 @@ private ClusterState addDataNodeForAZone(ClusterState clusterState, String zone, private ClusterState addClusterManagerNodeForAZone(ClusterState clusterState, String zone, String... 
nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); - org.opensearch.common.collect.List.of(nodeIds) + List.of(nodeIds) .forEach( nodeId -> nodeBuilder.add( new DiscoveryNode( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java index fb3a628827462..137de1355f11b 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java @@ -125,9 +125,9 @@ public void testBootstrapsAutomaticallyWithDefaultConfiguration() { settings.put(UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.getKey(), timeout + "ms"); } - final AtomicReference>> discoveredNodesSupplier = new AtomicReference<>( - () -> { throw new AssertionError("should not be called yet"); } - ); + final AtomicReference>> discoveredNodesSupplier = new AtomicReference<>(() -> { + throw new AssertionError("should not be called yet"); + }); final AtomicBoolean bootstrapped = new AtomicBoolean(); ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( @@ -163,13 +163,9 @@ public void testDoesNothingByDefaultIfMasterNodesConfigured() { } private void testDoesNothingWithSettings(Settings.Builder builder) { - ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( - builder.build(), - transportService, - () -> { throw new AssertionError("should not be called"); }, - () -> false, - vc -> { throw new AssertionError("should not be called"); } - ); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(builder.build(), transportService, () -> { + throw new AssertionError("should not be called"); + }, () -> false, vc -> { throw new AssertionError("should not be called"); }); transportService.start(); clusterBootstrapService.scheduleUnconfiguredBootstrap(); deterministicTaskQueue.runAllTasks(); @@ -182,7 +178,9 @@ public void testThrowsExceptionOnDuplicates() { transportService, Collections::emptyList, () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); }); @@ -236,7 +234,9 @@ public void testDoesNotBootstrapsOnNonMasterNode() { transportService, () -> Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); assertWarnings(CLUSTER_SETTING_DEPRECATED_MESSAGE); transportService.start(); @@ -250,7 +250,9 @@ public void testDoesNotBootstrapsIfLocalNodeNotInInitialMasterNodes() { transportService, () -> Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); assertWarnings(CLUSTER_SETTING_DEPRECATED_MESSAGE); transportService.start(); @@ -264,7 +266,9 @@ public void testDoesNotBootstrapsIfNotConfigured() { transportService, () -> Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); assertWarnings(CLUSTER_SETTING_DEPRECATED_MESSAGE); 
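A pattern worth naming in these bootstrap-service tests: callbacks whose entire body is `throw new AssertionError("should not be called")`. Handing such a callback to the component under test turns "this path must never execute" into a hard test failure. A small self-contained sketch of the idea (names are illustrative, not from the OpenSearch test suite):

```java
import java.util.function.Consumer;

public class GuardedCallbackDemo {
    // Returns a callback that fails the test the moment it is invoked.
    static <T> Consumer<T> mustNotBeCalled(String what) {
        return value -> {
            throw new AssertionError(what + " should not be called, got: " + value);
        };
    }

    public static void main(String[] args) {
        Consumer<String> votingConfigHandler = mustNotBeCalled("voting config handler");
        // Pass the guard to the code under test; a correctly wired scenario
        // completes without ever invoking it.
        System.out.println("guard created: " + votingConfigHandler);
    }
}
```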
transportService.start(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java index 9f9ccf34a6a9d..be7b32d4aef11 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -126,9 +126,9 @@ public void testBootstrapsAutomaticallyWithDefaultConfiguration() { settings.put(UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.getKey(), timeout + "ms"); } - final AtomicReference>> discoveredNodesSupplier = new AtomicReference<>( - () -> { throw new AssertionError("should not be called yet"); } - ); + final AtomicReference>> discoveredNodesSupplier = new AtomicReference<>(() -> { + throw new AssertionError("should not be called yet"); + }); final AtomicBoolean bootstrapped = new AtomicBoolean(); ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( @@ -182,13 +182,9 @@ public void testDoesNothingByDefaultOnClusterManagerIneligibleNodes() { } private void testDoesNothingWithSettings(Settings.Builder builder) { - ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( - builder.build(), - transportService, - () -> { throw new AssertionError("should not be called"); }, - () -> false, - vc -> { throw new AssertionError("should not be called"); } - ); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(builder.build(), transportService, () -> { + throw new AssertionError("should not be called"); + }, () -> false, vc -> { throw new AssertionError("should not be called"); }); transportService.start(); clusterBootstrapService.scheduleUnconfiguredBootstrap(); deterministicTaskQueue.runAllTasks(); @@ -201,7 +197,9 @@ public void testThrowsExceptionOnDuplicates() { transportService, Collections::emptyList, () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); }); @@ -330,7 +328,9 @@ public void testDoesNotBootstrapIfNoNodesDiscovered() { transportService, Collections::emptyList, () -> true, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); transportService.start(); @@ -353,7 +353,9 @@ public void testDoesNotBootstrapIfTwoOfFiveNodesDiscovered() { transportService, () -> Stream.of(otherNode1).collect(Collectors.toList()), () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); transportService.start(); @@ -377,7 +379,9 @@ public void testDoesNotBootstrapIfThreeOfSixNodesDiscovered() { transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); transportService.start(); @@ -393,7 +397,9 @@ public void testDoesNotBootstrapIfAlreadyBootstrapped() { transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> true, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); transportService.start(); @@ -417,7 +423,9 @@ public void testDoesNotBootstrapsOnNonClusterManagerNode() { transportService, () -> Stream.of(localNode, otherNode1, 
otherNode2).collect(Collectors.toList()), () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); transportService.start(); clusterBootstrapService.onFoundPeersUpdated(); @@ -430,7 +438,9 @@ public void testDoesNotBootstrapsIfLocalNodeNotInInitialClusterManagerNodes() { transportService, () -> Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); transportService.start(); clusterBootstrapService.onFoundPeersUpdated(); @@ -443,7 +453,9 @@ public void testDoesNotBootstrapsIfNotConfigured() { transportService, () -> Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); transportService.start(); clusterBootstrapService.scheduleUnconfiguredBootstrap(); @@ -484,7 +496,9 @@ public void testCancelsBootstrapIfRequirementMatchesMultipleNodes() { transportService, discoveredNodes::get, () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); transportService.start(); @@ -507,7 +521,9 @@ public void testCancelsBootstrapIfNodeMatchesMultipleRequirements() { transportService, discoveredNodes::get, () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); transportService.start(); @@ -593,7 +609,9 @@ public void testDoesNotJustMatchEverything() { transportService, Collections::emptyList, () -> false, - vc -> { throw new AssertionError("should not be called"); } + vc -> { + throw new AssertionError("should not be called"); + } ); transportService.start(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java index feede63455280..2dd5d3980a5d3 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -48,6 +48,7 @@ import java.util.Arrays; import java.util.HashSet; +import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -288,7 +289,7 @@ public void testDescriptionOnUnhealthyNodes() { "local", buildNewFakeTransportAddress(), emptyMap(), - org.opensearch.common.collect.Set.of(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), + Set.of(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), Version.CURRENT ); clusterState = ClusterState.builder(ClusterName.DEFAULT) diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index 74c5d0fcccbed..c1a1a63a31e30 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -1232,11 +1232,9 @@ public void testClusterCannotFormWithFailingJoinValidation() { nodes.stream().map(ClusterNode::getLocalNode).map(DiscoveryNode::getId).collect(Collectors.toSet()) ) == false, () -> randomSubsetOf(cluster.clusterNodes) - ).forEach( - 
cn -> cn.extraJoinValidators.add( - (discoveryNode, clusterState) -> { throw new IllegalArgumentException("join validation failed"); } - ) - ); + ).forEach(cn -> cn.extraJoinValidators.add((discoveryNode, clusterState) -> { + throw new IllegalArgumentException("join validation failed"); + })); cluster.bootstrapIfNecessary(); cluster.runFor(10000, "failing join validation"); assertTrue(cluster.clusterNodes.stream().allMatch(cn -> cn.getLastAppliedClusterState().version() == 0)); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java index 5bbbed5f4f8c0..0862597937e9d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java @@ -132,7 +132,9 @@ protected void onSendRequest(long requestId, String action, TransportRequest req settings, transportService, fcr -> { assert false : fcr; }, - (node, reason) -> { assert false : node; }, + (node, reason) -> { + assert false : node; + }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info") ); @@ -705,7 +707,9 @@ public void testPreferClusterManagerNodes() { Settings.EMPTY, transportService, fcr -> { assert false : fcr; }, - (node, reason) -> { assert false : node; }, + (node, reason) -> { + assert false : node; + }, () -> new StatusInfo(HEALTHY, "healthy-info") ); followersChecker.setCurrentNodes(discoveryNodes); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java index 7b21042b2ed4a..b56abe101bd4c 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java @@ -86,7 +86,9 @@ public void testJoinDeduplication() { transportService, () -> 0L, () -> null, - (joinRequest, joinCallback) -> { throw new AssertionError(); }, + (joinRequest, joinCallback) -> { + throw new AssertionError(); + }, startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, @@ -220,20 +222,11 @@ private void assertJoinValidationRejectsMismatchedClusterUUID(String actionName, null, Collections.emptySet() ); - new JoinHelper( - Settings.EMPTY, - null, - null, - transportService, - () -> 0L, - () -> localClusterState, - (joinRequest, joinCallback) -> { throw new AssertionError(); }, - startJoinRequest -> { throw new AssertionError(); }, - Collections.emptyList(), - (s, p, r) -> {}, - null, - nodeCommissioned -> {} - ); // registers request handler + new JoinHelper(Settings.EMPTY, null, null, transportService, () -> 0L, () -> localClusterState, (joinRequest, joinCallback) -> { + throw new AssertionError(); + }, startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, null, nodeCommissioned -> {}); // registers + // request + // handler transportService.start(); transportService.acceptIncomingRequests(); @@ -282,7 +275,9 @@ public void testJoinFailureOnUnhealthyNodes() { transportService, () -> 0L, () -> null, - (joinRequest, joinCallback) -> { throw new AssertionError(); }, + (joinRequest, joinCallback) -> { + throw new AssertionError(); + }, startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, diff --git 
a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 75b86a12c7d14..ed4c4018bfd1b 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -48,7 +48,6 @@ import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.common.UUIDs; -import org.opensearch.common.collect.List; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -57,6 +56,7 @@ import java.util.HashMap; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.is; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index ec8d5bcf1c687..2752f57b499b3 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -67,6 +67,7 @@ import org.opensearch.transport.TestTransportChannel; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.junit.After; @@ -218,6 +219,17 @@ protected void onSendRequest(long requestId, String action, TransportRequest req super.onSendRequest(requestId, action, request, destination); } } + + @Override + protected void onSendRequest( + long requestId, + String action, + TransportRequest request, + DiscoveryNode destination, + TransportRequestOptions options + ) { + onSendRequest(requestId, action, request, destination); + } }; final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); TransportService transportService = capturingTransport.createTransportService( @@ -517,14 +529,9 @@ public void testJoinUpdateVotingConfigExclusion() throws Exception { ) ); - assertTrue( - MasterServiceTests.discoveryState(clusterManagerService) - .getVotingConfigExclusions() - .stream() - .anyMatch( - exclusion -> { return "knownNodeName".equals(exclusion.getNodeName()) && "newNodeId".equals(exclusion.getNodeId()); } - ) - ); + assertTrue(MasterServiceTests.discoveryState(clusterManagerService).getVotingConfigExclusions().stream().anyMatch(exclusion -> { + return "knownNodeName".equals(exclusion.getNodeName()) && "newNodeId".equals(exclusion.getNodeId()); + })); } private ClusterState buildStateWithVotingConfigExclusion( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/OpenSearchNodeCommandTests.java b/server/src/test/java/org/opensearch/cluster/coordination/OpenSearchNodeCommandTests.java index e88633fbf9399..242307d220381 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/OpenSearchNodeCommandTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/OpenSearchNodeCommandTests.java @@ -39,7 +39,6 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.UUIDs; import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.collect.List; import 
org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; @@ -50,6 +49,7 @@ import java.io.IOException; import java.nio.file.Path; +import java.util.List; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java index 9736355629fd9..7108e06fe39fc 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java @@ -40,6 +40,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -265,7 +266,7 @@ public void onFailure(Exception e) { private ClusterState addNodes(ClusterState clusterState, String zone, String... nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); - org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newNode(nodeId, singletonMap("zone", zone)))); + List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newNode(nodeId, singletonMap("zone", zone)))); clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); return clusterState; } diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index c1ce0a3534720..d4bc474ae0c10 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -42,6 +42,7 @@ import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -421,15 +422,14 @@ private void setWeightedRoutingWeights(Map weights) { private ClusterState addDataNodes(ClusterState clusterState, String zone, String... nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); - org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newDataNode(nodeId, singletonMap("zone", zone)))); + List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newDataNode(nodeId, singletonMap("zone", zone)))); clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); return clusterState; } private ClusterState addClusterManagerNodes(ClusterState clusterState, String zone, String... 
nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); - org.opensearch.common.collect.List.of(nodeIds) - .forEach(nodeId -> nodeBuilder.add(newClusterManagerNode(nodeId, singletonMap("zone", zone)))); + List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newClusterManagerNode(nodeId, singletonMap("zone", zone)))); clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); return clusterState; } @@ -474,7 +474,7 @@ private static DiscoveryNode newClusterManagerNode(String nodeId, Map { builder.settings(Settings.builder().put("template_setting", "value1")); } - ); + IndexTemplateMetadata templateMetadata = addMatchingTemplate(builder -> { + builder.settings(Settings.builder().put("template_setting", "value1")); + }); ImmutableOpenMap.Builder templatesBuilder = ImmutableOpenMap.builder(); templatesBuilder.put("template_1", templateMetadata); Metadata metadata = new Metadata.Builder().templates(templatesBuilder.build()).build(); @@ -957,13 +957,7 @@ public void testClusterStateCreateIndexThrowsWriteIndexValidationException() thr assertThat( expectThrows( IllegalStateException.class, - () -> clusterStateCreateIndex( - currentClusterState, - org.opensearch.common.collect.Set.of(), - newIndex, - (state, reason) -> state, - null - ) + () -> clusterStateCreateIndex(currentClusterState, Set.of(), newIndex, (state, reason) -> state, null) ).getMessage(), startsWith("alias [alias1] has more than one write index [") ); @@ -988,7 +982,7 @@ public void testClusterStateCreateIndex() { ClusterState updatedClusterState = clusterStateCreateIndex( currentClusterState, - org.opensearch.common.collect.Set.of(INDEX_READ_ONLY_BLOCK), + Set.of(INDEX_READ_ONLY_BLOCK), newIndexMetadata, rerouteRoutingTable, null @@ -1027,7 +1021,7 @@ public void testClusterStateCreateIndexWithMetadataTransaction() { ClusterState updatedClusterState = clusterStateCreateIndex( currentClusterState, - org.opensearch.common.collect.Set.of(INDEX_READ_ONLY_BLOCK), + Set.of(INDEX_READ_ONLY_BLOCK), newIndexMetadata, (clusterState, y) -> clusterState, metadataTransformer diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataDeleteIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataDeleteIndexServiceTests.java index 5caea9f5bf674..dbff833cfee60 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataDeleteIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataDeleteIndexServiceTests.java @@ -98,7 +98,7 @@ public void testDeleteSnapshotting() { String index = randomAlphaOfLength(5); Snapshot snapshot = new Snapshot("doesn't matter", new SnapshotId("snapshot name", "snapshot uuid")); SnapshotsInProgress snaps = SnapshotsInProgress.of( - org.opensearch.common.collect.List.of( + List.of( new SnapshotsInProgress.Entry( snapshot, true, @@ -153,14 +153,14 @@ public void testDeleteBackingIndexForDataStream() { int numBackingIndices = randomIntBetween(2, 5); String dataStreamName = randomAlphaOfLength(6).toLowerCase(Locale.ROOT); ClusterState before = DataStreamTestHelper.getClusterStateWithDataStreams( - org.opensearch.common.collect.List.of(new Tuple<>(dataStreamName, numBackingIndices)), - org.opensearch.common.collect.List.of() + List.of(new Tuple<>(dataStreamName, numBackingIndices)), + List.of() ); int numIndexToDelete = randomIntBetween(1, numBackingIndices - 1); Index indexToDelete = before.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, 
numIndexToDelete)).getIndex(); - ClusterState after = service.deleteIndices(before, org.opensearch.common.collect.Set.of(indexToDelete)); + ClusterState after = service.deleteIndices(before, Set.of(indexToDelete)); assertThat(after.metadata().getIndices().get(indexToDelete.getName()), IsNull.nullValue()); assertThat(after.metadata().getIndices().size(), equalTo(numBackingIndices - 1)); @@ -175,8 +175,8 @@ public void testDeleteMultipleBackingIndexForDataStream() { int numBackingIndicesToDelete = randomIntBetween(2, numBackingIndices - 1); String dataStreamName = randomAlphaOfLength(6).toLowerCase(Locale.ROOT); ClusterState before = DataStreamTestHelper.getClusterStateWithDataStreams( - org.opensearch.common.collect.List.of(new Tuple<>(dataStreamName, numBackingIndices)), - org.opensearch.common.collect.List.of() + List.of(new Tuple<>(dataStreamName, numBackingIndices)), + List.of() ); List indexNumbersToDelete = randomSubsetOf( @@ -204,15 +204,12 @@ public void testDeleteCurrentWriteIndexForDataStream() { int numBackingIndices = randomIntBetween(1, 5); String dataStreamName = randomAlphaOfLength(6).toLowerCase(Locale.ROOT); ClusterState before = DataStreamTestHelper.getClusterStateWithDataStreams( - org.opensearch.common.collect.List.of(new Tuple<>(dataStreamName, numBackingIndices)), - org.opensearch.common.collect.List.of() + List.of(new Tuple<>(dataStreamName, numBackingIndices)), + List.of() ); Index indexToDelete = before.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, numBackingIndices)).getIndex(); - Exception e = expectThrows( - IllegalArgumentException.class, - () -> service.deleteIndices(before, org.opensearch.common.collect.Set.of(indexToDelete)) - ); + Exception e = expectThrows(IllegalArgumentException.class, () -> service.deleteIndices(before, Set.of(indexToDelete))); assertThat( e.getMessage(), diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java index fb01a493ff7c3..5d476b2dbdca5 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java @@ -450,10 +450,7 @@ public void testCloseCurrentWriteIndexForDataStream() { dataStreamsToCreate.add(new Tuple<>(dataStreamName, numBackingIndices)); writeIndices.add(DataStream.getDefaultBackingIndexName(dataStreamName, numBackingIndices)); } - ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams( - dataStreamsToCreate, - org.opensearch.common.collect.List.of() - ); + ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams(dataStreamsToCreate, List.of()); ClusterService clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(cs); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index d54b7e9389bf7..26f711bba14f9 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -429,9 +429,9 @@ public void testUpdateComponentTemplateWithIndexHiddenSetting() throws Exception assertNotNull(state.metadata().componentTemplates().get("foo")); ComposableIndexTemplate firstGlobalIndexTemplate 
= new ComposableIndexTemplate( - org.opensearch.common.collect.List.of("*"), + List.of("*"), template, - org.opensearch.common.collect.List.of("foo"), + List.of("foo"), 1L, null, null, @@ -440,9 +440,9 @@ public void testUpdateComponentTemplateWithIndexHiddenSetting() throws Exception state = metadataIndexTemplateService.addIndexTemplateV2(state, true, "globalindextemplate1", firstGlobalIndexTemplate); ComposableIndexTemplate secondGlobalIndexTemplate = new ComposableIndexTemplate( - org.opensearch.common.collect.List.of("*"), + List.of("*"), template, - org.opensearch.common.collect.List.of("foo"), + List.of("foo"), 2L, null, null, @@ -451,9 +451,9 @@ public void testUpdateComponentTemplateWithIndexHiddenSetting() throws Exception state = metadataIndexTemplateService.addIndexTemplateV2(state, true, "globalindextemplate2", secondGlobalIndexTemplate); ComposableIndexTemplate fooPatternIndexTemplate = new ComposableIndexTemplate( - org.opensearch.common.collect.List.of("foo-*"), + List.of("foo-*"), template, - org.opensearch.common.collect.List.of("foo"), + List.of("foo"), 3L, null, null, @@ -616,9 +616,9 @@ public void onFailure(Exception e) { waitToCreateComponentTemplate.await(10, TimeUnit.SECONDS); ComposableIndexTemplate globalIndexTemplate = new ComposableIndexTemplate( - org.opensearch.common.collect.List.of("*"), + List.of("*"), null, - org.opensearch.common.collect.List.of("ct-with-index-hidden-setting"), + List.of("ct-with-index-hidden-setting"), null, null, null, @@ -941,9 +941,9 @@ public void testFindV2InvalidGlobalTemplate() { try { // add an invalid global template that specifies the `index.hidden` setting ComposableIndexTemplate invalidGlobalTemplate = new ComposableIndexTemplate( - org.opensearch.common.collect.List.of("*"), + List.of("*"), templateWithHiddenSetting, - org.opensearch.common.collect.List.of("ct"), + List.of("ct"), 5L, 1L, null, @@ -952,9 +952,7 @@ public void testFindV2InvalidGlobalTemplate() { Metadata invalidGlobalTemplateMetadata = Metadata.builder() .putCustom( ComposableIndexTemplateMetadata.TYPE, - new ComposableIndexTemplateMetadata( - org.opensearch.common.collect.Map.of("invalid_global_template", invalidGlobalTemplate) - ) + new ComposableIndexTemplateMetadata(Map.of("invalid_global_template", invalidGlobalTemplate)) ) .build(); @@ -1213,7 +1211,7 @@ public void testDefinedTimestampMappingIsAddedForDataStreamTemplates() throws Ex { ComposableIndexTemplate it = new ComposableIndexTemplate( - org.opensearch.common.collect.List.of("logs*"), + List.of("logs*"), new Template( null, new CompressedXContent( @@ -1227,7 +1225,7 @@ public void testDefinedTimestampMappingIsAddedForDataStreamTemplates() throws Ex ), null ), - org.opensearch.common.collect.List.of("ct1"), + List.of("ct1"), 0L, 1L, null, @@ -1245,7 +1243,7 @@ public void testDefinedTimestampMappingIsAddedForDataStreamTemplates() throws Ex assertThat(mappings.size(), equalTo(4)); List> parsedMappings = mappings.stream().map(m -> { try { - return MapperService.parseMapping(new NamedXContentRegistry(org.opensearch.common.collect.List.of()), m.string()); + return MapperService.parseMapping(new NamedXContentRegistry(List.of()), m.string()); } catch (Exception e) { logger.error(e); fail("failed to parse mappings: " + m.string()); @@ -1253,38 +1251,23 @@ public void testDefinedTimestampMappingIsAddedForDataStreamTemplates() throws Ex } }).collect(Collectors.toList()); - Map firstParsedMapping = org.opensearch.common.collect.Map.of( + Map firstParsedMapping = Map.of( "_doc", - 
org.opensearch.common.collect.Map.of( - "properties", - org.opensearch.common.collect.Map.of(TIMESTAMP_FIELD.getName(), org.opensearch.common.collect.Map.of("type", "date")) - ) + Map.of("properties", Map.of(TIMESTAMP_FIELD.getName(), Map.of("type", "date"))) ); assertThat(parsedMappings.get(0), equalTo(firstParsedMapping)); - Map secondMapping = org.opensearch.common.collect.Map.of( - "_doc", - org.opensearch.common.collect.Map.of( - "properties", - org.opensearch.common.collect.Map.of("field1", org.opensearch.common.collect.Map.of("type", "keyword")) - ) - ); + Map secondMapping = Map.of("_doc", Map.of("properties", Map.of("field1", Map.of("type", "keyword")))); assertThat(parsedMappings.get(1), equalTo(secondMapping)); - Map thirdMapping = org.opensearch.common.collect.Map.of( - "_doc", - org.opensearch.common.collect.Map.of( - "properties", - org.opensearch.common.collect.Map.of("field2", org.opensearch.common.collect.Map.of("type", "integer")) - ) - ); + Map thirdMapping = Map.of("_doc", Map.of("properties", Map.of("field2", Map.of("type", "integer")))); assertThat(parsedMappings.get(2), equalTo(thirdMapping)); } { // indices matched by templates without the data stream field defined don't get the default @timestamp mapping ComposableIndexTemplate it = new ComposableIndexTemplate( - org.opensearch.common.collect.List.of("timeseries*"), + List.of("timeseries*"), new Template( null, new CompressedXContent( @@ -1298,7 +1281,7 @@ public void testDefinedTimestampMappingIsAddedForDataStreamTemplates() throws Ex ), null ), - org.opensearch.common.collect.List.of("ct1"), + List.of("ct1"), 0L, 1L, null, @@ -1312,7 +1295,7 @@ public void testDefinedTimestampMappingIsAddedForDataStreamTemplates() throws Ex assertThat(mappings.size(), equalTo(2)); List> parsedMappings = mappings.stream().map(m -> { try { - return MapperService.parseMapping(new NamedXContentRegistry(org.opensearch.common.collect.List.of()), m.string()); + return MapperService.parseMapping(new NamedXContentRegistry(List.of()), m.string()); } catch (Exception e) { logger.error(e); fail("failed to parse mappings: " + m.string()); @@ -1320,22 +1303,10 @@ public void testDefinedTimestampMappingIsAddedForDataStreamTemplates() throws Ex } }).collect(Collectors.toList()); - Map firstMapping = org.opensearch.common.collect.Map.of( - "_doc", - org.opensearch.common.collect.Map.of( - "properties", - org.opensearch.common.collect.Map.of("field1", org.opensearch.common.collect.Map.of("type", "keyword")) - ) - ); + Map firstMapping = Map.of("_doc", Map.of("properties", Map.of("field1", Map.of("type", "keyword")))); assertThat(parsedMappings.get(0), equalTo(firstMapping)); - Map secondMapping = org.opensearch.common.collect.Map.of( - "_doc", - org.opensearch.common.collect.Map.of( - "properties", - org.opensearch.common.collect.Map.of("field2", org.opensearch.common.collect.Map.of("type", "integer")) - ) - ); + Map secondMapping = Map.of("_doc", Map.of("properties", Map.of("field2", Map.of("type", "integer")))); assertThat(parsedMappings.get(1), equalTo(secondMapping)); // a default @timestamp mapping will not be added if the matching template doesn't have the data stream field configured, even @@ -1350,7 +1321,7 @@ public void testDefinedTimestampMappingIsAddedForDataStreamTemplates() throws Ex assertThat(mappings.size(), equalTo(2)); parsedMappings = mappings.stream().map(m -> { try { - return MapperService.parseMapping(new NamedXContentRegistry(org.opensearch.common.collect.List.of()), m.string()); + return MapperService.parseMapping(new 
NamedXContentRegistry(List.of()), m.string()); } catch (Exception e) { logger.error(e); fail("failed to parse mappings: " + m.string()); @@ -1358,22 +1329,10 @@ public void testDefinedTimestampMappingIsAddedForDataStreamTemplates() throws Ex } }).collect(Collectors.toList()); - firstMapping = org.opensearch.common.collect.Map.of( - "_doc", - org.opensearch.common.collect.Map.of( - "properties", - org.opensearch.common.collect.Map.of("field1", org.opensearch.common.collect.Map.of("type", "keyword")) - ) - ); + firstMapping = Map.of("_doc", Map.of("properties", Map.of("field1", Map.of("type", "keyword")))); assertThat(parsedMappings.get(0), equalTo(firstMapping)); - secondMapping = org.opensearch.common.collect.Map.of( - "_doc", - org.opensearch.common.collect.Map.of( - "properties", - org.opensearch.common.collect.Map.of("field2", org.opensearch.common.collect.Map.of("type", "integer")) - ) - ); + secondMapping = Map.of("_doc", Map.of("properties", Map.of("field2", Map.of("type", "integer")))); assertThat(parsedMappings.get(1), equalTo(secondMapping)); } } @@ -1404,9 +1363,9 @@ public void testUserDefinedMappingTakesPrecedenceOverDefault() throws Exception state = service.addComponentTemplate(state, true, "ct1", ct1); ComposableIndexTemplate it = new ComposableIndexTemplate( - org.opensearch.common.collect.List.of("logs*"), + List.of("logs*"), null, - org.opensearch.common.collect.List.of("ct1"), + List.of("ct1"), 0L, 1L, null, @@ -1424,7 +1383,7 @@ public void testUserDefinedMappingTakesPrecedenceOverDefault() throws Exception assertThat(mappings.size(), equalTo(3)); List> parsedMappings = mappings.stream().map(m -> { try { - return MapperService.parseMapping(new NamedXContentRegistry(org.opensearch.common.collect.List.of()), m.string()); + return MapperService.parseMapping(new NamedXContentRegistry(List.of()), m.string()); } catch (Exception e) { logger.error(e); fail("failed to parse mappings: " + m.string()); @@ -1432,24 +1391,15 @@ public void testUserDefinedMappingTakesPrecedenceOverDefault() throws Exception } }).collect(Collectors.toList()); - Map firstMapping = org.opensearch.common.collect.Map.of( + Map firstMapping = Map.of( "_doc", - org.opensearch.common.collect.Map.of( - "properties", - org.opensearch.common.collect.Map.of(TIMESTAMP_FIELD.getName(), org.opensearch.common.collect.Map.of("type", "date")) - ) + Map.of("properties", Map.of(TIMESTAMP_FIELD.getName(), Map.of("type", "date"))) ); assertThat(parsedMappings.get(0), equalTo(firstMapping)); - Map secondMapping = org.opensearch.common.collect.Map.of( + Map secondMapping = Map.of( "_doc", - org.opensearch.common.collect.Map.of( - "properties", - org.opensearch.common.collect.Map.of( - TIMESTAMP_FIELD.getName(), - org.opensearch.common.collect.Map.of("type", "date_nanos") - ) - ) + Map.of("properties", Map.of(TIMESTAMP_FIELD.getName(), Map.of("type", "date_nanos"))) ); assertThat(parsedMappings.get(1), equalTo(secondMapping)); } @@ -1470,7 +1420,7 @@ public void testUserDefinedMappingTakesPrecedenceOverDefault() throws Exception null ); ComposableIndexTemplate it = new ComposableIndexTemplate( - org.opensearch.common.collect.List.of("timeseries*"), + List.of("timeseries*"), template, null, 0L, @@ -1490,31 +1440,22 @@ public void testUserDefinedMappingTakesPrecedenceOverDefault() throws Exception assertThat(mappings.size(), equalTo(3)); List> parsedMappings = mappings.stream().map(m -> { try { - return MapperService.parseMapping(new NamedXContentRegistry(org.opensearch.common.collect.List.of()), m.string()); + return 
MapperService.parseMapping(new NamedXContentRegistry(List.of()), m.string()); } catch (Exception e) { logger.error(e); fail("failed to parse mappings: " + m.string()); return null; } }).collect(Collectors.toList()); - Map firstMapping = org.opensearch.common.collect.Map.of( + Map firstMapping = Map.of( "_doc", - org.opensearch.common.collect.Map.of( - "properties", - org.opensearch.common.collect.Map.of(TIMESTAMP_FIELD.getName(), org.opensearch.common.collect.Map.of("type", "date")) - ) + Map.of("properties", Map.of(TIMESTAMP_FIELD.getName(), Map.of("type", "date"))) ); assertThat(parsedMappings.get(0), equalTo(firstMapping)); - Map secondMapping = org.opensearch.common.collect.Map.of( + Map secondMapping = Map.of( "_doc", - org.opensearch.common.collect.Map.of( - "properties", - org.opensearch.common.collect.Map.of( - TIMESTAMP_FIELD.getName(), - org.opensearch.common.collect.Map.of("type", "date_nanos") - ) - ) + Map.of("properties", Map.of(TIMESTAMP_FIELD.getName(), Map.of("type", "date_nanos"))) ); assertThat(parsedMappings.get(1), equalTo(secondMapping)); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java index 4e7502ada661f..7870423a88476 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java @@ -1065,7 +1065,7 @@ public void testBuilderRejectsDataStreamThatConflictsWithIndex() { IndexMetadata.builder(dataStreamName).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1).build(), false ) - .put(new DataStream(dataStreamName, createTimestampField("@timestamp"), org.opensearch.common.collect.List.of(idx.getIndex()))); + .put(new DataStream(dataStreamName, createTimestampField("@timestamp"), List.of(idx.getIndex()))); IllegalStateException e = expectThrows(IllegalStateException.class, b::build); assertThat( @@ -1084,7 +1084,7 @@ public void testBuilderRejectsDataStreamThatConflictsWithAlias() { IndexMetadata idx = createFirstBackingIndex(dataStreamName).putAlias(AliasMetadata.builder(dataStreamName).build()).build(); Metadata.Builder b = Metadata.builder() .put(idx, false) - .put(new DataStream(dataStreamName, createTimestampField("@timestamp"), org.opensearch.common.collect.List.of(idx.getIndex()))); + .put(new DataStream(dataStreamName, createTimestampField("@timestamp"), List.of(idx.getIndex()))); IllegalStateException e = expectThrows(IllegalStateException.class, b::build); assertThat( @@ -1107,13 +1107,7 @@ public void testBuilderRejectsDataStreamWithConflictingBackingIndices() { Metadata.Builder b = Metadata.builder() .put(validIdx, false) .put(invalidIdx, false) - .put( - new DataStream( - dataStreamName, - createTimestampField("@timestamp"), - org.opensearch.common.collect.List.of(validIdx.getIndex()) - ) - ); + .put(new DataStream(dataStreamName, createTimestampField("@timestamp"), List.of(validIdx.getIndex()))); IllegalStateException e = expectThrows(IllegalStateException.class, b::build); assertThat( @@ -1134,7 +1128,7 @@ public void testBuilderRejectsDataStreamWithConflictingBackingAlias() { IndexMetadata idx = createFirstBackingIndex(dataStreamName).putAlias(new AliasMetadata.Builder(conflictingName)).build(); Metadata.Builder b = Metadata.builder() .put(idx, false) - .put(new DataStream(dataStreamName, createTimestampField("@timestamp"), org.opensearch.common.collect.List.of(idx.getIndex()))); + .put(new DataStream(dataStreamName, 
createTimestampField("@timestamp"), List.of(idx.getIndex()))); IllegalStateException e = expectThrows(IllegalStateException.class, b::build); assertThat( @@ -1269,7 +1263,7 @@ public void testValidateDataStreamsThrowsExceptionOnConflict() { Index index = standaloneIndexConflictingWithBackingIndices.getIndex(); indicesLookup.put(index.getName(), new IndexAbstraction.Index(standaloneIndexConflictingWithBackingIndices, null)); - DataStreamMetadata dataStreamMetadata = new DataStreamMetadata(org.opensearch.common.collect.Map.of(dataStreamName, dataStream)); + DataStreamMetadata dataStreamMetadata = new DataStreamMetadata(Map.of(dataStreamName, dataStream)); IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, @@ -1362,7 +1356,7 @@ public void testValidateDataStreamsAllowsPrefixedBackingIndices() { indicesLookup.put(indexMeta.getIndex().getName(), new IndexAbstraction.Index(indexMeta, dataStreamAbstraction)); } } - DataStreamMetadata dataStreamMetadata = new DataStreamMetadata(org.opensearch.common.collect.Map.of(dataStreamName, dataStream)); + DataStreamMetadata dataStreamMetadata = new DataStreamMetadata(Map.of(dataStreamName, dataStream)); // prefixed indices with a lower generation than the data stream's generation are allowed even if the non-prefixed, matching the // data stream backing indices naming pattern, indices are already in the system diff --git a/server/src/test/java/org/opensearch/cluster/metadata/ToAndFromJsonMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/ToAndFromJsonMetadataTests.java index 253018d7f569f..c0210fe269e59 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/ToAndFromJsonMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/ToAndFromJsonMetadataTests.java @@ -51,6 +51,7 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import static org.opensearch.cluster.DataStreamTestHelper.createFirstBackingIndex; @@ -128,8 +129,8 @@ public void testSimpleJsonFromAndTo() throws IOException { ) .put(idx1, false) .put(idx2, false) - .put(new DataStream("data-stream1", createTimestampField("@timestamp"), org.opensearch.common.collect.List.of(idx1.getIndex()))) - .put(new DataStream("data-stream2", createTimestampField("@timestamp"), org.opensearch.common.collect.List.of(idx2.getIndex()))) + .put(new DataStream("data-stream1", createTimestampField("@timestamp"), List.of(idx1.getIndex()))) + .put(new DataStream("data-stream2", createTimestampField("@timestamp"), List.of(idx2.getIndex()))) .build(); XContentBuilder builder = JsonXContent.contentBuilder(); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/WildcardExpressionResolverTests.java b/server/src/test/java/org/opensearch/cluster/metadata/WildcardExpressionResolverTests.java index e4027234ac0b4..03807fb5f8c4d 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/WildcardExpressionResolverTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -302,7 +302,7 @@ public void testResolveDataStreams() { new DataStream( dataStreamName, createTimestampField("@timestamp"), - org.opensearch.common.collect.List.of(firstBackingIndexMetadata.getIndex(), secondBackingIndexMetadata.getIndex()) + List.of(firstBackingIndexMetadata.getIndex(), secondBackingIndexMetadata.getIndex()) ) ); diff --git a/server/src/test/java/org/opensearch/cluster/routing/FailAwareWeightedRoutingTests.java 
b/server/src/test/java/org/opensearch/cluster/routing/FailAwareWeightedRoutingTests.java index c9c616dab0dbc..5c3a2454c4074 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/FailAwareWeightedRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/FailAwareWeightedRoutingTests.java @@ -31,6 +31,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; import static java.util.Collections.singletonMap; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; @@ -88,7 +89,7 @@ private ClusterState setUpCluster() { public void testFindNextWithoutFailOpen() throws IOException { ClusterState clusterState = setUpCluster(); - + AtomicInteger shardSkipped = new AtomicInteger(); // set up index IndexMetadata indexMetadata = IndexMetadata.builder("test") .settings( @@ -137,8 +138,9 @@ public void testFindNextWithoutFailOpen() throws IOException { // fail open is not executed since fail open conditions aren't met SearchShardTarget next = FailAwareWeightedRouting.getInstance() - .findNext(searchShardIterator, clusterState, new OpenSearchRejectedExecutionException()); + .findNext(searchShardIterator, clusterState, new OpenSearchRejectedExecutionException(), () -> shardSkipped.incrementAndGet()); assertNull(next); + assertEquals(1, shardSkipped.get()); } public void testFindNextWithFailOpenDueTo5xx() throws IOException { @@ -198,7 +200,7 @@ public void testFindNextWithFailOpenDueTo5xx() throws IOException { DiscoveryNode node = clusterState.nodes().get("node_zone_b"); // fail open is executed and shard present in node with weighted routing weight zero is returned SearchShardTarget next = FailAwareWeightedRouting.getInstance() - .findNext(searchShardIterator, clusterState, new NodeNotConnectedException(node, "Node is not " + "connected")); + .findNext(searchShardIterator, clusterState, new NodeNotConnectedException(node, "Node is not " + "connected"), () -> {}); assertNotNull(next); assertEquals("node_zone_c", next.getNodeId()); } @@ -206,7 +208,7 @@ public void testFindNextWithFailOpenDueTo5xx() throws IOException { public void testFindNextWithFailOpenDueToUnassignedShard() throws IOException { ClusterState clusterState = setUpCluster(); - + AtomicInteger shardsSkipped = new AtomicInteger(); IndexMetadata indexMetadata = IndexMetadata.builder("test") .settings( Settings.builder() @@ -259,8 +261,9 @@ public void testFindNextWithFailOpenDueToUnassignedShard() throws IOException { // since there is an unassigned shard in the cluster, fail open is executed and shard present in node with // weighted routing weight zero is returned SearchShardTarget next = FailAwareWeightedRouting.getInstance() - .findNext(searchShardIterator, clusterState, new OpenSearchRejectedExecutionException(), () -> shardsSkipped.incrementAndGet()); assertNotNull(next); assertEquals("node_zone_c", next.getNodeId()); + assertEquals(0, shardsSkipped.get()); } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java index 65fc1b902f9a4..81464fcd2610d 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java @@ -43,6 +43,7 @@ import
java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -130,7 +131,7 @@ private ClusterState addClusterManagerNodes(ClusterState clusterState) { private ClusterState addDataNodeForAZone(ClusterState clusterState, String zone, String... nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); - org.opensearch.common.collect.List.of(nodeIds) + List.of(nodeIds) .forEach( nodeId -> nodeBuilder.add( new DiscoveryNode( @@ -149,7 +150,7 @@ private ClusterState addDataNodeForAZone(ClusterState clusterState, String zone, private ClusterState addClusterManagerNodeForAZone(ClusterState clusterState, String zone, String... nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); - org.opensearch.common.collect.List.of(nodeIds) + List.of(nodeIds) .forEach( nodeId -> nodeBuilder.add( new DiscoveryNode( diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java index ae10a92a5104e..937d0dd34226f 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java @@ -18,6 +18,10 @@ import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CONSTRAINT_WEIGHT; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; public class AllocationConstraintsTests extends OpenSearchAllocationTestCase { @@ -33,13 +37,18 @@ public void testSettings() { settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalanceFactor); settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), shardBalance); settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), threshold); + settings.put(BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE.getKey(), true); service.applySettings(settings.build()); assertEquals(indexBalanceFactor, allocator.getIndexBalance(), 0.01); assertEquals(shardBalance, allocator.getShardBalance(), 0.01); assertEquals(threshold, allocator.getThreshold(), 0.01); + assertEquals(true, allocator.getPreferPrimaryBalance()); + settings.put(BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE.getKey(), false); + service.applySettings(settings.build()); + assertEquals(false, allocator.getPreferPrimaryBalance()); } /** @@ -50,6 +59,7 @@ public void testIndexShardsPerNodeConstraint() { ShardsBalancer balancer = mock(LocalShardsBalancer.class); BalancedShardsAllocator.ModelNode node = mock(BalancedShardsAllocator.ModelNode.class); AllocationConstraints constraints = new AllocationConstraints(); + constraints.updateAllocationConstraint(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, true); int shardCount = randomIntBetween(1, 500); float avgShardsPerNode = 1.0f + (random().nextFloat()) * 999.0f; @@ -58,9 +68,142 @@ public void testIndexShardsPerNodeConstraint() { 
when(node.numShards(anyString())).thenReturn(shardCount); when(node.getNodeId()).thenReturn("test-node"); - long expectedWeight = (shardCount >= avgShardsPerNode) ? constraints.CONSTRAINT_WEIGHT : 0; + long expectedWeight = (shardCount >= avgShardsPerNode) ? CONSTRAINT_WEIGHT : 0; assertEquals(expectedWeight, constraints.weight(balancer, node, "index")); } + /** + * Test constraint evaluation logic when the per-index primary shard balance constraint + * is satisfied and breached. + */ + public void testPerIndexPrimaryShardsConstraint() { + ShardsBalancer balancer = mock(LocalShardsBalancer.class); + BalancedShardsAllocator.ModelNode node = mock(BalancedShardsAllocator.ModelNode.class); + AllocationConstraints constraints = new AllocationConstraints(); + constraints.updateAllocationConstraint(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, true); + + final String indexName = "test-index"; + int perIndexPrimaryShardCount = 1; + float avgPerIndexPrimaryShardsPerNode = 2f; + + when(balancer.avgPrimaryShardsPerNode(anyString())).thenReturn(avgPerIndexPrimaryShardsPerNode); + when(node.numPrimaryShards(anyString())).thenReturn(perIndexPrimaryShardCount); + when(node.getNodeId()).thenReturn("test-node"); + + assertEquals(0, constraints.weight(balancer, node, indexName)); + + perIndexPrimaryShardCount = 3; + when(node.numPrimaryShards(anyString())).thenReturn(perIndexPrimaryShardCount); + assertEquals(CONSTRAINT_WEIGHT, constraints.weight(balancer, node, indexName)); + + constraints.updateAllocationConstraint(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, false); + assertEquals(0, constraints.weight(balancer, node, indexName)); + } + + /** + * Test constraint evaluation logic when the global primary shard balance constraint is satisfied and breached. + */ + public void testGlobalPrimaryShardsConstraint() { + ShardsBalancer balancer = mock(LocalShardsBalancer.class); + BalancedShardsAllocator.ModelNode node = mock(BalancedShardsAllocator.ModelNode.class); + AllocationConstraints constraints = new AllocationConstraints(); + constraints.updateAllocationConstraint(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, true); + + final String indexName = "test-index"; + int primaryShardCount = 1; + float avgPrimaryShardsPerNode = 2f; + + when(balancer.avgPrimaryShardsPerNode()).thenReturn(avgPrimaryShardsPerNode); + when(node.numPrimaryShards()).thenReturn(primaryShardCount); + when(node.getNodeId()).thenReturn("test-node"); + + assertEquals(0, constraints.weight(balancer, node, indexName)); + + primaryShardCount = 3; + when(node.numPrimaryShards()).thenReturn(primaryShardCount); + assertEquals(CONSTRAINT_WEIGHT, constraints.weight(balancer, node, indexName)); + + constraints.updateAllocationConstraint(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, false); + assertEquals(0, constraints.weight(balancer, node, indexName)); + } + + /** + * Test constraint evaluation logic when both the per-index and global primary shard count constraints are breached.
+ */ + public void testPrimaryShardsConstraints() { + ShardsBalancer balancer = mock(LocalShardsBalancer.class); + BalancedShardsAllocator.ModelNode node = mock(BalancedShardsAllocator.ModelNode.class); + AllocationConstraints constraints = new AllocationConstraints(); + constraints.updateAllocationConstraint(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, true); + constraints.updateAllocationConstraint(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, true); + + final String indexName = "test-index"; + int perIndexPrimaryShardCount = 1; + float avgPerIndexPrimaryShardCount = 2; + int primaryShardCount = 2; + float avgPrimaryShardsPerNode = 4; + + when(balancer.avgPrimaryShardsPerNode(indexName)).thenReturn(avgPerIndexPrimaryShardCount); + when(node.numPrimaryShards(indexName)).thenReturn(perIndexPrimaryShardCount); + when(balancer.avgPrimaryShardsPerNode()).thenReturn(avgPrimaryShardsPerNode); + when(node.numPrimaryShards()).thenReturn(primaryShardCount); + when(node.getNodeId()).thenReturn("test-node"); + + assertEquals(0, constraints.weight(balancer, node, indexName)); + + // breaching global primary shard count but not per index primary shard count + primaryShardCount = 5; + when(node.numPrimaryShards()).thenReturn(primaryShardCount); + assertEquals(CONSTRAINT_WEIGHT, constraints.weight(balancer, node, indexName)); + + // when per index primary shard count constraint is also breached + perIndexPrimaryShardCount = 3; + when(node.numPrimaryShards(indexName)).thenReturn(perIndexPrimaryShardCount); + assertEquals(2 * CONSTRAINT_WEIGHT, constraints.weight(balancer, node, indexName)); + + // disable both constraints + constraints.updateAllocationConstraint(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, false); + constraints.updateAllocationConstraint(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, false); + assertEquals(0, constraints.weight(balancer, node, indexName)); + } + + /** + * Test constraint evaluation logic when the shards-per-node, per-index primary balance, + * and global primary balance constraints are all enabled together.
+ */ + public void testAllConstraints() { + ShardsBalancer balancer = mock(LocalShardsBalancer.class); + BalancedShardsAllocator.ModelNode node = mock(BalancedShardsAllocator.ModelNode.class); + AllocationConstraints constraints = new AllocationConstraints(); + constraints.updateAllocationConstraint(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, true); + constraints.updateAllocationConstraint(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, true); + constraints.updateAllocationConstraint(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, true); + + final String indexName = "test-index"; + int shardCount = randomIntBetween(1, 500); + float avgPerIndexShardsPerNode = 1.0f + (random().nextFloat()) * 999.0f; + + int perIndexPrimaryShardCount = randomIntBetween(1, shardCount); + float avgPerIndexPrimaryShardsPerNode = (random().nextFloat()) * avgPerIndexShardsPerNode; + + float avgPrimaryShardsPerNode = 1.0f + (random().nextFloat()) * 999.0f; + int primaryShardsPerNode = randomIntBetween(1, shardCount); + + when(balancer.avgPrimaryShardsPerNode(indexName)).thenReturn(avgPerIndexPrimaryShardsPerNode); + when(node.numPrimaryShards(indexName)).thenReturn(perIndexPrimaryShardCount); + + when(balancer.avgPrimaryShardsPerNode()).thenReturn(avgPrimaryShardsPerNode); + when(node.numPrimaryShards()).thenReturn(primaryShardsPerNode); + + when(balancer.avgShardsPerNode(indexName)).thenReturn(avgPerIndexShardsPerNode); + when(node.numShards(indexName)).thenReturn(shardCount); + when(node.getNodeId()).thenReturn("test-node"); + + long expectedWeight = (shardCount >= avgPerIndexShardsPerNode) ? CONSTRAINT_WEIGHT : 0; + expectedWeight += perIndexPrimaryShardCount > avgPerIndexPrimaryShardsPerNode ? CONSTRAINT_WEIGHT : 0; + expectedWeight += primaryShardsPerNode >= avgPrimaryShardsPerNode ? 
CONSTRAINT_WEIGHT : 0; + assertEquals(expectedWeight, constraints.weight(balancer, node, indexName)); + } + } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java index 1ba69694eaec1..f5a418bc6a100 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -33,10 +33,12 @@ package org.opensearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.ArrayUtil; import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.EmptyClusterInfoService; @@ -44,22 +46,37 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.opensearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.index.shard.ShardId; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.gateway.TestGatewayAllocator; import org.hamcrest.Matchers; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED; @@ -75,7 +92,7 @@ public class BalanceConfigurationTests extends OpenSearchAllocationTestCase { public void testIndexBalance() { /* Tests balance over indices only */ final float indexBalance = 1.0f; - final float replicaBalance = 0.0f; + final float shardBalance = 0.0f; final float balanceThreshold = 1.0f; Settings.Builder settings = Settings.builder(); @@ -84,7 +101,7 @@ public void testIndexBalance() { ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString() ); 
settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); - settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), replicaBalance); + settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), shardBalance); settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceThreshold); AllocationService strategy = createAllocationService(settings.build(), new TestGatewayAllocator()); @@ -123,10 +140,428 @@ public void testIndexBalance() { ); } - public void testReplicaBalance() { + private Settings.Builder getSettingsBuilderForPrimaryBalance() { + return getSettingsBuilderForPrimaryBalance(true); + } + + private Settings.Builder getSettingsBuilderForPrimaryBalance(boolean preferPrimaryBalance) { + final float indexBalance = 0.55f; + final float shardBalance = 0.45f; + final float balanceThreshold = 1.0f; + + Settings.Builder settings = Settings.builder(); + settings.put( + ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), + ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString() + ); + settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); + settings.put(BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE.getKey(), preferPrimaryBalance); + settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), shardBalance); + settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceThreshold); + return settings; + } + + private IndexMetadata getIndexMetadata(String indexName, int shardCount, int replicaCount) { + return IndexMetadata.builder(indexName) + .settings( + Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, shardCount) + .put(SETTING_NUMBER_OF_REPLICAS, replicaCount) + .put(SETTING_CREATION_DATE, System.currentTimeMillis()) + ) + .build(); + } + + /** + * This test verifies that with only primary shard balance, the primary shard distribution per index is balanced. + */ + public void testPrimaryBalance() { + AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryBalance().build(), new TestGatewayAllocator()); + + ClusterState clusterState = initCluster(strategy); + verifyPerIndexPrimaryBalance(clusterState); + + clusterState = addNode(clusterState, strategy); + verifyPerIndexPrimaryBalance(clusterState); + + clusterState = removeNodes(clusterState, strategy); + verifyPerIndexPrimaryBalance(clusterState); + } + + /** + * This test verifies primary shard balance is not attained without the PREFER_PRIMARY_SHARD_BALANCE setting.
+ */ + public void testPrimaryBalanceWithoutPreferPrimaryBalanceSetting() { + final int numberOfNodes = 5; + final int numberOfIndices = 5; + final int numberOfShards = 25; + final int numberOfReplicas = 1; + + final int numberOfRuns = 5; + int balanceFailed = 0; + + AllocationService strategy = createAllocationService( + getSettingsBuilderForPrimaryBalance(false).build(), + new TestGatewayAllocator() + ); + for (int i = 0; i < numberOfRuns; i++) { + ClusterState clusterState = initCluster(strategy, numberOfIndices, numberOfNodes, numberOfShards, numberOfReplicas); + clusterState = removeOneNode(clusterState, strategy); + logger.info(ShardAllocations.printShardDistribution(clusterState)); + try { + verifyPerIndexPrimaryBalance(clusterState); + } catch (AssertionError e) { + balanceFailed++; + logger.info("Expected assertion failure"); + } + } + assertTrue(balanceFailed >= 4); + } + + /** + * This test verifies primary shard balance is attained with the PREFER_PRIMARY_SHARD_BALANCE setting. + */ + public void testPrimaryBalanceWithPreferPrimaryBalanceSetting() { + final int numberOfNodes = 5; + final int numberOfIndices = 5; + final int numberOfShards = 25; + final int numberOfReplicas = 1; + final int numberOfRuns = 5; + int balanceFailed = 0; + + AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryBalance().build(), new TestGatewayAllocator()); + for (int i = 0; i < numberOfRuns; i++) { + ClusterState clusterState = initCluster(strategy, numberOfIndices, numberOfNodes, numberOfShards, numberOfReplicas); + clusterState = removeOneNode(clusterState, strategy); + logger.info(ShardAllocations.printShardDistribution(clusterState)); + try { + verifyPerIndexPrimaryBalance(clusterState); + } catch (AssertionError e) { + balanceFailed++; + logger.info("Unexpected assertion failure"); + } + } + assertTrue(balanceFailed <= 1); + } + + /** + * This test verifies the allocation logic when nodes breach multiple constraints, and ensures that the node + * breaching the fewest constraints is chosen for allocation. + * + * This test mimics a cluster state containing four nodes, where one node breaches two constraints while another + * breaches only one. In order to have nodes breach constraints, the test excludes two nodes (node2, node3) from + * allocation so that the other two nodes (node0, node1) receive all shard assignments, resulting in constraint + * breaches. The test asserts that the new primary shard lands on the node breaching one constraint (node1), while + * the replica lands on the other (node0). Final shard allocation state:
+ * + routing_nodes: + -----node_id[node2][V] + -----node_id[node3][V] + -----node_id[node0][V] + --------[test][1], node[node0], [P], s[STARTED], a[id=7B4dVsrjSoC6imBHO60mrQ] + --------[test][0], node[node0], [P], s[STARTED], a[id=0HySaPcyRhiKrH6QLA3evw] + --------[test][2], node[node0], [R], s[STARTED], a[id=pB3iuLKZSC--2yNS0trbgA] + -----node_id[node1][V] + --------[test][2], node[node1], [P], s[STARTED], a[id=QWN_T6xpQiWGSD8GJnX-bQ] + --------[test][1], node[node1], [R], s[STARTED], a[id=ChWdQiOdSdKrTPxwceIu1w] + --------[test][0], node[node1], [R], s[STARTED], a[id=5Adc5JteQ8-lY2xfsHUg-Q] + * + */ + public void testPrimaryBalanceWithConstraintBreaching() { + // Mark node2, node3 excluded (FilterAllocationDecider) so that all allocations land on node0, node1 + Settings.Builder settingsBuilder = getSettingsBuilderForPrimaryBalance(); + settingsBuilder.put("cluster.routing.allocation.exclude._id", "node2,node3"); + + AllocationService strategy = createAllocationService(settingsBuilder.build(), new TestGatewayAllocator()); + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + List<String> nodesList = new ArrayList<>(); + for (int i = 0; i < 4; i++) { + final DiscoveryNode node = newNode("node" + i); + discoBuilder = discoBuilder.add(node); + nodesList.add(node.getId()); + } + discoBuilder.localNodeId(newNode("node_0").getId()); + discoBuilder.clusterManagerNodeId(newNode("node_0").getId()); + + Metadata.Builder metadata = Metadata.builder(); + metadata.persistentSettings(settingsBuilder.build()); + RoutingTable.Builder routingTable = RoutingTable.builder(); + // build index metadata + IndexMetadata indexMetadata = getIndexMetadata("test", 3, 1); + // Build index routing table + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex()); + ShardId shardId_0 = new ShardId(indexMetadata.getIndex(), 0); + ShardId shardId_1 = new ShardId(indexMetadata.getIndex(), 1); + ShardId shardId_2 = new ShardId(indexMetadata.getIndex(), 2); + UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null); + IndexShardRoutingTable.Builder indexShardRoutingBuilder_0 = new IndexShardRoutingTable.Builder(shardId_0); + IndexShardRoutingTable.Builder indexShardRoutingBuilder_1 = new IndexShardRoutingTable.Builder(shardId_1); + IndexShardRoutingTable.Builder indexShardRoutingBuilder_2 = new IndexShardRoutingTable.Builder(shardId_2); + indexShardRoutingBuilder_0.addShard(TestShardRouting.newShardRouting(shardId_0, nodesList.get(0), true, ShardRoutingState.STARTED)); + indexShardRoutingBuilder_1.addShard(TestShardRouting.newShardRouting(shardId_1, nodesList.get(0), true, ShardRoutingState.STARTED)); + indexShardRoutingBuilder_0.addShard( + TestShardRouting.newShardRouting(shardId_0, nodesList.get(1), false, ShardRoutingState.STARTED) + ); + indexShardRoutingBuilder_1.addShard( + TestShardRouting.newShardRouting(shardId_1, nodesList.get(1), false, ShardRoutingState.STARTED) + ); + indexShardRoutingBuilder_2.addShard( + TestShardRouting.newShardRouting( + shardId_2, + null, + null, + true, + UNASSIGNED, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + unassignedInfo + ) + ); + indexShardRoutingBuilder_2.addShard( + TestShardRouting.newShardRouting( + shardId_2, + null, + null, + false, + UNASSIGNED, + RecoverySource.PeerRecoverySource.INSTANCE, + unassignedInfo + ) + ); + + IndexShardRoutingTable indexShardRoutingTable_0 = indexShardRoutingBuilder_0.build(); + IndexShardRoutingTable indexShardRoutingTable_1 =
indexShardRoutingBuilder_1.build(); + + indexRoutingTable.addIndexShard(indexShardRoutingBuilder_0.build()); + indexRoutingTable.addIndexShard(indexShardRoutingBuilder_1.build()); + indexRoutingTable.addIndexShard(indexShardRoutingBuilder_2.build()); + routingTable.add(indexRoutingTable); + + IndexMetadata.Builder indexMetaDataBuilder = IndexMetadata.builder(indexMetadata); + indexMetaDataBuilder.putInSyncAllocationIds(0, indexShardRoutingTable_0.getAllAllocationIds()); + indexMetaDataBuilder.putInSyncAllocationIds(1, indexShardRoutingTable_1.getAllAllocationIds()); + metadata.put(indexMetaDataBuilder.build(), false); + + ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName("test")); + stateBuilder.nodes(discoBuilder); + stateBuilder.metadata(metadata.generateClusterUuidIfNeeded().build()); + stateBuilder.routingTable(routingTable.build()); + ClusterState clusterState = stateBuilder.build(); + clusterState = strategy.reroute(clusterState, "reroute"); + clusterState = applyAllocationUntilNoChange(clusterState, strategy); + + logger.info(ShardAllocations.printShardDistribution(clusterState)); + ShardRouting primaryShard = clusterState.routingTable().shardRoutingTable(shardId_2).primaryShard(); + List<ShardRouting> replicaShards = clusterState.routingTable().shardRoutingTable(shardId_2).replicaShards(); + + assertTrue(primaryShard.started()); + assertEquals("node1", primaryShard.currentNodeId()); + + assertEquals(1, replicaShards.size()); + assertTrue(replicaShards.get(0).started()); + assertEquals("node0", replicaShards.get(0).currentNodeId()); + } + + /** + * This test verifies global balance by creating indices iteratively and checking that primary shards do not pile up + * on one node. + * @throws Exception + */ + public void testGlobalPrimaryBalance() throws Exception { + AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryBalance().build(), new TestGatewayAllocator()); + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .build(); + clusterState = addNode(clusterState, strategy); + clusterState = addNode(clusterState, strategy); + + clusterState = addIndex(clusterState, strategy, "test-index1", 1, 1); + clusterState = addIndex(clusterState, strategy, "test-index2", 1, 1); + clusterState = addIndex(clusterState, strategy, "test-index3", 1, 1); + + logger.info(ShardAllocations.printShardDistribution(clusterState)); + verifyPrimaryBalance(clusterState); + } + + /** + * This test mimics a cluster state which cannot be rebalanced due to the + * {@link org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider} + * allocation decider, which prevents shard relocation and leaves the cluster unbalanced on primaries. + * + * There are two nodes (N1, N2) where all primaries land on N1 while replicas land on N2.
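+ * A primary cannot relocate to the node that already holds its replica, so the skew cannot be repaired by relocation.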
+ * N1 N2 + * ------ -------- + * P1 R1 + * P2 R2 + * + * -----node_id[node_0][V] + * --------[test][1], node[node_0], [P], s[STARTED], a[id=xqfZSToVSQaff2xvuxh_yA] + * --------[test][0], node[node_0], [P], s[STARTED], a[id=VGjOeBGdSmu3pJR6T7v29A] + * -----node_id[node_1][V] + * --------[test][1], node[node_1], [R], s[STARTED], a[id=zZI0R8FBQkWMNndEZt9d8w] + * --------[test][0], node[node_1], [R], s[STARTED], a[id=8IpwEMQ2QEuj5rQOxBagSA] + */ + public void testPrimaryBalance_NotSolved_1() { + AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryBalance().build(), new TestGatewayAllocator()); + + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + Set<String> nodes = new HashSet<>(); + for (int i = 0; i < 2; i++) { + final DiscoveryNode node = newNode("node_" + i); + discoBuilder = discoBuilder.add(node); + nodes.add(node.getId()); + } + discoBuilder.localNodeId(newNode("node_0").getId()); + discoBuilder.clusterManagerNodeId(newNode("node_0").getId()); + Metadata.Builder metadata = Metadata.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + List<String> nodesList = new ArrayList<>(nodes); + // build index metadata + IndexMetadata indexMetadata = getIndexMetadata("test", 2, 1); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex()); + ShardId shardId_0 = new ShardId(indexMetadata.getIndex(), 0); + ShardId shardId_1 = new ShardId(indexMetadata.getIndex(), 1); + IndexShardRoutingTable.Builder indexShardRoutingBuilder_0 = new IndexShardRoutingTable.Builder(shardId_0); + IndexShardRoutingTable.Builder indexShardRoutingBuilder_1 = new IndexShardRoutingTable.Builder(shardId_1); + indexShardRoutingBuilder_0.addShard(TestShardRouting.newShardRouting(shardId_0, nodesList.get(0), true, ShardRoutingState.STARTED)); + indexShardRoutingBuilder_1.addShard(TestShardRouting.newShardRouting(shardId_1, nodesList.get(0), true, ShardRoutingState.STARTED)); + indexShardRoutingBuilder_0.addShard( + TestShardRouting.newShardRouting(shardId_0, nodesList.get(1), false, ShardRoutingState.STARTED) + ); + indexShardRoutingBuilder_1.addShard( + TestShardRouting.newShardRouting(shardId_1, nodesList.get(1), false, ShardRoutingState.STARTED) + ); + indexRoutingTable.addIndexShard(indexShardRoutingBuilder_0.build()); + indexRoutingTable.addIndexShard(indexShardRoutingBuilder_1.build()); + metadata.put(indexMetadata, false); + routingTable.add(indexRoutingTable); + + ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName("test")); + stateBuilder.nodes(discoBuilder); + stateBuilder.metadata(metadata.generateClusterUuidIfNeeded().build()); + stateBuilder.routingTable(routingTable.build()); + ClusterState clusterState = stateBuilder.build(); + + clusterState = strategy.reroute(clusterState, "reroute"); + boolean balanced = true; + logger.info(ShardAllocations.printShardDistribution(clusterState)); + try { + verifyPerIndexPrimaryBalance(clusterState); + } catch (AssertionError e) { + balanced = false; + } + assertFalse(balanced); + } + + /** + * This test mimics a cluster state where rebalancing is not possible due to an existing limitation of the rebalancing + * logic, which applies at the index level, i.e. it balances the shards of a single index across all nodes. This will be + * solved when a constraint on the primary shard count across indices is added.
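+ * Each index, viewed on its own, is already balanced, so the per-index balancer finds nothing to move even though
+ * primaries pile up on one node.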
+ * + * Please note, P1 and P2 belong to different indices. + * + * N1 N2 + * ------ -------- + * P1 R1 + * P2 R2 + * + * -----node_id[node_0][V] + * --------[test1][0], node[node_0], [P], s[STARTED], a[id=u7qtyy5AR42hgEa-JpeArg] + * --------[test0][0], node[node_0], [P], s[STARTED], a[id=BQrLSo6sQyGlcLdVvGgqLQ] + * -----node_id[node_1][V] + * --------[test1][0], node[node_1], [R], s[STARTED], a[id=TDqbfvAfSFK6lnv3aOU9bA] + * --------[test0][0], node[node_1], [R], s[STARTED], a[id=E85-jhiEQwuB43u5Wq1mAw] + * + */ + public void testPrimaryBalance_NotSolved_2() { + AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryBalance().build(), new TestGatewayAllocator()); + + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + Set<String> nodes = new HashSet<>(); + for (int i = 0; i < 2; i++) { + final DiscoveryNode node = newNode("node_" + i); + discoBuilder = discoBuilder.add(node); + nodes.add(node.getId()); + } + discoBuilder.localNodeId(newNode("node_0").getId()); + discoBuilder.clusterManagerNodeId(newNode("node_0").getId()); + Metadata.Builder metadata = Metadata.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + List<String> nodesList = new ArrayList<>(nodes); + // build index metadata + IndexMetadata indexMetadata_0 = getIndexMetadata("test0", 1, 1); + IndexMetadata indexMetadata_1 = getIndexMetadata("test1", 1, 1); + IndexRoutingTable.Builder indexRoutingTable_0 = IndexRoutingTable.builder(indexMetadata_0.getIndex()); + IndexRoutingTable.Builder indexRoutingTable_1 = IndexRoutingTable.builder(indexMetadata_1.getIndex()); + ShardId shardId_0 = new ShardId(indexMetadata_0.getIndex(), 0); + ShardId shardId_1 = new ShardId(indexMetadata_1.getIndex(), 0); + IndexShardRoutingTable.Builder indexShardRoutingBuilder_0 = new IndexShardRoutingTable.Builder(shardId_0); + IndexShardRoutingTable.Builder indexShardRoutingBuilder_1 = new IndexShardRoutingTable.Builder(shardId_1); + indexShardRoutingBuilder_0.addShard(TestShardRouting.newShardRouting(shardId_0, nodesList.get(0), true, ShardRoutingState.STARTED)); + indexShardRoutingBuilder_1.addShard(TestShardRouting.newShardRouting(shardId_1, nodesList.get(0), true, ShardRoutingState.STARTED)); + indexShardRoutingBuilder_0.addShard( + TestShardRouting.newShardRouting(shardId_0, nodesList.get(1), false, ShardRoutingState.STARTED) + ); + indexShardRoutingBuilder_1.addShard( + TestShardRouting.newShardRouting(shardId_1, nodesList.get(1), false, ShardRoutingState.STARTED) + ); + indexRoutingTable_0.addIndexShard(indexShardRoutingBuilder_0.build()); + indexRoutingTable_1.addIndexShard(indexShardRoutingBuilder_1.build()); + metadata.put(indexMetadata_0, false); + metadata.put(indexMetadata_1, false); + routingTable.add(indexRoutingTable_0); + routingTable.add(indexRoutingTable_1); + ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName("test")); + stateBuilder.nodes(discoBuilder); + stateBuilder.metadata(metadata.generateClusterUuidIfNeeded().build()); + stateBuilder.routingTable(routingTable.build()); + ClusterState clusterState = stateBuilder.build(); + + clusterState = strategy.reroute(clusterState, "reroute"); + logger.info(ShardAllocations.printShardDistribution(clusterState)); + // The cluster is balanced when considering each index individually, but not when considering the global state + verifyPerIndexPrimaryBalance(clusterState); + } + + private void verifyPerIndexPrimaryBalance(ClusterState currentState) { + RoutingNodes nodes = currentState.getRoutingNodes(); + for (ObjectObjectCursor<String, IndexRoutingTable> index
: currentState.getRoutingTable().indicesRouting()) { + final int totalPrimaryShards = index.value.primaryShardsActive(); + final int avgPrimaryShardsPerNode = (int) Math.ceil(totalPrimaryShards * 1f / currentState.getRoutingNodes().size()); + + for (RoutingNode node : nodes) { + final int primaryCount = node.shardsWithState(index.key, STARTED) + .stream() + .filter(ShardRouting::primary) + .collect(Collectors.toList()) + .size(); + assertTrue(primaryCount <= avgPrimaryShardsPerNode); + } + } + } + + private void verifyPrimaryBalance(ClusterState clusterState) throws Exception { + assertBusy(() -> { + RoutingNodes nodes = clusterState.getRoutingNodes(); + int totalPrimaryShards = 0; + for (ObjectObjectCursor<String, IndexRoutingTable> index : clusterState.getRoutingTable().indicesRouting()) { + totalPrimaryShards += index.value.primaryShardsActive(); + } + final int avgPrimaryShardsPerNode = (int) Math.ceil(totalPrimaryShards * 1f / clusterState.getRoutingNodes().size()); + for (RoutingNode node : nodes) { + final int primaryCount = node.shardsWithState(STARTED) + .stream() + .filter(ShardRouting::primary) + .collect(Collectors.toList()) + .size(); + assertTrue(primaryCount <= avgPrimaryShardsPerNode); + } + }, 60, TimeUnit.SECONDS); + } + + public void testShardBalance() { /* Tests balance over replicas only */ final float indexBalance = 0.0f; - final float replicaBalance = 1.0f; + final float shardBalance = 1.0f; final float balanceThreshold = 1.0f; Settings.Builder settings = Settings.builder(); @@ -135,13 +570,13 @@ public void testReplicaBalance() { ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString() ); settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); - settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), replicaBalance); + settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), shardBalance); settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceThreshold); AllocationService strategy = createAllocationService(settings.build(), new TestGatewayAllocator()); ClusterState clusterState = initCluster(strategy); - assertReplicaBalance( + assertShardBalance( clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, @@ -151,7 +586,7 @@ public void testReplicaBalance() { ); clusterState = addNode(clusterState, strategy); - assertReplicaBalance( + assertShardBalance( clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, @@ -161,7 +596,7 @@ public void testReplicaBalance() { ); clusterState = removeNodes(clusterState, strategy); - assertReplicaBalance( + assertShardBalance( clusterState.getRoutingNodes(), numberOfNodes + 1 - (numberOfNodes + 1) / 2, numberOfIndices, @@ -172,19 +607,54 @@ public void testReplicaBalance() { } private ClusterState initCluster(AllocationService strategy) { + return initCluster(strategy, numberOfIndices, numberOfNodes, numberOfShards, numberOfReplicas); + } + + private ClusterState addIndex( + ClusterState clusterState, + AllocationService strategy, + String indexName, + int numberOfShards, + int numberOfReplicas + ) { + Metadata.Builder metadataBuilder = Metadata.builder(clusterState.getMetadata()); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); + + IndexMetadata.Builder index = IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas); + + metadataBuilder = metadataBuilder.put(index); +
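// the new index's shards enter the routing table as unassigned; the reroute below lets the allocator place them +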
routingTableBuilder.addAsNew(index.build()); + + clusterState = ClusterState.builder(clusterState) + .metadata(metadataBuilder.build()) + .routingTable(routingTableBuilder.build()) + .build(); + clusterState = strategy.reroute(clusterState, "index-created"); + return applyAllocationUntilNoChange(clusterState, strategy); + } + + private ClusterState initCluster( + AllocationService strategy, + int numberOfIndices, + int numberOfNodes, + int numberOfShards, + int numberOfReplicas + ) { Metadata.Builder metadataBuilder = Metadata.builder(); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); for (int i = 0; i < numberOfIndices; i++) { IndexMetadata.Builder index = IndexMetadata.builder("test" + i) - .settings(settings(Version.CURRENT)) + .settings(settings(Version.CURRENT).build()) .numberOfShards(numberOfShards) .numberOfReplicas(numberOfReplicas); metadataBuilder = metadataBuilder.put(index); } Metadata metadata = metadataBuilder.build(); - for (ObjectCursor<IndexMetadata> cursor : metadata.indices().values()) { routingTableBuilder.addAsNew(cursor.value); } @@ -202,21 +672,13 @@ private ClusterState initCluster(AllocationService strategy) { .routingTable(initialRoutingTable) .build(); clusterState = strategy.reroute(clusterState, "reroute"); - - logger.info("restart all the primary shards, replicas will start initializing"); - clusterState = startInitializingShardsAndReroute(strategy, clusterState); - - logger.info("start the replica shards"); - clusterState = startInitializingShardsAndReroute(strategy, clusterState); - - logger.info("complete rebalancing"); - return applyStartedShardsUntilNoChange(clusterState, strategy); + return applyAllocationUntilNoChange(clusterState, strategy); } private ClusterState addNode(ClusterState clusterState, AllocationService strategy) { logger.info("now, start 1 more node, check that rebalancing will happen because we set it to always"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node" + numberOfNodes))) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node" + clusterState.getRoutingNodes().size()))) + .build(); RoutingTable routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -226,7 +688,16 @@ private ClusterState addNode(ClusterState clusterState, AllocationService strate return applyStartedShardsUntilNoChange(clusterState, strategy); } + private ClusterState removeOneNode(ClusterState clusterState, AllocationService strategy) { + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); + nodes.remove("node0"); + clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); + clusterState = strategy.disassociateDeadNodes(clusterState, randomBoolean(), "removed nodes"); + return applyAllocationUntilNoChange(clusterState, strategy); + } + private ClusterState removeNodes(ClusterState clusterState, AllocationService strategy) { + int numberOfNodes = clusterState.getRoutingNodes().size(); logger.info("Removing half the nodes (" + (numberOfNodes + 1) / 2 + ")"); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); @@ -240,7 +711,10 @@ private ClusterState removeNodes(ClusterState clusterState, AllocationService st if (removed) { clusterState = strategy.disassociateDeadNodes(clusterState, randomBoolean(), "removed nodes"); } + return applyAllocationUntilNoChange(clusterState, strategy); + } + private ClusterState applyAllocationUntilNoChange(ClusterState clusterState, AllocationService
strategy) { logger.info("start all the primary shards, replicas will start initializing"); clusterState = startInitializingShardsAndReroute(strategy, clusterState); @@ -254,7 +728,7 @@ private ClusterState removeNodes(ClusterState clusterState, AllocationService st return applyStartedShardsUntilNoChange(clusterState, strategy); } - private void assertReplicaBalance( + private void assertShardBalance( RoutingNodes nodes, int numberOfNodes, int numberOfIndices, @@ -494,5 +968,4 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing } } } - } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java index 4c4b362ef7c56..e4f3c4eeeb903 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java @@ -61,6 +61,7 @@ import java.util.HashSet; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongSupplier; @@ -681,6 +682,56 @@ protected void setIndexCreateBlock(ActionListener<Void> listener, boolean indexC ); } + public void testIndexCreateBlockWhenNoDataNodeHealthy() { + AllocationService allocation = createAllocationService( + Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build() + ); + Metadata metadata = Metadata.builder().build(); + RoutingTable routingTable = RoutingTable.builder().build(); + final ClusterState clusterState = applyStartedShardsUntilNoChange( + ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .build(), + allocation + ); + AtomicInteger countBlocksCalled = new AtomicInteger(); + AtomicBoolean reroute = new AtomicBoolean(false); + AtomicReference<Set<String>> indices = new AtomicReference<>(); + AtomicLong currentTime = new AtomicLong(); + Settings settings = Settings.builder().build(); + DiskThresholdMonitor monitor = new DiskThresholdMonitor( + settings, + () -> clusterState, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + null, + currentTime::get, + (reason, priority, listener) -> { + assertTrue(reroute.compareAndSet(false, true)); + assertThat(priority, equalTo(Priority.HIGH)); + listener.onResponse(null); + } + ) { + + @Override + protected void updateIndicesReadOnly(Set<String> indicesToMarkReadOnly, ActionListener<Void> listener, boolean readOnly) { + assertTrue(indices.compareAndSet(null, indicesToMarkReadOnly)); + assertFalse(readOnly); + listener.onResponse(null); + } + + @Override + protected void setIndexCreateBlock(ActionListener<Void> listener, boolean indexCreateBlock) { + countBlocksCalled.set(countBlocksCalled.get() + 1); + listener.onResponse(null); + } + }; + + ImmutableOpenMap.Builder<String, DiskUsage> builder = ImmutableOpenMap.builder(); + monitor.onNewInfo(clusterInfo(builder.build())); + assertTrue(countBlocksCalled.get() == 0); + } + private void assertNoLogging(DiskThresholdMonitor monitor, ImmutableOpenMap<String, DiskUsage> diskUsages) throws IllegalAccessException { try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(DiskThresholdMonitor.class))) { diff --git
a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java index b3d62ea9c6160..edef8d9747b8f 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -188,8 +188,9 @@ public void testRandomClusterPromotesNewestReplica() throws InterruptedException // Pick a random subset of primaries to fail List shardsToFail = new ArrayList<>(); List failedPrimaries = randomSubsetOf(primaries); - failedPrimaries.stream() - .forEach(sr -> { shardsToFail.add(new FailedShard(randomFrom(sr), "failed primary", new Exception(), randomBoolean())); }); + failedPrimaries.stream().forEach(sr -> { + shardsToFail.add(new FailedShard(randomFrom(sr), "failed primary", new Exception(), randomBoolean())); + }); logger.info("--> state before failing shards: {}", state); state = cluster.applyFailedShards(state, shardsToFail); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java index 773c82be55ead..24c6dcff42849 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java @@ -68,10 +68,10 @@ public void testHighWatermarkBreachWithLowShardCount() { final ImmutableOpenMap reservedSpace = new ImmutableOpenMap.Builder< ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace>().fPut(getNodeAndDevNullPath("node_0"), getReservedSpace()) - .fPut(getNodeAndDevNullPath("node_1"), getReservedSpace()) - .fPut(getNodeAndDevNullPath("node_2"), getReservedSpace()) - .fPut(getNodeAndDevNullPath("high_watermark_node_0"), getReservedSpace()) - .build(); + .fPut(getNodeAndDevNullPath("node_1"), getReservedSpace()) + .fPut(getNodeAndDevNullPath("node_2"), getReservedSpace()) + .fPut(getNodeAndDevNullPath("high_watermark_node_0"), getReservedSpace()) + .build(); final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes, reservedSpace); ClusterInfoService cis = () -> clusterInfo; allocation = createAllocationService(settings, cis); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java index c4dcae84581cb..0d53e4bf8c4ed 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java @@ -28,6 +28,7 @@ import org.opensearch.gateway.GatewayAllocator; import org.opensearch.test.gateway.TestGatewayAllocator; +import java.util.List; import java.util.Map; import static java.util.Collections.singletonMap; @@ -43,7 +44,7 @@ public class NodeLoadAwareAllocationTests extends OpenSearchAllocationTestCase { public void testNewUnassignedPrimaryAllocationOnOverload() { AllocationService strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 5, 
NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -136,7 +137,7 @@ public void testNewUnassignedPrimaryAllocationOnOverload() { public void testNoAllocationLimitsOnOverloadForDisabledLoadFactor() { AllocationService strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 5, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -231,7 +232,7 @@ public void testNoAllocationLimitsOnOverloadForDisabledLoadFactor() { public void testExistingPrimariesAllocationOnOverload() { GatewayAllocator gatewayAllocator = new TestGatewayAllocator(); AllocationService strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 5, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -317,7 +318,7 @@ public void testExistingPrimariesAllocationOnOverload() { logger.info("--> change the overload load factor to zero and verify if unassigned primaries on disk get assigned despite overload"); strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 5, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -383,7 +384,7 @@ public void testExistingPrimariesAllocationOnOverload() { public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { GatewayAllocator gatewayAllocator = new TestGatewayAllocator(); AllocationService strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 5, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -494,7 +495,7 @@ public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { logger.info("change settings to allow unassigned primaries"); strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 5, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -534,7 +535,7 @@ public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { public void testThreeZoneTwoReplicaLimitsShardAllocationOnOverload() { AllocationService strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 15, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -643,7 +644,7 @@ public void testThreeZoneTwoReplicaLimitsShardAllocationOnOverload() { public void testThreeZoneOneReplicaLimitsShardAllocationOnOverload() { AllocationService strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( 
NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 15, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -738,7 +739,7 @@ public void testThreeZoneOneReplicaLimitsShardAllocationOnOverload() { public void testThreeZoneTwoReplicaLimitsShardAllocationOnOverloadAcrossZones() { AllocationService strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 9, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -832,7 +833,7 @@ public void testThreeZoneTwoReplicaLimitsShardAllocationOnOverloadAcrossZones() public void testSingleZoneTwoReplicaLimitsReplicaAllocationOnOverload() { AllocationService strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 3, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -893,7 +894,7 @@ public void testSingleZoneTwoReplicaLimitsReplicaAllocationOnOverload() { public void testSingleZoneOneReplicaLimitsReplicaAllocationOnOverload() { AllocationService strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 5, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -976,7 +977,7 @@ public void testSingleZoneOneReplicaLimitsReplicaAllocationOnOverload() { public void testThreeZoneTwoReplicaLimitsReplicaAllocationUnderFullZoneFailure() { AllocationService strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 15, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -1087,7 +1088,7 @@ public void testThreeZoneTwoReplicaLimitsReplicaAllocationUnderFullZoneFailure() public void testThreeZoneOneReplicaWithSkewFactorZeroAllShardsAssignedAfterRecovery() { AllocationService strategy = createAllocationServiceWithAdditionalSettings( - org.opensearch.common.collect.Map.of( + Map.of( NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), 15, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), @@ -1201,13 +1202,13 @@ public void testThreeZoneOneReplicaWithSkewFactorZeroAllShardsAssignedAfterRecov private ClusterState removeNodes(ClusterState clusterState, AllocationService allocationService, String... nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.getNodes()); - org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.remove(nodeId)); + List.of(nodeIds).forEach(nodeId -> nodeBuilder.remove(nodeId)); return allocationService.disassociateDeadNodes(ClusterState.builder(clusterState).nodes(nodeBuilder).build(), true, "reroute"); } private ClusterState addNodes(ClusterState clusterState, AllocationService allocationService, String zone, String... 
nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); - org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newNode(nodeId, singletonMap("zone", zone)))); + List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newNode(nodeId, singletonMap("zone", zone)))); clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); return allocationService.reroute(clusterState, "reroute"); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java index 789de474d8ce5..ae9799545e6af 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java @@ -20,7 +20,6 @@ import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RoutingNodes; @@ -35,18 +34,13 @@ import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.test.gateway.TestGatewayAllocator; -import java.net.Inet4Address; -import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.Set; import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -66,6 +60,7 @@ public abstract class RemoteShardsBalancerBaseTestCase extends OpenSearchAllocat DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.SEARCH_ROLE ); + protected static final Set SEARCH_ONLY_ROLE = Set.of(DiscoveryNodeRole.SEARCH_ROLE); protected static final int PRIMARIES = 5; protected static final int REPLICAS = 1; @@ -116,52 +111,11 @@ public RoutingAllocation getRoutingAllocation(ClusterState clusterState, Routing ); } - private Map createNodeAttributes(String nodeId) { - Map attr = new HashMap<>(); - attr.put("name", nodeId); - attr.put("node_id", nodeId); - return attr; + public ClusterState createInitialCluster(int localOnlyNodes, int remoteNodes, int localIndices, int remoteIndices) { + return createInitialCluster(localOnlyNodes, remoteNodes, false, localIndices, remoteIndices); } - public ClusterState addNodes(ClusterState clusterState, int nodeCount, boolean isRemote) { - DiscoveryNodes.Builder nb = DiscoveryNodes.builder(clusterState.nodes()); - for (int i = 0; i < nodeCount; i++) { - String id = getNodeId(i, isRemote, "new"); - nb.add(newNode(id, id, isRemote ? 
SEARCH_DATA_ROLES : MANAGER_DATA_ROLES)); - } - return ClusterState.builder(clusterState).nodes(nb.build()).build(); - } - - public ClusterState addNodeWithIP(ClusterState clusterState, int nodeId, boolean isRemote, String IP) throws UnknownHostException { - TransportAddress ipAddress = new TransportAddress(Inet4Address.getByName(IP), 9200); - DiscoveryNodes.Builder nb = DiscoveryNodes.builder(clusterState.nodes()); - String id = getNodeId(nodeId, isRemote, "new"); - nb.add( - new DiscoveryNode( - id, - id, - ipAddress, - createNodeAttributes(id), - isRemote ? SEARCH_DATA_ROLES : MANAGER_DATA_ROLES, - Version.CURRENT - ) - ); - return ClusterState.builder(clusterState).nodes(nb.build()).build(); - } - - public ClusterState terminateNodes(ClusterState clusterState, AllocationService service, List nodesToTerminate) { - if (nodesToTerminate.isEmpty()) { - return clusterState; - } - logger.info("Terminating following nodes from cluster: [{}]", nodesToTerminate); - DiscoveryNodes.Builder nb = DiscoveryNodes.builder(clusterState.nodes()); - nodesToTerminate.forEach(nb::remove); - clusterState = ClusterState.builder(clusterState).nodes(nb.build()).build(); - clusterState = service.disassociateDeadNodes(clusterState, false, "nodes-terminated"); - return clusterState; - } - - public ClusterState createInitialCluster(int localOnlyNodes, int remoteCapableNodes, int localIndices, int remoteIndices) { + public ClusterState createInitialCluster(int localOnlyNodes, int remoteNodes, boolean remoteOnly, int localIndices, int remoteIndices) { Metadata.Builder mb = Metadata.builder(); for (int i = 0; i < localIndices; i++) { mb.put( @@ -199,9 +153,16 @@ public ClusterState createInitialCluster(int localOnlyNodes, int remoteCapableNo String name = getNodeId(i, false); nb.add(newNode(name, name, MANAGER_DATA_ROLES)); } - for (int i = 0; i < remoteCapableNodes; i++) { - String name = getNodeId(i, true); - nb.add(newNode(name, name, SEARCH_DATA_ROLES)); + if (remoteOnly) { + for (int i = 0; i < remoteNodes; i++) { + String name = getNodeId(i, true); + nb.add(newNode(name, name, SEARCH_ONLY_ROLE)); + } + } else { + for (int i = 0; i < remoteNodes; i++) { + String name = getNodeId(i, true); + nb.add(newNode(name, name, SEARCH_DATA_ROLES)); + } } DiscoveryNodes nodes = nb.build(); return ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).nodes(nodes).build(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java index 9a415ed0b339b..8f2db5db969d2 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java @@ -24,7 +24,7 @@ import java.util.stream.Collectors; public class TargetPoolAllocationDeciderTests extends RemoteShardsBalancerBaseTestCase { - public void testTargetPoolAllocationDecisions() { + public void testTargetPoolHybridAllocationDecisions() { ClusterState clusterState = createInitialCluster(3, 3, 2, 2); AllocationService service = this.createRemoteCapableAllocationService(); clusterState = allocateShardsAndBalance(clusterState, service); @@ -111,4 +111,93 @@ public void testTargetPoolAllocationDecisions() { assertEquals(Decision.YES.type(), deciders.shouldAutoExpandToNode(localIdx, localOnlyNode.node(), 
globalAllocation).type()); assertEquals(Decision.YES.type(), deciders.shouldAutoExpandToNode(remoteIdx, remoteCapableNode.node(), globalAllocation).type()); } + + public void testTargetPoolDedicatedSearchNodeAllocationDecisions() { + ClusterState clusterState = createInitialCluster(3, 3, true, 2, 2); + AllocationService service = this.createRemoteCapableAllocationService(); + clusterState = allocateShardsAndBalance(clusterState, service); + + // Add an unassigned primary shard for force allocation checks + Metadata metadata = Metadata.builder(clusterState.metadata()) + .put(IndexMetadata.builder("test_local_unassigned").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .build(); + RoutingTable routingTable = RoutingTable.builder(clusterState.routingTable()) + .addAsNew(metadata.index("test_local_unassigned")) + .build(); + clusterState = ClusterState.builder(clusterState).metadata(metadata).routingTable(routingTable).build(); + + // Add remote index unassigned primary + clusterState = createRemoteIndex(clusterState, "test_remote_unassigned"); + + RoutingNodes defaultRoutingNodes = clusterState.getRoutingNodes(); + RoutingAllocation globalAllocation = getRoutingAllocation(clusterState, defaultRoutingNodes); + + ShardRouting localShard = clusterState.routingTable() + .allShards(getIndexName(0, false)) + .stream() + .filter(ShardRouting::primary) + .collect(Collectors.toList()) + .get(0); + ShardRouting remoteShard = clusterState.routingTable() + .allShards(getIndexName(0, true)) + .stream() + .filter(ShardRouting::primary) + .collect(Collectors.toList()) + .get(0); + ShardRouting unassignedLocalShard = clusterState.routingTable() + .allShards("test_local_unassigned") + .stream() + .filter(ShardRouting::primary) + .collect(Collectors.toList()) + .get(0); + ShardRouting unassignedRemoteShard = clusterState.routingTable() + .allShards("test_remote_unassigned") + .stream() + .filter(ShardRouting::primary) + .collect(Collectors.toList()) + .get(0); + IndexMetadata localIdx = globalAllocation.metadata().getIndexSafe(localShard.index()); + IndexMetadata remoteIdx = globalAllocation.metadata().getIndexSafe(remoteShard.index()); + String localNodeId = LOCAL_NODE_PREFIX; + for (RoutingNode routingNode : globalAllocation.routingNodes()) { + if (routingNode.nodeId().startsWith(LOCAL_NODE_PREFIX)) { + localNodeId = routingNode.nodeId(); + break; + } + } + String remoteNodeId = remoteShard.currentNodeId(); + RoutingNode localOnlyNode = defaultRoutingNodes.node(localNodeId); + RoutingNode remoteCapableNode = defaultRoutingNodes.node(remoteNodeId); + + AllocationDeciders deciders = new AllocationDeciders(Collections.singletonList(new TargetPoolAllocationDecider())); + + // Incompatible Pools + assertEquals(Decision.NO.type(), deciders.canAllocate(remoteShard, localOnlyNode, globalAllocation).type()); + assertEquals(Decision.NO.type(), deciders.canAllocate(remoteIdx, localOnlyNode, globalAllocation).type()); + assertEquals(Decision.NO.type(), deciders.canForceAllocatePrimary(unassignedRemoteShard, localOnlyNode, globalAllocation).type()); + // A dedicated search node should not accept local shards and indices. 
+ assertEquals(Decision.NO.type(), deciders.canAllocate(localShard, remoteCapableNode, globalAllocation).type()); + assertEquals(Decision.NO.type(), deciders.canAllocate(localIdx, remoteCapableNode, globalAllocation).type()); + assertEquals( + Decision.NO.type(), + deciders.canForceAllocatePrimary(unassignedLocalShard, remoteCapableNode, globalAllocation).type() + ); + + // Compatible Pools + assertEquals(Decision.YES.type(), deciders.canAllocate(remoteShard, remoteCapableNode, globalAllocation).type()); + assertEquals(Decision.YES.type(), deciders.canAllocate(remoteIdx, remoteCapableNode, globalAllocation).type()); + assertEquals(Decision.YES.type(), deciders.canAllocate(localShard, localOnlyNode, globalAllocation).type()); + assertEquals(Decision.YES.type(), deciders.canAllocate(localIdx, localOnlyNode, globalAllocation).type()); + assertEquals( + Decision.YES.type(), + deciders.canForceAllocatePrimary(unassignedRemoteShard, remoteCapableNode, globalAllocation).type() + ); + assertEquals(Decision.YES.type(), deciders.canForceAllocatePrimary(unassignedLocalShard, localOnlyNode, globalAllocation).type()); + + // Verify only compatible nodes are used for auto expand replica decision for remote index and local index + assertEquals(Decision.NO.type(), deciders.shouldAutoExpandToNode(localIdx, remoteCapableNode.node(), globalAllocation).type()); + assertEquals(Decision.NO.type(), deciders.shouldAutoExpandToNode(remoteIdx, localOnlyNode.node(), globalAllocation).type()); + assertEquals(Decision.YES.type(), deciders.shouldAutoExpandToNode(localIdx, localOnlyNode.node(), globalAllocation).type()); + assertEquals(Decision.YES.type(), deciders.shouldAutoExpandToNode(remoteIdx, remoteCapableNode.node(), globalAllocation).type()); + } } diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java index 0acdbffe3dc4f..7492ea80638ed 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java @@ -69,12 +69,9 @@ public static void afterClass() { public void testDefaults() { ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler( - Settings.EMPTY, - clusterSettings, - () -> { return clusterService.getMasterService().getMinNodeVersion(); }, - new ClusterManagerThrottlingStats() - ); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { + return clusterService.getMasterService().getMinNodeVersion(); + }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); throttler.registerClusterManagerTask("create-index", true); for (String key : throttler.THROTTLING_TASK_KEYS.keySet()) { @@ -91,12 +88,9 @@ public void testValidateSettingsForDifferentVersion() { ); ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler( - Settings.EMPTY, - clusterSettings, - () -> { return clusterService.getMasterService().getMinNodeVersion(); }, - new ClusterManagerThrottlingStats() - ); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, 
clusterSettings, () -> { + return clusterService.getMasterService().getMinNodeVersion(); + }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); // set some limit for update snapshot tasks @@ -124,12 +118,9 @@ public void testValidateSettingsForTaskWihtoutRetryOnDataNode() { ); ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler( - Settings.EMPTY, - clusterSettings, - () -> { return clusterService.getMasterService().getMinNodeVersion(); }, - new ClusterManagerThrottlingStats() - ); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { + return clusterService.getMasterService().getMinNodeVersion(); + }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", false); // set some limit for update snapshot tasks @@ -148,12 +139,9 @@ public void testUpdateSettingsForNullValue() { ); ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler( - Settings.EMPTY, - clusterSettings, - () -> { return clusterService.getMasterService().getMinNodeVersion(); }, - new ClusterManagerThrottlingStats() - ); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { + return clusterService.getMasterService().getMinNodeVersion(); + }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); // set some limit for put-mapping tasks @@ -181,12 +169,9 @@ public void testSettingsOnBootstrap() { Settings initialSettings = Settings.builder() .put("cluster_manager.throttling.thresholds.put-mapping.value", put_mapping_threshold_value) .build(); - ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler( - initialSettings, - clusterSettings, - () -> { return clusterService.getMasterService().getMinNodeVersion(); }, - new ClusterManagerThrottlingStats() - ); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(initialSettings, clusterSettings, () -> { + return clusterService.getMasterService().getMinNodeVersion(); + }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); // assert that limit is applied on throttler @@ -202,12 +187,9 @@ public void testValidateSettingsForUnknownTask() { ); ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler( - Settings.EMPTY, - clusterSettings, - () -> { return clusterService.getMasterService().getMinNodeVersion(); }, - new ClusterManagerThrottlingStats() - ); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { + return clusterService.getMasterService().getMinNodeVersion(); + }, new ClusterManagerThrottlingStats()); // set some limit for update snapshot tasks int newLimit = randomIntBetween(1, 10); @@ -224,12 +206,9 @@ public void testUpdateThrottlingLimitForBasicSanity() { ); ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler( - 
Settings.EMPTY, - clusterSettings, - () -> { return clusterService.getMasterService().getMinNodeVersion(); }, - new ClusterManagerThrottlingStats() - ); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { + return clusterService.getMasterService().getMinNodeVersion(); + }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); // set some limit for update snapshot tasks @@ -254,12 +233,9 @@ public void testValidateSettingForLimit() { ); ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler( - Settings.EMPTY, - clusterSettings, - () -> { return clusterService.getMasterService().getMinNodeVersion(); }, - new ClusterManagerThrottlingStats() - ); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { + return clusterService.getMasterService().getMinNodeVersion(); + }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); Settings newSettings = Settings.builder().put("cluster_manager.throttling.thresholds.put-mapping.values", -5).build(); @@ -268,12 +244,9 @@ public void testValidateSettingForLimit() { public void testUpdateLimit() { ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler( - Settings.EMPTY, - clusterSettings, - () -> { return clusterService.getMasterService().getMinNodeVersion(); }, - new ClusterManagerThrottlingStats() - ); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { + return clusterService.getMasterService().getMinNodeVersion(); + }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); throttler.updateLimit("test", 5); @@ -306,12 +279,9 @@ public void testThrottlingForDisabledThrottlingTask() { ClusterManagerThrottlingStats throttlingStats = new ClusterManagerThrottlingStats(); String taskKey = "test"; ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler( - Settings.EMPTY, - clusterSettings, - () -> { return clusterService.getMasterService().getMinNodeVersion(); }, - throttlingStats - ); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { + return clusterService.getMasterService().getMinNodeVersion(); + }, throttlingStats); ClusterManagerTaskThrottler.ThrottlingKey throttlingKey = throttler.registerClusterManagerTask(taskKey, false); // adding limit directly in thresholds @@ -339,12 +309,9 @@ public void testThrottlingForInitialStaticSettingAndVersionCheck() { Settings initialSettings = Settings.builder() .put("cluster_manager.throttling.thresholds.put-mapping.value", put_mapping_threshold_value) .build(); - ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler( - initialSettings, - clusterSettings, - () -> { return clusterService.getMasterService().getMinNodeVersion(); }, - throttlingStats - ); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(initialSettings, clusterSettings, () -> { + return 
clusterService.getMasterService().getMinNodeVersion(); + }, throttlingStats); ClusterManagerTaskThrottler.ThrottlingKey throttlingKey = throttler.registerClusterManagerTask("put-mapping", true); // verifying adding more tasks then threshold passes @@ -370,12 +337,9 @@ public void testThrottling() { ClusterManagerThrottlingStats throttlingStats = new ClusterManagerThrottlingStats(); String taskKey = "test"; ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler( - Settings.EMPTY, - clusterSettings, - () -> { return clusterService.getMasterService().getMinNodeVersion(); }, - throttlingStats - ); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { + return clusterService.getMasterService().getMinNodeVersion(); + }, throttlingStats); ClusterManagerTaskThrottler.ThrottlingKey throttlingKey = throttler.registerClusterManagerTask(taskKey, true); throttler.updateLimit(taskKey, 5); diff --git a/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java b/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java index 31018d4cef029..b59b70ca60ef8 100644 --- a/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java @@ -279,6 +279,87 @@ public void processed(String source) { } } + public void testNoTasksAreDroppedInParallelSubmission() throws BrokenBarrierException, InterruptedException { + int numberOfThreads = randomIntBetween(2, 8); + TaskExecutor[] executors = new TaskExecutor[numberOfThreads]; + for (int i = 0; i < numberOfThreads; i++) { + executors[i] = new TaskExecutor(); + } + + int tasksSubmittedPerThread = randomIntBetween(2, 1024); + + CopyOnWriteArrayList> failures = new CopyOnWriteArrayList<>(); + CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); + + final TestListener listener = new TestListener() { + @Override + public void onFailure(String source, Exception e) { + logger.error(() -> new ParameterizedMessage("unexpected failure: [{}]", source), e); + failures.add(new Tuple<>(source, e)); + updateLatch.countDown(); + } + + @Override + public void processed(String source) { + updateLatch.countDown(); + } + }; + + CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); + + for (int i = 0; i < numberOfThreads; i++) { + final int index = i; + Thread thread = new Thread(() -> { + try { + barrier.await(); + CyclicBarrier tasksBarrier = new CyclicBarrier(1 + tasksSubmittedPerThread); + for (int j = 0; j < tasksSubmittedPerThread; j++) { + int taskNumber = j; + Thread taskThread = new Thread(() -> { + try { + tasksBarrier.await(); + submitTask( + "[" + index + "][" + taskNumber + "]", + taskNumber, + ClusterStateTaskConfig.build(randomFrom(Priority.values())), + executors[index], + listener + ); + tasksBarrier.await(); + } catch (InterruptedException | BrokenBarrierException e) { + throw new AssertionError(e); + } + }); + // submit tasks per batchingKey in parallel + taskThread.start(); + } + // wait for all task threads to be ready + tasksBarrier.await(); + // wait for all task threads to finish + tasksBarrier.await(); + barrier.await(); + } catch (InterruptedException | BrokenBarrierException e) { + throw new AssertionError(e); + } + }); + thread.start(); + } + + // wait for all executor threads to be ready + 
barrier.await(); + // wait for all executor threads to finish + barrier.await(); + + updateLatch.await(); + + assertThat(failures, empty()); + + for (int i = 0; i < numberOfThreads; i++) { + // assert that total executed tasks is same for every executor as we initiated + assertEquals(tasksSubmittedPerThread, executors[i].tasks.size()); + } + } + public void testSingleBatchSubmission() throws InterruptedException { Map tasks = new HashMap<>(); final int numOfTasks = randomInt(10); diff --git a/server/src/test/java/org/opensearch/common/geo/GeoJsonSerializationTests.java b/server/src/test/java/org/opensearch/common/geo/GeoJsonSerializationTests.java index 2b943a65ef4d8..589b4d7597947 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoJsonSerializationTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoJsonSerializationTests.java @@ -102,7 +102,9 @@ private void xContentTest(Supplier instanceSupplier) throws IOExceptio AbstractXContentTestCase.xContentTester( this::createParser, () -> new GeometryWrapper(instanceSupplier.get()), - (geometryWrapper, xContentBuilder) -> { geometryWrapper.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); }, + (geometryWrapper, xContentBuilder) -> { + geometryWrapper.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); + }, GeometryWrapper::fromXContent ).supportsUnknownFields(true).test(); } diff --git a/server/src/test/java/org/opensearch/common/settings/SettingTests.java b/server/src/test/java/org/opensearch/common/settings/SettingTests.java index 7703cb394397e..005c0d7c38b51 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingTests.java @@ -36,9 +36,21 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.io.stream.BytesStreamInput; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.AbstractScopedSettings.SettingUpdater; +import org.opensearch.common.settings.Setting.ByteSizeValueParser; +import org.opensearch.common.settings.Setting.DoubleParser; +import org.opensearch.common.settings.Setting.FloatParser; +import org.opensearch.common.settings.Setting.IntegerParser; +import org.opensearch.common.settings.Setting.LongParser; +import org.opensearch.common.settings.Setting.MemorySizeValueParser; +import org.opensearch.common.settings.Setting.MinMaxTimeValueParser; +import org.opensearch.common.settings.Setting.MinTimeValueParser; import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Setting.RegexValidator; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; @@ -58,6 +70,7 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; +import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -311,14 +324,52 @@ public void testValidator() { assertTrue(FooBarValidator.invokedWithDependencies); } + public void testRegexValidator() throws Exception { + // A regex that matches one or more digits + String expectedRegex = "\\d+"; + Pattern expectedPattern = Pattern.compile(expectedRegex); + RegexValidator regexValidator = new RegexValidator(expectedRegex); + + // 
Test that the pattern is correctly initialized + assertNotNull(expectedPattern); + assertNotNull(regexValidator.getPattern()); + assertEquals(expectedPattern.pattern(), regexValidator.getPattern().pattern()); + + // Test that validate() throws an exception for invalid input + final RegexValidator finalValidator = new RegexValidator(expectedRegex); + assertThrows(IllegalArgumentException.class, () -> finalValidator.validate("foo")); + + try { + regexValidator.validate("123"); + } catch (IllegalArgumentException e) { + fail("Expected validate() to not throw an exception, but it threw " + e); + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + regexValidator.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + regexValidator = new RegexValidator(in); + assertEquals(expectedPattern.pattern(), regexValidator.getPattern().pattern()); + + // Test that validate() throws an exception for invalid input + final RegexValidator newFinalValidator = new RegexValidator(expectedRegex); + assertThrows(IllegalArgumentException.class, () -> newFinalValidator.validate("foo")); + + // Test that validate() does not throw an exception for valid input + try { + regexValidator.validate("123"); + } catch (IllegalArgumentException e) { + fail("Expected validate() to not throw an exception, but it threw " + e); + } + } + } + } + public void testValidatorForFilteredStringSetting() { - final Setting filteredStringSetting = new Setting<>( - "foo.bar", - "foobar", - Function.identity(), - value -> { throw new SettingsException("validate always fails"); }, - Property.Filtered - ); + final Setting filteredStringSetting = new Setting<>("foo.bar", "foobar", Function.identity(), value -> { + throw new SettingsException("validate always fails"); + }, Property.Filtered); final Settings settings = Settings.builder().put(filteredStringSetting.getKey(), filteredStringSetting.getKey() + " value").build(); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> filteredStringSetting.get(settings)); @@ -1045,6 +1096,31 @@ public void testIntWithMinMax() { assertEquals(1, integerSetting.get(Settings.EMPTY).intValue()); } + public void testIntegerParser() throws Exception { + String expectedKey = "test key"; + int expectedMinValue = Integer.MIN_VALUE; + int expectedMaxValue = Integer.MAX_VALUE; + boolean expectedFilteredStatus = true; + IntegerParser integerParser = new IntegerParser(expectedMinValue, expectedMaxValue, expectedKey, expectedFilteredStatus); + + assertEquals(expectedKey, integerParser.getKey()); + assertEquals(expectedMinValue, integerParser.getMin()); + assertEquals(expectedMaxValue, integerParser.getMax()); + assertEquals(expectedFilteredStatus, integerParser.getFilterStatus()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + integerParser.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + integerParser = new IntegerParser(in); + assertEquals(expectedKey, integerParser.getKey()); + assertEquals(expectedMinValue, integerParser.getMin()); + assertEquals(expectedMaxValue, integerParser.getMax()); + assertEquals(expectedFilteredStatus, integerParser.getFilterStatus()); + } + } + } + // Long public void testLongWithDefaultValue() { @@ -1080,6 +1156,31 @@ public void testLongWithMinMax() { assertEquals(1, longSetting.get(Settings.EMPTY).longValue()); } + public void testLongParser() throws Exception { + String expectedKey = "test 
key"; + long expectedMinValue = Long.MIN_VALUE; + long expectedMaxValue = Long.MAX_VALUE; + boolean expectedFilteredStatus = true; + LongParser longParser = new LongParser(expectedMinValue, expectedMaxValue, expectedKey, expectedFilteredStatus); + + assertEquals(expectedKey, longParser.getKey()); + assertEquals(expectedMinValue, longParser.getMin()); + assertEquals(expectedMaxValue, longParser.getMax()); + assertEquals(expectedFilteredStatus, longParser.getFilterStatus()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + longParser.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + longParser = new LongParser(in); + assertEquals(expectedKey, longParser.getKey()); + assertEquals(expectedMinValue, longParser.getMin()); + assertEquals(expectedMaxValue, longParser.getMax()); + assertEquals(expectedFilteredStatus, longParser.getFilterStatus()); + } + } + } + // Float public void testFloatWithDefaultValue() { @@ -1115,6 +1216,31 @@ public void testFloatWithMinMax() { assertEquals(1.2, floatSetting.get(Settings.EMPTY).floatValue(), 0.01); } + public void testFloatParser() throws Exception { + String expectedKey = "test key"; + float expectedMinValue = Float.MIN_VALUE; + float expectedMaxValue = Float.MAX_VALUE; + boolean expectedFilteredStatus = true; + FloatParser floatParser = new FloatParser(expectedMinValue, expectedMaxValue, expectedKey, expectedFilteredStatus); + + assertEquals(expectedKey, floatParser.getKey()); + assertEquals(expectedMinValue, floatParser.getMin(), 0.01); + assertEquals(expectedMaxValue, floatParser.getMax(), 0.01); + assertEquals(expectedFilteredStatus, floatParser.getFilterStatus()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + floatParser.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + floatParser = new FloatParser(in); + assertEquals(expectedKey, floatParser.getKey()); + assertEquals(expectedMinValue, floatParser.getMin(), 0.01); + assertEquals(expectedMaxValue, floatParser.getMax(), 0.01); + assertEquals(expectedFilteredStatus, floatParser.getFilterStatus()); + } + } + } + // Double public void testDoubleWithDefaultValue() { @@ -1130,7 +1256,7 @@ public void testDoubleWithFallbackValue() { assertEquals(doubleSetting.get(Settings.builder().put("foo.baz", 3.2).build()), Double.valueOf(3.2)); } - public void testDoubleWithMinMax() { + public void testDoubleWithMinMax() throws Exception { Setting doubleSetting = Setting.doubleSetting("foo.bar", 1.2, 0, 10, Property.NodeScope); try { doubleSetting.get(Settings.builder().put("foo.bar", 11.3).build()); @@ -1150,6 +1276,71 @@ public void testDoubleWithMinMax() { assertEquals(1.2, doubleSetting.get(Settings.EMPTY).doubleValue(), 0.01); } + public void testDoubleParser() throws Exception { + String expectedKey = "test key"; + double expectedMinValue = Double.MIN_VALUE; + double expectedMaxValue = Double.MAX_VALUE; + boolean expectedFilteredStatus = true; + DoubleParser doubleParser = new DoubleParser(expectedMinValue, expectedMaxValue, expectedKey, expectedFilteredStatus); + + assertEquals(expectedKey, doubleParser.getKey()); + assertEquals(expectedMinValue, doubleParser.getMin(), 0.01); + assertEquals(expectedMaxValue, doubleParser.getMax(), 0.01); + assertEquals(expectedFilteredStatus, doubleParser.getFilterStatus()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + doubleParser.writeTo(out); + out.flush(); + try (BytesStreamInput in = new 
BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + doubleParser = new DoubleParser(in); + assertEquals(expectedKey, doubleParser.getKey()); + assertEquals(expectedMinValue, doubleParser.getMin(), 0.01); + assertEquals(expectedMaxValue, doubleParser.getMax(), 0.01); + assertEquals(expectedFilteredStatus, doubleParser.getFilterStatus()); + } + } + } + + // ByteSizeValue + public void testByteSizeValueParser() throws Exception { + String expectedKey = "test key"; + ByteSizeValue expectedMinValue = new ByteSizeValue((long) 1); + ByteSizeValue expectedMaxValue = new ByteSizeValue(Long.MAX_VALUE); + ByteSizeValueParser byteSizeValueParser = new ByteSizeValueParser(expectedMinValue, expectedMaxValue, expectedKey); + + assertEquals(expectedKey, byteSizeValueParser.getKey()); + assertEquals(expectedMinValue, byteSizeValueParser.getMin()); + assertEquals(expectedMaxValue, byteSizeValueParser.getMax()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + byteSizeValueParser.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + byteSizeValueParser = new ByteSizeValueParser(in); + assertEquals(expectedKey, byteSizeValueParser.getKey()); + assertEquals(expectedMinValue, byteSizeValueParser.getMin()); + assertEquals(expectedMaxValue, byteSizeValueParser.getMax()); + } + } + } + + // MemorySizeValue + public void testMemorySizeValueParser() throws Exception { + String expectedKey = "test key"; + MemorySizeValueParser memorySizeValueParser = new MemorySizeValueParser(expectedKey); + + assertEquals(expectedKey, memorySizeValueParser.getKey()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + memorySizeValueParser.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + memorySizeValueParser = new MemorySizeValueParser(in); + assertEquals(expectedKey, memorySizeValueParser.getKey()); + } + } + } + /** * Only one single scope can be added to any setting */ @@ -1228,6 +1419,58 @@ public void testTimeValue() { assertThat(setting.get(Settings.EMPTY).getMillis(), equalTo(random.getMillis() * factor)); } + public void testMinTimeValueParser() throws Exception { + String expectedKey = "test key"; + TimeValue expectedMinValue = TimeValue.timeValueSeconds(0); + boolean expectedFilteredStatus = true; + MinTimeValueParser minTimeValueParser = new MinTimeValueParser(expectedKey, expectedMinValue, expectedFilteredStatus); + + assertEquals(expectedKey, minTimeValueParser.getKey()); + assertEquals(expectedMinValue, minTimeValueParser.getMin()); + assertEquals(expectedFilteredStatus, minTimeValueParser.getFilterStatus()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + minTimeValueParser.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + minTimeValueParser = new MinTimeValueParser(in); + assertEquals(expectedKey, minTimeValueParser.getKey()); + assertEquals(expectedMinValue, minTimeValueParser.getMin()); + assertEquals(expectedFilteredStatus, minTimeValueParser.getFilterStatus()); + } + } + } + + public void testMinMaxTimeValueParser() throws Exception { + String expectedKey = "test key"; + TimeValue expectedMinValue = TimeValue.timeValueSeconds(0); + TimeValue expectedMaxValue = TimeValue.MAX_VALUE; + boolean expectedFilteredStatus = true; + MinMaxTimeValueParser minMaxTimeValueParser = new MinMaxTimeValueParser( + expectedKey, + expectedMinValue, + expectedMaxValue, + 
expectedFilteredStatus + ); + + assertEquals(expectedKey, minMaxTimeValueParser.getKey()); + assertEquals(expectedMinValue, minMaxTimeValueParser.getMin()); + assertEquals(expectedMaxValue, minMaxTimeValueParser.getMax()); + assertEquals(expectedFilteredStatus, minMaxTimeValueParser.getFilterStatus()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + minMaxTimeValueParser.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + minMaxTimeValueParser = new MinMaxTimeValueParser(in); + assertEquals(expectedKey, minMaxTimeValueParser.getKey()); + assertEquals(expectedMinValue, minMaxTimeValueParser.getMin()); + assertEquals(expectedMaxValue, minMaxTimeValueParser.getMax()); + assertEquals(expectedFilteredStatus, minMaxTimeValueParser.getFilterStatus()); + } + } + } + public void testTimeValueBounds() { Setting settingWithLowerBound = Setting.timeSetting( "foo", @@ -1381,16 +1624,14 @@ public void testGroupSettingUpdaterValidator() { validator ); - IllegalArgumentException illegal = expectThrows( - IllegalArgumentException.class, - () -> { updater.getValue(Settings.builder().put("prefix.foo.suffix", 5).put("abc", 2).build(), Settings.EMPTY); } - ); + IllegalArgumentException illegal = expectThrows(IllegalArgumentException.class, () -> { + updater.getValue(Settings.builder().put("prefix.foo.suffix", 5).put("abc", 2).build(), Settings.EMPTY); + }); assertEquals("foo and 2 can't go together", illegal.getMessage()); - illegal = expectThrows( - IllegalArgumentException.class, - () -> { updater.getValue(Settings.builder().put("prefix.bar.suffix", 6).put("abc", 3).build(), Settings.EMPTY); } - ); + illegal = expectThrows(IllegalArgumentException.class, () -> { + updater.getValue(Settings.builder().put("prefix.bar.suffix", 6).put("abc", 3).build(), Settings.EMPTY); + }); assertEquals("no bar", illegal.getMessage()); Settings s = updater.getValue( diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java index 6848cd2bbc773..523659f00a1a3 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java @@ -606,19 +606,17 @@ public void testYamlLegacyList() throws IOException { public void testIndentation() throws Exception { String yaml = "/org/opensearch/common/settings/loader/indentation-settings.yml"; - OpenSearchParseException e = expectThrows( - OpenSearchParseException.class, - () -> { Settings.builder().loadFromStream(yaml, getClass().getResourceAsStream(yaml), false); } - ); + OpenSearchParseException e = expectThrows(OpenSearchParseException.class, () -> { + Settings.builder().loadFromStream(yaml, getClass().getResourceAsStream(yaml), false); + }); assertTrue(e.getMessage(), e.getMessage().contains("malformed")); } public void testIndentationWithExplicitDocumentStart() throws Exception { String yaml = "/org/opensearch/common/settings/loader/indentation-with-explicit-document-start-settings.yml"; - OpenSearchParseException e = expectThrows( - OpenSearchParseException.class, - () -> { Settings.builder().loadFromStream(yaml, getClass().getResourceAsStream(yaml), false); } - ); + OpenSearchParseException e = expectThrows(OpenSearchParseException.class, () -> { + Settings.builder().loadFromStream(yaml, getClass().getResourceAsStream(yaml), false); + }); assertTrue(e.getMessage(), e.getMessage().contains("malformed")); } diff --git 
a/server/src/test/java/org/opensearch/common/unit/FuzzinessTests.java b/server/src/test/java/org/opensearch/common/unit/FuzzinessTests.java index 9b8eb79112953..c2704391d6a74 100644 --- a/server/src/test/java/org/opensearch/common/unit/FuzzinessTests.java +++ b/server/src/test/java/org/opensearch/common/unit/FuzzinessTests.java @@ -31,6 +31,7 @@ package org.opensearch.common.unit; +import org.opensearch.OpenSearchParseException; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.xcontent.XContentBuilder; @@ -138,6 +139,26 @@ public void testParseFromXContent() throws IOException { } + public void testFuzzinessValidationWithStrings() throws IOException { + String[] invalidStrings = new String[] { "+++", "asdfghjkl", "2k23" }; + XContentBuilder json = jsonBuilder().startObject().field(Fuzziness.X_FIELD_NAME, randomFrom(invalidStrings)).endObject(); + try (XContentParser parser = createParser(json)) { + assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME)); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> Fuzziness.parse(parser)); + assertTrue(e.getMessage().startsWith("Invalid fuzziness value:")); + } + json = jsonBuilder().startObject().field(Fuzziness.X_FIELD_NAME, "AUTO:").endObject(); + try (XContentParser parser = createParser(json)) { + assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME)); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING)); + OpenSearchParseException e = expectThrows(OpenSearchParseException.class, () -> Fuzziness.parse(parser)); + assertTrue(e.getMessage().startsWith("failed to find low and high distance values")); + } + } + public void testAuto() { assertThat(Fuzziness.AUTO.asFloat(), equalTo(1f)); } diff --git a/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java b/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java index 8d8ba9872ee61..0760680f0ef2a 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java @@ -224,14 +224,8 @@ public void testExtractValueWithNullValue() throws Exception { assertNull(XContentMapValues.extractValue("object1.missing", map, "NULL")); assertEquals("NULL", XContentMapValues.extractValue("other_field", map, "NULL")); - assertEquals( - org.opensearch.common.collect.List.of("value1", "NULL", "value2"), - XContentMapValues.extractValue("array", map, "NULL") - ); - assertEquals( - org.opensearch.common.collect.List.of("NULL", "value"), - XContentMapValues.extractValue("object_array.field", map, "NULL") - ); + assertEquals(List.of("value1", "NULL", "value2"), XContentMapValues.extractValue("array", map, "NULL")); + assertEquals(List.of("NULL", "value"), XContentMapValues.extractValue("object_array.field", map, "NULL")); assertEquals("NULL", XContentMapValues.extractValue("object1.object2.field", map, "NULL")); } diff --git a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java index 5588e9c1ceba8..1be312194d336 100644 --- 
a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java @@ -218,10 +218,9 @@ public void testMultipleSeedsProviders() { } public void testLazyConstructionSeedsProvider() { - DummyHostsProviderPlugin plugin = () -> Collections.singletonMap( - "custom", - () -> { throw new AssertionError("created hosts provider which was not selected"); } - ); + DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("custom", () -> { + throw new AssertionError("created hosts provider which was not selected"); + }); newModule(Settings.EMPTY, Collections.singletonList(plugin)); } diff --git a/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java b/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java new file mode 100644 index 0000000000000..97820e6197eec --- /dev/null +++ b/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.discovery; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamInput; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.extensions.DiscoveryExtensionNode; +import org.opensearch.extensions.ExtensionDependency; +import org.opensearch.test.OpenSearchTestCase; + +import java.net.InetAddress; +import java.util.HashMap; +import java.util.List; + +public class InitializeExtensionRequestTests extends OpenSearchTestCase { + + public void testInitializeExtensionRequest() throws Exception { + String expectedUniqueId = "test uniqueid"; + Version expectedVersion = Version.fromString("2.0.0"); + ExtensionDependency expectedDependency = new ExtensionDependency(expectedUniqueId, expectedVersion); + DiscoveryExtensionNode expectedExtensionNode = new DiscoveryExtensionNode( + "firstExtension", + "uniqueid1", + new TransportAddress(InetAddress.getByName("127.0.0.0"), 9300), + new HashMap<>(), + Version.CURRENT, + Version.CURRENT, + List.of(expectedDependency) + ); + DiscoveryNode expectedSourceNode = new DiscoveryNode( + "sourceNode", + "uniqueid2", + new TransportAddress(InetAddress.getByName("127.0.0.0"), 1000), + new HashMap<>(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + InitializeExtensionRequest initializeExtensionRequest = new InitializeExtensionRequest(expectedSourceNode, expectedExtensionNode); + assertEquals(expectedExtensionNode, initializeExtensionRequest.getExtension()); + assertEquals(expectedSourceNode, initializeExtensionRequest.getSourceNode()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + initializeExtensionRequest.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + initializeExtensionRequest = new InitializeExtensionRequest(in); + + assertEquals(expectedExtensionNode, initializeExtensionRequest.getExtension()); + assertEquals(expectedSourceNode, initializeExtensionRequest.getSourceNode()); + } + } + } +} diff --git 
a/server/src/test/java/org/opensearch/discovery/InitializeExtensionResponseTests.java b/server/src/test/java/org/opensearch/discovery/InitializeExtensionResponseTests.java new file mode 100644 index 0000000000000..ea7cc96b0e2a4 --- /dev/null +++ b/server/src/test/java/org/opensearch/discovery/InitializeExtensionResponseTests.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.discovery; + +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamInput; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class InitializeExtensionResponseTests extends OpenSearchTestCase { + public void testInitializeExtensionResponse() throws Exception { + String expectedName = "testsample-sdk"; + List expectedImplementedInterfaces = new ArrayList<>(Arrays.asList("Action", "Search")); + + InitializeExtensionResponse initializeExtensionResponse = new InitializeExtensionResponse( + expectedName, + expectedImplementedInterfaces + ); + + assertEquals(expectedName, initializeExtensionResponse.getName()); + List implementedInterfaces = initializeExtensionResponse.getImplementedInterfaces(); + assertEquals(expectedImplementedInterfaces.size(), implementedInterfaces.size()); + assertTrue(implementedInterfaces.containsAll(expectedImplementedInterfaces)); + assertTrue(expectedImplementedInterfaces.containsAll(implementedInterfaces)); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + initializeExtensionResponse.writeTo(out); + out.flush(); + try (BytesStreamInput in = new BytesStreamInput(BytesReference.toBytes(out.bytes()))) { + initializeExtensionResponse = new InitializeExtensionResponse(in); + + assertEquals(expectedName, initializeExtensionResponse.getName()); + implementedInterfaces = initializeExtensionResponse.getImplementedInterfaces(); + assertEquals(expectedImplementedInterfaces.size(), implementedInterfaces.size()); + assertTrue(implementedInterfaces.containsAll(expectedImplementedInterfaces)); + assertTrue(expectedImplementedInterfaces.containsAll(implementedInterfaces)); + } + } + } + +} diff --git a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java index e50aaea009ba6..808d3c0a7ffea 100644 --- a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java @@ -591,7 +591,7 @@ public void testEnsureNoShardDataOrIndexMetadata() throws IOException { public void testSearchFileCacheConfiguration() throws IOException { Settings searchRoleSettings = addRoles(buildEnvSettings(Settings.EMPTY), Set.of(DiscoveryNodeRole.SEARCH_ROLE)); - ByteSizeValue cacheSize = new ByteSizeValue(100, ByteSizeUnit.MB); + ByteSizeValue cacheSize = new ByteSizeValue(16, ByteSizeUnit.GB); Settings searchRoleSettingsWithConfig = Settings.builder() .put(searchRoleSettings) .put(Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), cacheSize) diff --git a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java index 009ff324809b4..f172ebe7d37d9 100644 --- 
a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java +++ b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java @@ -46,6 +46,8 @@ import org.opensearch.common.CheckedRunnable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.gateway.PersistedClusterStateService; import org.opensearch.index.Index; @@ -57,16 +59,19 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; +import java.util.Set; import java.util.stream.Stream; import static org.opensearch.env.NodeRepurposeCommand.NO_CLEANUP; import static org.opensearch.env.NodeRepurposeCommand.NO_DATA_TO_CLEAN_UP_FOUND; +import static org.opensearch.env.NodeRepurposeCommand.NO_FILE_CACHE_DATA_TO_CLEAN_UP_FOUND; import static org.opensearch.env.NodeRepurposeCommand.NO_SHARD_DATA_TO_CLEAN_UP_FOUND; +import static org.opensearch.node.Node.NODE_SEARCH_CACHE_SIZE_SETTING; +import static org.opensearch.test.NodeRoles.addRoles; import static org.opensearch.test.NodeRoles.clusterManagerNode; import static org.opensearch.test.NodeRoles.nonDataNode; import static org.opensearch.test.NodeRoles.nonClusterManagerNode; +import static org.opensearch.test.NodeRoles.onlyRole; import static org.opensearch.test.NodeRoles.removeRoles; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -76,15 +81,35 @@ public class NodeRepurposeCommandTests extends OpenSearchTestCase { private static final Index INDEX = new Index("testIndex", "testUUID"); private Settings dataClusterManagerSettings; + private Settings dataSearchClusterManagerSettings; private Environment environment; private Path[] nodePaths; - private Settings dataNoClusterManagerSettings; + private Settings dataSearchNoClusterManagerSettings; private Settings noDataNoClusterManagerSettings; private Settings noDataClusterManagerSettings; + private Settings searchNoDataNoClusterManagerSettings; + private Settings noSearchNoClusterManagerSettings; @Before public void createNodePaths() throws IOException { dataClusterManagerSettings = buildEnvSettings(Settings.EMPTY); + Settings defaultSearchSettings = Settings.builder() + .put(dataClusterManagerSettings) + .put(NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), new ByteSizeValue(16, ByteSizeUnit.GB)) + .build(); + + searchNoDataNoClusterManagerSettings = onlyRole(dataClusterManagerSettings, DiscoveryNodeRole.SEARCH_ROLE); + dataSearchClusterManagerSettings = addRoles(defaultSearchSettings, Set.of(DiscoveryNodeRole.SEARCH_ROLE)); + noDataClusterManagerSettings = clusterManagerNode(nonDataNode(dataClusterManagerSettings)); + + dataSearchNoClusterManagerSettings = nonClusterManagerNode(dataSearchClusterManagerSettings); + noSearchNoClusterManagerSettings = nonClusterManagerNode(defaultSearchSettings); + + noDataNoClusterManagerSettings = removeRoles( + dataClusterManagerSettings, + Set.of(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE) + ); + environment = TestEnvironment.newEnvironment(dataClusterManagerSettings); try (NodeEnvironment nodeEnvironment = new NodeEnvironment(dataClusterManagerSettings, environment)) { nodePaths = nodeEnvironment.nodeDataPaths(); @@ -102,20 +127,13 @@ public void createNodePaths() throws IOException { writer.writeFullStateAndCommit(1L, 
ClusterState.EMPTY_STATE); } } - dataNoClusterManagerSettings = nonClusterManagerNode(dataClusterManagerSettings); - noDataNoClusterManagerSettings = removeRoles( - dataClusterManagerSettings, - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))) - ); - - noDataClusterManagerSettings = clusterManagerNode(nonDataNode(dataClusterManagerSettings)); } public void testEarlyExitNoCleanup() throws Exception { createIndexDataFiles(dataClusterManagerSettings, randomInt(10), randomBoolean()); - verifyNoQuestions(dataClusterManagerSettings, containsString(NO_CLEANUP)); - verifyNoQuestions(dataNoClusterManagerSettings, containsString(NO_CLEANUP)); + verifyNoQuestions(dataSearchClusterManagerSettings, containsString(NO_CLEANUP)); + verifyNoQuestions(dataSearchNoClusterManagerSettings, containsString(NO_CLEANUP)); } public void testNothingToCleanup() throws Exception { @@ -138,6 +156,7 @@ public void testNothingToCleanup() throws Exception { verifyNoQuestions(noDataNoClusterManagerSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); verifyNoQuestions(noDataClusterManagerSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noSearchNoClusterManagerSettings, containsString(NO_FILE_CACHE_DATA_TO_CLEAN_UP_FOUND)); createIndexDataFiles(dataClusterManagerSettings, 0, randomBoolean()); @@ -159,7 +178,7 @@ public void testLocked() throws IOException { } } - public void testCleanupAll() throws Exception { + public void testCleanupDataClusterManager() throws Exception { int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); boolean hasClusterState = randomBoolean(); @@ -208,6 +227,84 @@ public void testCleanupShardData() throws Exception { new NodeEnvironment(noDataClusterManagerSettings, environment).close(); } + public void testCleanupSearchNode() throws Exception { + int shardCount = randomIntBetween(1, 10); + boolean verbose = randomBoolean(); + boolean hasClusterState = randomBoolean(); + createIndexDataFiles(searchNoDataNoClusterManagerSettings, shardCount, hasClusterState, true); + + Matcher matcher = allOf( + containsString(NodeRepurposeCommand.shardMessage(shardCount, 1)), + conditionalNot(containsString("testUUID"), verbose == false), + conditionalNot(containsString("testIndex"), verbose == false || hasClusterState == false), + conditionalNot(containsString("no name for uuid: testUUID"), verbose == false || hasClusterState) + ); + + verifyUnchangedOnAbort(dataClusterManagerSettings, matcher, verbose); + + // verify test setup + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(dataClusterManagerSettings, environment).close()); + + verifySuccess(dataClusterManagerSettings, matcher, verbose); + + // verify clean. 
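            // Illustrative note, not part of this patch: the NodeEnvironment constructor is what
            // performs the role-vs-data safety checks, which is why this test uses it both ways:
            // the expectThrows above proves leftover file-cache data is detected while the node
            // lacks the search role, and the successful close() below proves repurposing removed
            // it. A rough sketch of the guard being exercised, with hypothetical names:
            //
            //     if (hasSearchRole(settings) == false && fileCachePathContainsData()) {
            //         throw new IllegalStateException("node does not have the search role but has file cache shard data");
            //     }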
+ new NodeEnvironment(dataClusterManagerSettings, environment).close(); + } + + public void testCleanupSearchClusterManager() throws Exception { + int shardCount = randomIntBetween(1, 10); + boolean verbose = randomBoolean(); + boolean hasClusterState = randomBoolean(); + createIndexDataFiles(dataSearchClusterManagerSettings, shardCount, hasClusterState, true); + + String messageText = NodeRepurposeCommand.noClusterManagerMessage(1, shardCount, 0); + + Matcher matcher = allOf( + containsString(messageText), + conditionalNot(containsString("testUUID"), verbose == false), + conditionalNot(containsString("testIndex"), verbose == false || hasClusterState == false), + conditionalNot(containsString("no name for uuid: testUUID"), verbose == false || hasClusterState) + ); + + verifyUnchangedOnAbort(noSearchNoClusterManagerSettings, matcher, verbose); + + // verify test setup + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noSearchNoClusterManagerSettings, environment).close()); + + verifySuccess(noSearchNoClusterManagerSettings, matcher, verbose); + + // verify clean. + new NodeEnvironment(noSearchNoClusterManagerSettings, environment).close(); + } + + public void testCleanupAll() throws Exception { + int shardCount = randomIntBetween(1, 10); + boolean verbose = randomBoolean(); + boolean hasClusterState = randomBoolean(); + createIndexDataFiles(dataSearchClusterManagerSettings, shardCount, hasClusterState, false); + createIndexDataFiles(dataSearchClusterManagerSettings, shardCount, hasClusterState, true); + + // environment.dataFiles().length * shardCount will account for the local shard files + // + shardCount will account for the additional file cache shard files. + String messageText = NodeRepurposeCommand.noClusterManagerMessage(1, (environment.dataFiles().length * shardCount) + shardCount, 0); + + Matcher outputMatcher = allOf( + containsString(messageText), + conditionalNot(containsString("testIndex"), verbose == false || hasClusterState == false), + conditionalNot(containsString("no name for uuid: testUUID"), verbose == false || hasClusterState) + ); + + verifyUnchangedOnAbort(noDataNoClusterManagerSettings, outputMatcher, verbose); + + // verify test setup + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataNoClusterManagerSettings, environment).close()); + + verifySuccess(noDataNoClusterManagerSettings, outputMatcher, verbose); + + // verify cleaned. 
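            // Illustrative note, not part of this patch: the expected shard count above combines
            // both storage locations. Worked example: assuming the test environment has two data
            // paths and shardCount is 3, the command must report 2 * 3 = 6 local shard
            // directories plus 3 file-cache shard directories, i.e.
            // noClusterManagerMessage(1, 9, 0) for one index and nine shard paths.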
+ new NodeEnvironment(noDataNoClusterManagerSettings, environment).close(); + } + static void verifySuccess(Settings settings, Matcher outputMatcher, boolean verbose) throws Exception { withTerminal(verbose, outputMatcher, terminal -> { terminal.addTextInput(randomFrom("y", "Y")); @@ -256,6 +353,10 @@ private static void executeRepurposeCommand(MockTerminal terminal, Settings sett } private void createIndexDataFiles(Settings settings, int shardCount, boolean writeClusterState) throws IOException { + createIndexDataFiles(settings, shardCount, writeClusterState, false); + } + + private void createIndexDataFiles(Settings settings, int shardCount, boolean writeClusterState, boolean cacheMode) throws IOException { int shardDataDirNumber = randomInt(10); Environment environment = TestEnvironment.newEnvironment(settings); try (NodeEnvironment env = new NodeEnvironment(settings, environment)) { @@ -287,12 +388,23 @@ private void createIndexDataFiles(Settings settings, int shardCount, boolean wri ); } } - for (Path path : env.indexPaths(INDEX)) { + + if (cacheMode) { + Path cachePath = env.fileCacheNodePath().fileCachePath; + cachePath = cachePath.resolve(String.valueOf(env.getNodeLockId())).resolve(INDEX.getUUID()); for (int i = 0; i < shardCount; ++i) { - Files.createDirectories(path.resolve(Integer.toString(shardDataDirNumber))); + Files.createDirectories(cachePath.resolve(Integer.toString(shardDataDirNumber))); shardDataDirNumber += randomIntBetween(1, 10); } + } else { + for (Path path : env.indexPaths(INDEX)) { + for (int i = 0; i < shardCount; ++i) { + Files.createDirectories(path.resolve(Integer.toString(shardDataDirNumber))); + shardDataDirNumber += randomIntBetween(1, 10); + } + } } + } } diff --git a/server/src/test/java/org/opensearch/extensions/RegisterTransportActionsRequestTests.java b/server/src/test/java/org/opensearch/extensions/RegisterTransportActionsRequestTests.java index eb59c80ac6461..27f1597e5779f 100644 --- a/server/src/test/java/org/opensearch/extensions/RegisterTransportActionsRequestTests.java +++ b/server/src/test/java/org/opensearch/extensions/RegisterTransportActionsRequestTests.java @@ -9,20 +9,19 @@ package org.opensearch.extensions; import org.junit.Before; -import org.opensearch.action.admin.indices.create.AutoCreateAction.TransportAction; -import org.opensearch.common.collect.Map; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.util.Set; public class RegisterTransportActionsRequestTests extends OpenSearchTestCase { private RegisterTransportActionsRequest originalRequest; @Before public void setup() { - this.originalRequest = new RegisterTransportActionsRequest("extension-uniqueId", Map.of("testAction", TransportAction.class)); + this.originalRequest = new RegisterTransportActionsRequest("extension-uniqueId", Set.of("testAction")); } public void testRegisterTransportActionsRequest() throws IOException { @@ -31,16 +30,12 @@ public void testRegisterTransportActionsRequest() throws IOException { StreamInput input = output.bytes().streamInput(); RegisterTransportActionsRequest parsedRequest = new RegisterTransportActionsRequest(input); assertEquals(parsedRequest.getTransportActions(), originalRequest.getTransportActions()); - assertEquals(parsedRequest.getTransportActions().get("testAction"), originalRequest.getTransportActions().get("testAction")); assertEquals(parsedRequest.getTransportActions().size(), 
originalRequest.getTransportActions().size()); assertEquals(parsedRequest.hashCode(), originalRequest.hashCode()); assertTrue(originalRequest.equals(parsedRequest)); } public void testToString() { - assertEquals( - originalRequest.toString(), - "TransportActionsRequest{uniqueId=extension-uniqueId, actions={testAction=class org.opensearch.action.admin.indices.create.AutoCreateAction$TransportAction}}" - ); + assertEquals(originalRequest.toString(), "TransportActionsRequest{uniqueId=extension-uniqueId, actions=[testAction]}"); } } diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java index 6f29cd780aaa8..2d0821a0fb7dd 100644 --- a/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java @@ -11,7 +11,6 @@ import org.junit.After; import org.junit.Before; import org.opensearch.Version; -import org.opensearch.action.admin.indices.create.AutoCreateAction.TransportAction; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.NamedWriteableRegistry; @@ -39,6 +38,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyMap; @@ -118,7 +118,7 @@ public void testRegisterAction() { public void testRegisterTransportActionsRequest() { String action = "test-action"; - RegisterTransportActionsRequest request = new RegisterTransportActionsRequest("uniqueid1", Map.of(action, TransportAction.class)); + RegisterTransportActionsRequest request = new RegisterTransportActionsRequest("uniqueid1", Set.of(action)); AcknowledgedResponse response = (AcknowledgedResponse) extensionTransportActionsHandler.handleRegisterTransportActionsRequest( request ); @@ -150,10 +150,7 @@ public void testSendTransportRequestToExtension() throws InterruptedException { ); // Register Action - RegisterTransportActionsRequest registerRequest = new RegisterTransportActionsRequest( - "uniqueid1", - Map.of(action, TransportAction.class) - ); + RegisterTransportActionsRequest registerRequest = new RegisterTransportActionsRequest("uniqueid1", Set.of(action)); AcknowledgedResponse response = (AcknowledgedResponse) extensionTransportActionsHandler.handleRegisterTransportActionsRequest( registerRequest ); diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStateTests.java index d1c177ae5964c..8358dc84b2e0e 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStateTests.java @@ -114,9 +114,9 @@ public void testCustomMetadataNoChange() { public void testIndexTemplateValidation() { Metadata metadata = randomMetadata(); - MetadataUpgrader metadataUpgrader = new MetadataUpgrader( - Collections.singletonList(customs -> { throw new IllegalStateException("template is incompatible"); }) - ); + MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.singletonList(customs -> { + throw new IllegalStateException("template is incompatible"); + })); String message = expectThrows( IllegalStateException.class, () -> GatewayMetaState.upgradeMetadata(metadata, new 
MockMetadataIndexUpgradeService(false), metadataUpgrader) diff --git a/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java index 3c39ec9f03b2a..9a1f3618fcd8a 100644 --- a/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java @@ -220,9 +220,9 @@ public void testPreferReplicaWithHighestPrimaryTerm() { allocId2, allocId3 ); - testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 20, 10, 101, 1)); - testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 22, 10, 120, 2)); - testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 2)); + testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 20, 101, 1)); + testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 22, 120, 2)); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 20, 120, 2)); allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -253,9 +253,9 @@ public void testPreferReplicaWithNullReplicationCheckpoint() { allocId2, allocId3 ); - testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 20, 10, 101, 1)); + testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 20, 101, 1)); testAllocator.addData(node2, allocId2, false); - testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 40, 10, 120, 2)); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 40, 120, 2)); allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -319,9 +319,9 @@ public void testPreferReplicaWithHighestSegmentInfoVersion() { allocId2, allocId3 ); - testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 10, 10, 101, 1)); - testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 3)); - testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 2)); + testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 20, 120, 3)); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 20, 120, 2)); allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -351,9 +351,9 @@ public void testOutOfSyncHighestRepCheckpointIsIgnored() { allocId1, allocId3 ); - testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 10, 10, 101, 1)); - testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 2)); - testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 15, 10, 120, 2)); + testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 20, 120, 2)); + testAllocator.addData(node3, 
allocId3, false, new ReplicationCheckpoint(shardId, 15, 120, 2)); allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -384,9 +384,9 @@ public void testPreferAllocatingPreviousPrimaryWithLowerRepCheckpoint() { allocId2, allocId3 ); - testAllocator.addData(node1, allocId1, true, new ReplicationCheckpoint(shardId, 10, 10, 101, 1)); - testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 2)); - testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 15, 10, 120, 2)); + testAllocator.addData(node1, allocId1, true, new ReplicationCheckpoint(shardId, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 20, 120, 2)); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 15, 120, 2)); allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index b266335f3d572..663e9e5007a8f 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -100,8 +100,9 @@ public void testSettingsUpdateValidator() { Setting integerSetting = Setting.intSetting("index.test.setting.int", -1, Property.Dynamic, Property.IndexScope); IndexMetadata metadata = newIndexMeta("index", theSettings); IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), Settings.EMPTY, integerSetting); - settings.getScopedSettings() - .addSettingsUpdateConsumer(integerSetting, integer::set, (i) -> { if (i == 42) throw new AssertionError("boom"); }); + settings.getScopedSettings().addSettingsUpdateConsumer(integerSetting, integer::set, (i) -> { + if (i == 42) throw new AssertionError("boom"); + }); assertEquals(version, settings.getIndexVersionCreated()); assertEquals("0xdeadbeef", settings.getUUID()); @@ -654,15 +655,15 @@ public void testArchivedSettingsValidation() { } public void testArchiveBrokenIndexSettings() { - Settings settings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrInvalidSettings( - Settings.EMPTY, - e -> { assert false : "should not have been invoked, no unknown settings"; }, - (e, ex) -> { assert false : "should not have been invoked, no invalid settings"; } - ); + Settings settings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrInvalidSettings(Settings.EMPTY, e -> { + assert false : "should not have been invoked, no unknown settings"; + }, (e, ex) -> { assert false : "should not have been invoked, no invalid settings"; }); assertSame(settings, Settings.EMPTY); settings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrInvalidSettings( Settings.builder().put("index.refresh_interval", "-200").build(), - e -> { assert false : "should not have been invoked, no invalid settings"; }, + e -> { + assert false : "should not have been invoked, no invalid settings"; + }, (e, ex) -> { assertThat(e.getKey(), equalTo("index.refresh_interval")); assertThat(e.getValue(), equalTo("-200")); @@ -673,11 +674,9 @@ public void testArchiveBrokenIndexSettings() { assertNull(settings.get("index.refresh_interval")); Settings prevSettings = settings; // no double 
archive - settings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrInvalidSettings( - prevSettings, - e -> { assert false : "should not have been invoked, no unknown settings"; }, - (e, ex) -> { assert false : "should not have been invoked, no invalid settings"; } - ); + settings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrInvalidSettings(prevSettings, e -> { + assert false : "should not have been invoked, no unknown settings"; + }, (e, ex) -> { assert false : "should not have been invoked, no invalid settings"; }); assertSame(prevSettings, settings); settings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrInvalidSettings( diff --git a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java new file mode 100644 index 0000000000000..a050a4c2243db --- /dev/null +++ b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java @@ -0,0 +1,207 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.mockito.stubbing.Answer; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; +import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static java.util.Arrays.asList; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_TIME_SETTING; +import static org.opensearch.index.SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED; + +public class SegmentReplicationPressureServiceTests extends OpenSearchIndexLevelReplicationTestCase { + + private static final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) + .put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueSeconds(5)) + .build(); + + public void testIsSegrepLimitBreached() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + SegmentReplicationPressureService service = buildPressureService(settings, primaryShard); + + indexInBatches(5, shards, primaryShard); + + SegmentReplicationStats segmentReplicationStats = service.nodeStats(); + Map shardStats = segmentReplicationStats.getShardStats(); + assertEquals(1, shardStats.size()); + SegmentReplicationPerGroupStats groupStats = shardStats.get(primaryShard.shardId()); + assertEquals(0, 
groupStats.getRejectedRequestCount()); + Set replicas = groupStats.getReplicaStats(); + assertEquals(1, replicas.size()); + SegmentReplicationShardStats replicaStats = replicas.stream().findFirst().get(); + assertEquals(5, replicaStats.getCheckpointsBehindCount()); + + assertBusy( + () -> expectThrows(OpenSearchRejectedExecutionException.class, () -> service.isSegrepLimitBreached(primaryShard.shardId())), + 30, + TimeUnit.SECONDS + ); + assertBusy( + () -> expectThrows(OpenSearchRejectedExecutionException.class, () -> service.isSegrepLimitBreached(primaryShard.shardId())), + 30, + TimeUnit.SECONDS + ); + + // let shard catch up + replicateSegments(primaryShard, shards.getReplicas()); + + segmentReplicationStats = service.nodeStats(); + shardStats = segmentReplicationStats.getShardStats(); + assertEquals(1, shardStats.size()); + groupStats = shardStats.get(primaryShard.shardId()); + assertEquals(2, groupStats.getRejectedRequestCount()); + replicas = groupStats.getReplicaStats(); + assertEquals(1, replicas.size()); + replicaStats = replicas.stream().findFirst().get(); + assertEquals(0, replicaStats.getCheckpointsBehindCount()); + + service.isSegrepLimitBreached(primaryShard.shardId()); + } + } + + public void testIsSegrepLimitBreached_onlyCheckpointLimitBreached() throws Exception { + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) + .build(); + + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + SegmentReplicationPressureService service = buildPressureService(settings, primaryShard); + + indexInBatches(5, shards, primaryShard); + + Set replicationStats = primaryShard.getReplicationStats(); + assertEquals(1, replicationStats.size()); + SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); + assertEquals(5, shardStats.getCheckpointsBehindCount()); + + service.isSegrepLimitBreached(primaryShard.shardId()); + + replicateSegments(primaryShard, shards.getReplicas()); + service.isSegrepLimitBreached(primaryShard.shardId()); + final SegmentReplicationStats segmentReplicationStats = service.nodeStats(); + assertEquals(0, segmentReplicationStats.getShardStats().get(primaryShard.shardId()).getRejectedRequestCount()); + } + } + + public void testIsSegrepLimitBreached_onlyTimeLimitBreached() throws Exception { + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) + .build(); + + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + final SegmentReplicationPressureService service = buildPressureService(settings, primaryShard); + + indexInBatches(1, shards, primaryShard); + + assertBusy(() -> { + Set replicationStats = primaryShard.getReplicationStats(); + assertEquals(1, replicationStats.size()); + SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); + assertTrue(shardStats.getCurrentReplicationTimeMillis() > TimeValue.timeValueSeconds(5).millis()); + }); + + service.isSegrepLimitBreached(primaryShard.shardId()); + replicateSegments(primaryShard, shards.getReplicas()); + service.isSegrepLimitBreached(primaryShard.shardId()); + final 
SegmentReplicationStats segmentReplicationStats = service.nodeStats(); + assertEquals(0, segmentReplicationStats.getShardStats().get(primaryShard.shardId()).getRejectedRequestCount()); + } + } + + public void testIsSegrepLimitBreached_underStaleNodeLimit() throws Exception { + try (ReplicationGroup shards = createGroup(3, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + indexInBatches(5, shards, primaryShard); + SegmentReplicationPressureService service = buildPressureService(settings, primaryShard); + + assertBusy(() -> { + Set replicationStats = primaryShard.getReplicationStats(); + assertEquals(3, replicationStats.size()); + SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); + assertTrue(shardStats.getCurrentReplicationTimeMillis() > TimeValue.timeValueSeconds(5).millis()); + }); + + expectThrows(OpenSearchRejectedExecutionException.class, () -> service.isSegrepLimitBreached(primaryShard.shardId())); + + SegmentReplicationStats segmentReplicationStats = service.nodeStats(); + assertEquals(1, segmentReplicationStats.getShardStats().get(primaryShard.shardId()).getRejectedRequestCount()); + + // update one replica. 2/3 stale. + final List replicas = shards.getReplicas(); + replicateSegments(primaryShard, asList(replicas.get(0))); + + expectThrows(OpenSearchRejectedExecutionException.class, () -> service.isSegrepLimitBreached(primaryShard.shardId())); + + segmentReplicationStats = service.nodeStats(); + assertEquals(2, segmentReplicationStats.getShardStats().get(primaryShard.shardId()).getRejectedRequestCount()); + + // update second replica - 1/3 stale - should not throw. + replicateSegments(primaryShard, asList(replicas.get(1))); + service.isSegrepLimitBreached(primaryShard.shardId()); + + // catch up all. 
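            // Illustrative note, not part of this patch: the sequence above exercises the
            // stale-replica ratio limit. Rejections fire while 3/3 and then 2/3 replicas are
            // stale, but not at 1/3, consistent with a majority-style threshold. A rough sketch
            // of the decision, with hypothetical names and an assumed 0.5 ratio limit:
            //
            //     double staleRatio = (double) staleReplicaCount / totalReplicaCount;
            //     if (staleRatio > maxAllowedStaleLimit) {    // assumed default 0.5
            //         throw new OpenSearchRejectedExecutionException("replicas too far behind");
            //     }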
+            replicateSegments(primaryShard, shards.getReplicas());
+            service.isSegrepLimitBreached(primaryShard.shardId());
+        }
+    }
+
+    private int indexInBatches(int count, ReplicationGroup shards, IndexShard primaryShard) throws Exception {
+        int totalDocs = 0;
+        for (int i = 0; i < count; i++) {
+            int numDocs = randomIntBetween(100, 200);
+            totalDocs += numDocs;
+            shards.indexDocs(numDocs);
+            primaryShard.refresh("Test");
+        }
+        return totalDocs;
+    }
+
+    private SegmentReplicationPressureService buildPressureService(Settings settings, IndexShard primaryShard) {
+        IndicesService indicesService = mock(IndicesService.class);
+        IndexService indexService = mock(IndexService.class);
+        when(indicesService.iterator()).thenAnswer((Answer<Iterator<IndexService>>) invocation -> List.of(indexService).iterator());
+        when(indexService.iterator()).thenAnswer((Answer<Iterator<IndexShard>>) invocation -> List.of(primaryShard).iterator());
+        when(indicesService.indexService(primaryShard.shardId().getIndex())).thenReturn(indexService);
+        when(indexService.getShard(primaryShard.shardId().id())).thenReturn(primaryShard);
+        ClusterService clusterService = mock(ClusterService.class);
+        when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
+
+        return new SegmentReplicationPressureService(settings, clusterService, indicesService);
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/opensearch/index/analysis/AnalysisRegistryTests.java index cbc189be491cd..739e26ed3d677 100644
--- a/server/src/test/java/org/opensearch/index/analysis/AnalysisRegistryTests.java
+++ b/server/src/test/java/org/opensearch/index/analysis/AnalysisRegistryTests.java
@@ -475,13 +475,10 @@ public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
             .build();
         IndexSettings exceptionSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
-        IllegalArgumentException e = expectThrows(
-            IllegalArgumentException.class,
-            () -> {
-                new AnalysisModule(TestEnvironment.newEnvironment(settings), singletonList(plugin)).getAnalysisRegistry()
-                    .build(exceptionSettings);
-            }
-        );
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
+            new AnalysisModule(TestEnvironment.newEnvironment(settings), singletonList(plugin)).getAnalysisRegistry()
+                .build(exceptionSettings);
+        });
         assertEquals("Cannot use token filter [exception]", e.getMessage());
     }
diff --git a/server/src/test/java/org/opensearch/index/analysis/AnalysisTests.java b/server/src/test/java/org/opensearch/index/analysis/AnalysisTests.java index 0446ac78d4efc..d4b825ce09e92 100644
--- a/server/src/test/java/org/opensearch/index/analysis/AnalysisTests.java
+++ b/server/src/test/java/org/opensearch/index/analysis/AnalysisTests.java
@@ -124,15 +124,9 @@ public void testParseWordListError() throws IOException {
             writer.write('\n');
         }
         Environment env = TestEnvironment.newEnvironment(nodeSettings);
-        RuntimeException ex = expectThrows(
-            RuntimeException.class,
-            () -> Analysis.parseWordList(
-                env,
-                nodeSettings,
-                "foo.bar",
-                s -> { throw new RuntimeException("Error while parsing rule = " + s); }
-            )
-        );
+        RuntimeException ex = expectThrows(RuntimeException.class, () -> Analysis.parseWordList(env, nodeSettings, "foo.bar", s -> {
+            throw new RuntimeException("Error while parsing rule = " + s);
+        }));
         assertEquals("Line [1]: Error while parsing rule = abcd", ex.getMessage());
     }
@@ -146,15 +140,9 @@ public void testParseWordListOutsideConfigDirError() throws IOException {
         }
         Settings
nodeSettings = Settings.builder().put("foo.bar_path", dict).put(Environment.PATH_HOME_SETTING.getKey(), home).build(); Environment env = TestEnvironment.newEnvironment(nodeSettings); - RuntimeException ex = expectThrows( - RuntimeException.class, - () -> Analysis.parseWordList( - env, - nodeSettings, - "foo.bar", - s -> { throw new RuntimeException("Error while parsing rule = " + s); } - ) - ); + RuntimeException ex = expectThrows(RuntimeException.class, () -> Analysis.parseWordList(env, nodeSettings, "foo.bar", s -> { + throw new RuntimeException("Error while parsing rule = " + s); + })); assertEquals("Line [1]: Invalid rule", ex.getMessage()); } } diff --git a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java index d960fa910fde6..74ce2d5bba186 100644 --- a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java +++ b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java @@ -59,9 +59,9 @@ public class CompletionStatsCacheTests extends OpenSearchTestCase { public void testExceptionsAreNotCached() { final AtomicInteger openCount = new AtomicInteger(); - final CompletionStatsCache completionStatsCache = new CompletionStatsCache( - () -> { throw new OpenSearchException("simulated " + openCount.incrementAndGet()); } - ); + final CompletionStatsCache completionStatsCache = new CompletionStatsCache(() -> { + throw new OpenSearchException("simulated " + openCount.incrementAndGet()); + }); assertThat(expectThrows(OpenSearchException.class, completionStatsCache::get).getMessage(), equalTo("simulated 1")); assertThat(expectThrows(OpenSearchException.class, completionStatsCache::get).getMessage(), equalTo("simulated 2")); diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index 26a73c4ea460c..d0d63dc96a86a 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -11,9 +11,11 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.search.ReferenceManager; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; import org.opensearch.index.seqno.LocalCheckpointTracker; @@ -120,7 +122,7 @@ public void testUpdateSegments_replicaReceivesSISWithHigherGen() throws IOExcept // flush the primary engine - we don't need any segments, just force a new commit point. engine.flush(true, true); assertEquals(3, engine.getLatestSegmentInfos().getGeneration()); - nrtEngine.updateSegments(engine.getLatestSegmentInfos(), engine.getProcessedLocalCheckpoint()); + nrtEngine.updateSegments(engine.getLatestSegmentInfos()); assertEquals(3, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); assertEquals(3, nrtEngine.getLatestSegmentInfos().getGeneration()); } @@ -144,7 +146,7 @@ public void testUpdateSegments_replicaReceivesSISWithLowerGen() throws IOExcepti // update the replica with segments_2 from the primary. 
final SegmentInfos primaryInfos = engine.getLatestSegmentInfos(); assertEquals(2, primaryInfos.getGeneration()); - nrtEngine.updateSegments(primaryInfos, engine.getProcessedLocalCheckpoint()); + nrtEngine.updateSegments(primaryInfos); assertEquals(4, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); assertEquals(4, nrtEngine.getLatestSegmentInfos().getGeneration()); assertEquals(primaryInfos.getVersion(), nrtEngine.getLatestSegmentInfos().getVersion()); @@ -172,13 +174,35 @@ public void testUpdateSegments_replicaCommitsFirstReceivedInfos() throws IOExcep // update replica with the latest primary infos, it will be the same gen, segments_2, ensure it is also committed. final SegmentInfos primaryInfos = engine.getLatestSegmentInfos(); assertEquals(2, primaryInfos.getGeneration()); - nrtEngine.updateSegments(primaryInfos, engine.getProcessedLocalCheckpoint()); + nrtEngine.updateSegments(primaryInfos); final SegmentInfos lastCommittedSegmentInfos = nrtEngine.getLastCommittedSegmentInfos(); assertEquals(primaryInfos.getVersion(), nrtEngine.getLatestSegmentInfos().getVersion()); assertEquals(primaryInfos.getVersion(), lastCommittedSegmentInfos.getVersion()); } } + public void testRefreshOnNRTEngine() throws IOException { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + + try ( + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); + final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) + ) { + assertEquals(2, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + assertEquals(2, nrtEngine.getLatestSegmentInfos().getGeneration()); + + ReferenceManager<OpenSearchDirectoryReader> referenceManager = nrtEngine.getReferenceManager(Engine.SearcherScope.EXTERNAL); + OpenSearchDirectoryReader readerBeforeRefresh = referenceManager.acquire(); + + nrtEngine.refresh("test refresh"); + OpenSearchDirectoryReader readerAfterRefresh = referenceManager.acquire(); + + // Verify the readers before and after refresh are the same and no segments have changed + assertSame(readerBeforeRefresh, readerAfterRefresh); + + } + } + public void testTrimTranslogOps() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); diff --git a/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java b/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java index d6fd079ac59e8..94d74c21bc977 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java @@ -327,10 +327,9 @@ private void doTestRequireDocValues(MappedFieldType ft) { if (ft.hasDocValues()) { ifds.getForField(ft, "test", () -> { throw new UnsupportedOperationException(); }); // no exception } else { - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> ifds.getForField(ft, "test", () -> { throw new UnsupportedOperationException(); }) - ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> ifds.getForField(ft, "test", () -> { + throw new UnsupportedOperationException(); + })); assertThat(e.getMessage(), containsString("doc values")); } } finally { diff --git a/server/src/test/java/org/opensearch/index/get/DocumentFieldTests.java b/server/src/test/java/org/opensearch/index/get/DocumentFieldTests.java index a2cf04c072892..cc1d3fe17e1d3 100644 ---
a/server/src/test/java/org/opensearch/index/get/DocumentFieldTests.java +++ b/server/src/test/java/org/opensearch/index/get/DocumentFieldTests.java @@ -34,7 +34,6 @@ import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.collect.Map; import org.opensearch.common.collect.Tuple; import org.opensearch.common.document.DocumentField; import org.opensearch.common.xcontent.ToXContent; @@ -49,6 +48,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.function.Predicate; import java.util.function.Supplier; diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java index 918b86761fe86..9e1d3f1e08bc2 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java @@ -34,7 +34,6 @@ import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; -import org.opensearch.common.collect.List; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.index.termvectors.TermVectorsService; @@ -44,6 +43,7 @@ import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; +import java.util.List; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.notNullValue; diff --git a/server/src/test/java/org/opensearch/index/mapper/DocumentMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DocumentMapperTests.java index 9a1113d04a6f6..fa6ef72552faf 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DocumentMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DocumentMapperTests.java @@ -266,12 +266,7 @@ public void testMergeMetaForIndexTemplate() throws IOException { b.endObject(); })); - Map expected = org.opensearch.common.collect.Map.of( - "field", - "value", - "object", - org.opensearch.common.collect.Map.of("field1", "value1", "field2", "value2") - ); + Map expected = Map.of("field", "value", "object", Map.of("field1", "value1", "field2", "value2")); assertThat(initMapper.meta(), equalTo(expected)); DocumentMapper updatedMapper = createDocumentMapper(fieldMapping(b -> b.field("type", "text"))); @@ -293,12 +288,7 @@ public void testMergeMetaForIndexTemplate() throws IOException { })); mergedMapper = mergedMapper.merge(updatedMapper.mapping(), MergeReason.INDEX_TEMPLATE); - expected = org.opensearch.common.collect.Map.of( - "field", - "value", - "object", - org.opensearch.common.collect.Map.of("field1", "value1", "field2", "new_value", "field3", "value3") - ); + expected = Map.of("field", "value", "object", Map.of("field1", "value1", "field2", "new_value", "field3", "value3")); assertThat(mergedMapper.meta(), equalTo(expected)); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java index 659042c37d650..6d69e62bc7dd8 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java @@ -1486,4 +1486,156 @@ public void testTypeless() throws IOException { ParsedDocument doc = mapper.parse(source(b -> b.field("foo", "1234"))); assertNull(doc.dynamicMappingsUpdate()); // no 
update since we reused the existing type } + + public void testDocumentContainsDeepNestedFieldParsing() throws Exception { + DocumentMapper mapper = createDocumentMapper(mapping(b -> {})); + ParsedDocument doc = mapper.parse(source(b -> { + b.startObject("inner1"); + { + b.field("inner1_field1", "inner1_value1"); + b.startObject("inner2"); + { + b.startObject("inner3"); + { + b.field("inner3_field1", "inner3_value1"); + b.field("inner3_field2", "inner3_value2"); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + + Mapping update = doc.dynamicMappingsUpdate(); + assertNotNull(update); // dynamic mapping update + + Mapper objMapper = update.root().getMapper("inner1"); + Mapper inner1_field1_mapper = ((ObjectMapper) objMapper).getMapper("inner1_field1"); + assertNotNull(inner1_field1_mapper); + Mapper inner2_mapper = ((ObjectMapper) objMapper).getMapper("inner2"); + assertNotNull(inner2_mapper); + Mapper inner3_mapper = ((ObjectMapper) inner2_mapper).getMapper("inner3"); + assertNotNull(inner3_mapper); + assertThat(doc.rootDoc().get("inner1.inner2.inner3.inner3_field1"), equalTo("inner3_value1")); + } + + public void testDocumentContainsDeepNestedFieldParsingFail() throws Exception { + DocumentMapper mapper = createDocumentMapper(mapping(b -> {})); + long depth_limit = MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getDefault(Settings.EMPTY); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> { + for (int i = 1; i <= depth_limit; i++) { + b.startObject("inner" + i); + } + b.field("inner_field", "inner_value"); + for (int i = 1; i <= depth_limit; i++) { + b.endObject(); + } + }))); + + // check that parsing succeeds with valid doc + // after throwing exception + ParsedDocument doc = mapper.parse(source(b -> { + b.startObject("inner1"); + { + b.startObject("inner2"); + { + b.startObject("inner3"); + { + b.field("inner3_field1", "inner3_value1"); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + + Mapping update = doc.dynamicMappingsUpdate(); + assertNotNull(update); // dynamic mapping update + Mapper objMapper = update.root().getMapper("inner1"); + Mapper inner2_mapper = ((ObjectMapper) objMapper).getMapper("inner2"); + assertNotNull(inner2_mapper); + Mapper inner3_mapper = ((ObjectMapper) inner2_mapper).getMapper("inner3"); + assertNotNull(inner3_mapper); + assertThat(doc.rootDoc().get("inner1.inner2.inner3.inner3_field1"), equalTo("inner3_value1")); + } + + public void testDocumentContainsDeepNestedFieldParsingShouldFail() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> { b.field("type", "nested"); })); + long depth_limit = MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getDefault(Settings.EMPTY); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> { + b.startObject("field"); + b.startArray("inner"); + for (int i = 1; i <= depth_limit; i++) { + b.startArray(); + } + b.startArray().value(0).value(0).endArray(); + for (int i = 1; i <= depth_limit; i++) { + b.endArray(); + } + b.endArray(); + b.endObject(); + }))); + // check parsing success for nested array within allowed depth limit + ParsedDocument doc = mapper.parse(source(b -> { + b.startObject("field"); + b.startArray("inner"); + for (int i = 1; i < depth_limit - 1; i++) { + b.startArray(); + } + b.startArray().value(0).value(0).endArray(); + for (int i = 1; i < depth_limit - 1; i++) { + b.endArray(); + } + b.endArray(); + b.endObject(); + } + + )); + Mapping 
update = doc.dynamicMappingsUpdate(); + assertNotNull(update); // dynamic mapping update + + } + + // Test nesting up to max allowed depth with combination of nesting in object and array + // object -> array -> object -> array .... + public void testDocumentDeepNestedObjectAndArrayCombination() throws Exception { + DocumentMapper mapper = createDocumentMapper(mapping(b -> {})); + long depth_limit = MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getDefault(Settings.EMPTY); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> { + for (int i = 1; i < depth_limit; i++) { + b.startArray("foo" + 1); + b.startObject(); + } + b.startArray("bar"); + b.startArray().value(0).value(0).endArray(); + b.endArray(); + for (int i = 1; i < depth_limit; i++) { + b.endObject(); + b.endArray(); + } + }))); + + // check parsing success for nested array within allowed depth limit + ParsedDocument doc = mapper.parse(source(b -> { + for (int i = 1; i < depth_limit - 1; i++) { + b.startArray("foo" + 1); + b.startObject(); + } + b.startArray("bar"); + b.startArray().value(0).value(0).endArray(); + b.endArray(); + for (int i = 1; i < depth_limit - 1; i++) { + b.endObject(); + b.endArray(); + } + } + + )); + Mapping update = doc.dynamicMappingsUpdate(); + assertNotNull(update); // dynamic mapping update + + } + } diff --git a/server/src/test/java/org/opensearch/index/mapper/FieldTypeLookupTests.java b/server/src/test/java/org/opensearch/index/mapper/FieldTypeLookupTests.java index 9aa9c7d5074e7..583f9ab48edad 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FieldTypeLookupTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FieldTypeLookupTests.java @@ -32,13 +32,13 @@ package org.opensearch.index.mapper; -import org.opensearch.common.collect.Set; import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Iterator; +import java.util.Set; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; diff --git a/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java index 1b4c95d9ceb8f..5ace447afd91e 100644 --- a/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java @@ -56,7 +56,7 @@ public class GeoPointFieldMapperTests extends FieldMapperTestCase2<GeoPointField @Override protected Set<String> unsupportedProperties() { - return org.opensearch.common.collect.Set.of("analyzer", "similarity", "doc_values"); + return Set.of("analyzer", "similarity", "doc_values"); } @Override diff --git a/server/src/test/java/org/opensearch/index/mapper/GeoShapeFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/GeoShapeFieldMapperTests.java index 6e3216e934b07..a1622f8e9de09 100644 --- a/server/src/test/java/org/opensearch/index/mapper/GeoShapeFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/GeoShapeFieldMapperTests.java @@ -33,7 +33,6 @@ import org.opensearch.common.Explicit; import org.opensearch.common.Strings; -import org.opensearch.common.collect.List; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; @@ -45,6 +44,7 @@ import java.io.IOException; import java.util.Collection; import java.util.Collections; +import
java.util.List; import java.util.Set; import static org.hamcrest.Matchers.containsString; @@ -56,7 +56,7 @@ public class GeoShapeFieldMapperTests extends FieldMapperTestCase2 unsupportedProperties() { - return org.opensearch.common.collect.Set.of("analyzer", "similarity", "doc_values", "store"); + return Set.of("analyzer", "similarity", "doc_values", "store"); } @Override diff --git a/server/src/test/java/org/opensearch/index/mapper/GeoShapeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/GeoShapeFieldTypeTests.java index f4767403ffbf7..127117ee9c7b2 100644 --- a/server/src/test/java/org/opensearch/index/mapper/GeoShapeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/GeoShapeFieldTypeTests.java @@ -49,13 +49,13 @@ public void testFetchSourceValue() throws IOException { MappedFieldType mapper = new GeoShapeFieldMapper.Builder("field").build(context).fieldType(); - Map jsonLineString = org.opensearch.common.collect.Map.of( + Map jsonLineString = Map.of( "type", "LineString", "coordinates", Arrays.asList(Arrays.asList(42.0, 27.1), Arrays.asList(30.0, 50.0)) ); - Map jsonPoint = org.opensearch.common.collect.Map.of("type", "Point", "coordinates", Arrays.asList(14.0, 15.0)); + Map jsonPoint = Map.of("type", "Point", "coordinates", Arrays.asList(14.0, 15.0)); String wktLineString = "LINESTRING (42.0 27.1, 30.0 50.0)"; String wktPoint = "POINT (14.0 15.0)"; diff --git a/server/src/test/java/org/opensearch/index/mapper/IdFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/IdFieldMapperTests.java index e897abad405d5..93d0596a9bfb5 100644 --- a/server/src/test/java/org/opensearch/index/mapper/IdFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/IdFieldMapperTests.java @@ -114,10 +114,9 @@ public void testEnableFieldData() throws IOException { .setTransientSettings(Settings.builder().put(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey(), false)) .get(); try { - IllegalArgumentException exc = expectThrows( - IllegalArgumentException.class, - () -> ft.fielddataBuilder("test", () -> { throw new UnsupportedOperationException(); }).build(null, null) - ); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> ft.fielddataBuilder("test", () -> { + throw new UnsupportedOperationException(); + }).build(null, null)); assertThat(exc.getMessage(), containsString(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey())); assertFalse(ft.isAggregatable()); } finally { diff --git a/server/src/test/java/org/opensearch/index/mapper/IpRangeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/IpRangeFieldTypeTests.java index c2c6293eec4bd..36edc4e92504b 100644 --- a/server/src/test/java/org/opensearch/index/mapper/IpRangeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/IpRangeFieldTypeTests.java @@ -47,11 +47,8 @@ public void testFetchSourceValue() throws IOException { Mapper.BuilderContext context = new Mapper.BuilderContext(settings, new ContentPath()); RangeFieldMapper mapper = new RangeFieldMapper.Builder("field", RangeType.IP, true, Version.V_EMPTY).build(context); - Map range = org.opensearch.common.collect.Map.of("gte", "2001:db8:0:0:0:0:2:1"); - assertEquals( - Collections.singletonList(org.opensearch.common.collect.Map.of("gte", "2001:db8::2:1")), - fetchSourceValue(mapper.fieldType(), range) - ); + Map range = Map.of("gte", "2001:db8:0:0:0:0:2:1"); + assertEquals(Collections.singletonList(Map.of("gte", "2001:db8::2:1")), 
fetchSourceValue(mapper.fieldType(), range)); assertEquals(Collections.singletonList("2001:db8::2:1/32"), fetchSourceValue(mapper.fieldType(), "2001:db8:0:0:0:0:2:1/32")); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldMapperTests.java index 7059e908c078f..3968742fddfef 100644 --- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldMapperTests.java @@ -134,7 +134,7 @@ protected Collection getPlugins() { protected IndexAnalyzers createIndexAnalyzers(IndexSettings indexSettings) { return new IndexAnalyzers( singletonMap("default", new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer())), - org.opensearch.common.collect.Map.of( + Map.of( "lowercase", new NamedAnalyzer("lowercase", AnalyzerScope.INDEX, new LowercaseNormalizer()), "other_lowercase", diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java index 6b7216a584ca9..ad529c685d6f3 100644 --- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java @@ -72,6 +72,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; public class KeywordFieldTypeTests extends FieldTypeTestCase { @@ -245,18 +246,12 @@ public void testFetchSourceValue() throws IOException { private static IndexAnalyzers createIndexAnalyzers() { return new IndexAnalyzers( - org.opensearch.common.collect.Map.of("default", new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer())), - org.opensearch.common.collect.Map.ofEntries( - org.opensearch.common.collect.Map.entry( - "lowercase", - new NamedAnalyzer("lowercase", AnalyzerScope.INDEX, new LowercaseNormalizer()) - ), - org.opensearch.common.collect.Map.entry( - "other_lowercase", - new NamedAnalyzer("other_lowercase", AnalyzerScope.INDEX, new LowercaseNormalizer()) - ) + Map.of("default", new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer())), + Map.ofEntries( + Map.entry("lowercase", new NamedAnalyzer("lowercase", AnalyzerScope.INDEX, new LowercaseNormalizer())), + Map.entry("other_lowercase", new NamedAnalyzer("other_lowercase", AnalyzerScope.INDEX, new LowercaseNormalizer())) ), - org.opensearch.common.collect.Map.of( + Map.of( "lowercase", new NamedAnalyzer( "lowercase", diff --git a/server/src/test/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapperTests.java index b43b7c51df1c7..a4dc54a74e912 100644 --- a/server/src/test/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapperTests.java @@ -39,7 +39,6 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.Explicit; import org.opensearch.common.Strings; -import org.opensearch.common.collect.List; import org.opensearch.common.geo.GeoUtils; import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.geo.SpatialStrategy; @@ -54,6 +53,7 @@ import java.io.IOException; import java.util.Collection; +import java.util.List; import java.util.Set; import static java.util.Collections.singletonMap; @@ -80,7 +80,7 @@ protected 
LegacyGeoShapeFieldMapper.Builder newBuilder() { @Override protected Set unsupportedProperties() { - return org.opensearch.common.collect.Set.of("analyzer", "similarity", "doc_values", "store"); + return Set.of("analyzer", "similarity", "doc_values", "store"); } @Override diff --git a/server/src/test/java/org/opensearch/index/mapper/LegacyGeoShapeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/LegacyGeoShapeFieldTypeTests.java index 5157232bea1a2..45161bed1d40e 100644 --- a/server/src/test/java/org/opensearch/index/mapper/LegacyGeoShapeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/LegacyGeoShapeFieldTypeTests.java @@ -63,13 +63,13 @@ public void testFetchSourceValue() throws IOException { MappedFieldType mapper = new LegacyGeoShapeFieldMapper.Builder("field").build(context).fieldType(); - Map jsonLineString = org.opensearch.common.collect.Map.of( + Map jsonLineString = Map.of( "type", "LineString", "coordinates", Arrays.asList(Arrays.asList(42.0, 27.1), Arrays.asList(30.0, 50.0)) ); - Map jsonPoint = org.opensearch.common.collect.Map.of("type", "Point", "coordinates", Arrays.asList(14.0, 15.0)); + Map jsonPoint = Map.of("type", "Point", "coordinates", Arrays.asList(14.0, 15.0)); String wktLineString = "LINESTRING (42.0 27.1, 30.0 50.0)"; String wktPoint = "POINT (14.0 15.0)"; diff --git a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java index b58c0bf69c298..e58edb33f0718 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java @@ -340,10 +340,9 @@ public void testFieldNameLengthLimit() throws Throwable { ) ); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> { mapperService.merge("type", mappingUpdate, updateOrPreflight()); } - ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + mapperService.merge("type", mappingUpdate, updateOrPreflight()); + }); assertEquals("Field name [" + testString + "] is longer than the limit of [" + maxFieldNameLength + "] characters", e.getMessage()); } @@ -371,10 +370,9 @@ public void testObjectNameLengthLimit() throws Throwable { ) ); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> { mapperService.merge("type", mapping, updateOrPreflight()); } - ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + mapperService.merge("type", mapping, updateOrPreflight()); + }); assertEquals("Field name [" + testString + "] is longer than the limit of [" + maxFieldNameLength + "] characters", e.getMessage()); } @@ -406,10 +404,9 @@ public void testAliasFieldNameLengthLimit() throws Throwable { ) ); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> { mapperService.merge("type", mapping, updateOrPreflight()); } - ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + mapperService.merge("type", mapping, updateOrPreflight()); + }); assertEquals("Field name [" + testString + "] is longer than the limit of [" + maxFieldNameLength + "] characters", e.getMessage()); } diff --git a/server/src/test/java/org/opensearch/index/mapper/NumberFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NumberFieldMapperTests.java index 81d4d8b994ee3..56357540393b2 100644 --- 
a/server/src/test/java/org/opensearch/index/mapper/NumberFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NumberFieldMapperTests.java @@ -55,12 +55,12 @@ public class NumberFieldMapperTests extends AbstractNumericFieldMapperTestCase { @Override protected Set types() { - return org.opensearch.common.collect.Set.of("byte", "short", "integer", "long", "float", "double", "half_float"); + return Set.of("byte", "short", "integer", "long", "float", "double", "half_float"); } @Override protected Set wholeTypes() { - return org.opensearch.common.collect.Set.of("byte", "short", "integer", "long"); + return Set.of("byte", "short", "integer", "long"); } @Override diff --git a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java index ca5e8d0c6e08d..910f242b59e61 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java @@ -553,10 +553,9 @@ public void doTestIndexSortRangeQueries(NumberType type, Supplier valueS // Create an index writer configured with the same index sort. NumberFieldType fieldType = new NumberFieldType("field", type); - IndexNumericFieldData fielddata = (IndexNumericFieldData) fieldType.fielddataBuilder( - "index", - () -> { throw new UnsupportedOperationException(); } - ).build(null, null); + IndexNumericFieldData fielddata = (IndexNumericFieldData) fieldType.fielddataBuilder("index", () -> { + throw new UnsupportedOperationException(); + }).build(null, null); SortField sortField = fielddata.sortField(null, MultiValueMode.MIN, null, randomBoolean()); IndexWriterConfig writerConfig = new IndexWriterConfig(); diff --git a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java index d6c89342c9df2..07b0ad822209c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java @@ -443,10 +443,9 @@ public void testEmptyName() throws Exception { ); // Empty name not allowed in index created after 5.0 - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> { createIndex("test").mapperService().documentMapperParser().parse("", new CompressedXContent(mapping)); } - ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + createIndex("test").mapperService().documentMapperParser().parse("", new CompressedXContent(mapping)); + }); assertThat(e.getMessage(), containsString("name cannot be empty string")); } diff --git a/server/src/test/java/org/opensearch/index/mapper/ParametrizedMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ParametrizedMapperTests.java index 3cd6df601089a..5286b06980ee1 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ParametrizedMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ParametrizedMapperTests.java @@ -69,7 +69,7 @@ public class ParametrizedMapperTests extends MapperServiceTestCase { public static class TestPlugin extends Plugin implements MapperPlugin { @Override public Map getMappers() { - return org.opensearch.common.collect.Map.of("test_mapper", new TypeParser()); + return Map.of("test_mapper", new TypeParser()); } } @@ -219,7 +219,7 @@ protected String contentType() { private static TestMapper fromMapping(String mapping, Version 
version) { MapperService mapperService = mock(MapperService.class); IndexAnalyzers indexAnalyzers = new IndexAnalyzers( - org.opensearch.common.collect.Map.of( + Map.of( "_standard", Lucene.STANDARD_ANALYZER, "_keyword", diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java index 0353173e25696..5e20ff8122d30 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java @@ -68,12 +68,12 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase { @Override protected Set types() { - return org.opensearch.common.collect.Set.of("date_range", "ip_range", "float_range", "double_range", "integer_range", "long_range"); + return Set.of("date_range", "ip_range", "float_range", "double_range", "integer_range", "long_range"); } @Override protected Set wholeTypes() { - return org.opensearch.common.collect.Set.of("integer_range", "long_range"); + return Set.of("integer_range", "long_range"); } @Override diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java index d4772f24cca93..668666a53cd7c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java @@ -538,20 +538,14 @@ public void testFetchSourceValue() throws IOException { MappedFieldType longMapper = new RangeFieldMapper.Builder("field", RangeType.LONG, true, Version.V_EMPTY).build(context) .fieldType(); - Map longRange = org.opensearch.common.collect.Map.of("gte", 3.14, "lt", "42.9"); - assertEquals( - Collections.singletonList(org.opensearch.common.collect.Map.of("gte", 3L, "lt", 42L)), - fetchSourceValue(longMapper, longRange) - ); + Map longRange = Map.of("gte", 3.14, "lt", "42.9"); + assertEquals(Collections.singletonList(Map.of("gte", 3L, "lt", 42L)), fetchSourceValue(longMapper, longRange)); MappedFieldType dateMapper = new RangeFieldMapper.Builder("field", RangeType.DATE, true, Version.V_EMPTY).format( "yyyy/MM/dd||epoch_millis" ).build(context).fieldType(); - Map dateRange = org.opensearch.common.collect.Map.of("lt", "1990/12/29", "gte", 597429487111L); - assertEquals( - Collections.singletonList(org.opensearch.common.collect.Map.of("lt", "1990/12/29", "gte", "1988/12/06")), - fetchSourceValue(dateMapper, dateRange) - ); + Map dateRange = Map.of("lt", "1990/12/29", "gte", 597429487111L); + assertEquals(Collections.singletonList(Map.of("lt", "1990/12/29", "gte", "1988/12/06")), fetchSourceValue(dateMapper, dateRange)); } public void testParseSourceValueWithFormat() throws IOException { @@ -560,23 +554,14 @@ public void testParseSourceValueWithFormat() throws IOException { MappedFieldType longMapper = new RangeFieldMapper.Builder("field", RangeType.LONG, true, Version.V_EMPTY).build(context) .fieldType(); - Map longRange = org.opensearch.common.collect.Map.of("gte", 3.14, "lt", "42.9"); - assertEquals( - Collections.singletonList(org.opensearch.common.collect.Map.of("gte", 3L, "lt", 42L)), - fetchSourceValue(longMapper, longRange) - ); + Map longRange = Map.of("gte", 3.14, "lt", "42.9"); + assertEquals(Collections.singletonList(Map.of("gte", 3L, "lt", 42L)), fetchSourceValue(longMapper, longRange)); MappedFieldType dateMapper = new RangeFieldMapper.Builder("field", RangeType.DATE, true, 
Version.V_EMPTY).format("strict_date_time") .build(context) .fieldType(); - Map dateRange = org.opensearch.common.collect.Map.of("lt", "1990-12-29T00:00:00.000Z"); - assertEquals( - Collections.singletonList(org.opensearch.common.collect.Map.of("lt", "1990/12/29")), - fetchSourceValue(dateMapper, dateRange, "yyy/MM/dd") - ); - assertEquals( - Collections.singletonList(org.opensearch.common.collect.Map.of("lt", "662428800000")), - fetchSourceValue(dateMapper, dateRange, "epoch_millis") - ); + Map dateRange = Map.of("lt", "1990-12-29T00:00:00.000Z"); + assertEquals(Collections.singletonList(Map.of("lt", "1990/12/29")), fetchSourceValue(dateMapper, dateRange, "yyy/MM/dd")); + assertEquals(Collections.singletonList(Map.of("lt", "662428800000")), fetchSourceValue(dateMapper, dateRange, "epoch_millis")); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java index a56ab2713d498..4148fe0bfea1b 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java @@ -230,20 +230,9 @@ public TokenStream create(TokenStream tokenStream) { ) ); return new IndexAnalyzers( - org.opensearch.common.collect.Map.of( - "default", - dflt, - "standard", - standard, - "keyword", - keyword, - "whitespace", - whitespace, - "my_stop_analyzer", - stop - ), - org.opensearch.common.collect.Map.of(), - org.opensearch.common.collect.Map.of() + Map.of("default", dflt, "standard", standard, "keyword", keyword, "whitespace", whitespace, "my_stop_analyzer", stop), + Map.of(), + Map.of() ); } @@ -512,10 +501,9 @@ public void testEagerGlobalOrdinals() throws IOException { public void testFielddata() throws IOException { MapperService disabledMapper = createMapperService(fieldMapping(this::minimalMapping)); - Exception e = expectThrows( - IllegalArgumentException.class, - () -> disabledMapper.fieldType("field").fielddataBuilder("test", () -> { throw new UnsupportedOperationException(); }) - ); + Exception e = expectThrows(IllegalArgumentException.class, () -> disabledMapper.fieldType("field").fielddataBuilder("test", () -> { + throw new UnsupportedOperationException(); + })); assertThat(e.getMessage(), containsString("Text fields are not optimised for operations that require per-document field data")); MapperService enabledMapper = createMapperService(fieldMapping(b -> b.field("type", "text").field("fielddata", true))); diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java index b9ec5a07b207d..206be8c8352ef 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java @@ -207,18 +207,18 @@ public void testFetchSourceValue() throws IOException { TextFieldType fieldType = createFieldType(); fieldType.setIndexAnalyzer(Lucene.STANDARD_ANALYZER); - assertEquals(org.opensearch.common.collect.List.of("value"), fetchSourceValue(fieldType, "value")); - assertEquals(org.opensearch.common.collect.List.of("42"), fetchSourceValue(fieldType, 42L)); - assertEquals(org.opensearch.common.collect.List.of("true"), fetchSourceValue(fieldType, true)); + assertEquals(List.of("value"), fetchSourceValue(fieldType, "value")); + assertEquals(List.of("42"), fetchSourceValue(fieldType, 42L)); + assertEquals(List.of("true"), 
fetchSourceValue(fieldType, true)); TextFieldMapper.PrefixFieldType prefixFieldType = new TextFieldMapper.PrefixFieldType(fieldType, "field._index_prefix", 2, 10); - assertEquals(org.opensearch.common.collect.List.of("value"), fetchSourceValue(prefixFieldType, "value")); - assertEquals(org.opensearch.common.collect.List.of("42"), fetchSourceValue(prefixFieldType, 42L)); - assertEquals(org.opensearch.common.collect.List.of("true"), fetchSourceValue(prefixFieldType, true)); + assertEquals(List.of("value"), fetchSourceValue(prefixFieldType, "value")); + assertEquals(List.of("42"), fetchSourceValue(prefixFieldType, 42L)); + assertEquals(List.of("true"), fetchSourceValue(prefixFieldType, true)); TextFieldMapper.PhraseFieldType phraseFieldType = new TextFieldMapper.PhraseFieldType(fieldType); - assertEquals(org.opensearch.common.collect.List.of("value"), fetchSourceValue(phraseFieldType, "value")); - assertEquals(org.opensearch.common.collect.List.of("42"), fetchSourceValue(phraseFieldType, 42L)); - assertEquals(org.opensearch.common.collect.List.of("true"), fetchSourceValue(phraseFieldType, true)); + assertEquals(List.of("value"), fetchSourceValue(phraseFieldType, "value")); + assertEquals(List.of("42"), fetchSourceValue(phraseFieldType, 42L)); + assertEquals(List.of("true"), fetchSourceValue(phraseFieldType, true)); } } diff --git a/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java b/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java index bf5e7e30a1410..12bab30dc6285 100644 --- a/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java @@ -237,12 +237,9 @@ static InnerHitBuilder mutate(InnerHitBuilder original) throws IOException { }); modifiers.add(() -> { if (randomBoolean()) { - copy.setScriptFields( - randomValueOtherThan( - copy.getScriptFields(), - () -> { return new HashSet<>(randomListStuff(16, InnerHitBuilderTests::randomScript)); } - ) - ); + copy.setScriptFields(randomValueOtherThan(copy.getScriptFields(), () -> { + return new HashSet<>(randomListStuff(16, InnerHitBuilderTests::randomScript)); + })); } else { SearchSourceBuilder.ScriptField script = randomScript(); copy.addScriptField(script.fieldName(), script.script()); diff --git a/server/src/test/java/org/opensearch/index/search/MultiMatchQueryTests.java b/server/src/test/java/org/opensearch/index/search/MultiMatchQueryTests.java index f0a70b55e0fdd..48d04e2aaa3a6 100644 --- a/server/src/test/java/org/opensearch/index/search/MultiMatchQueryTests.java +++ b/server/src/test/java/org/opensearch/index/search/MultiMatchQueryTests.java @@ -111,12 +111,9 @@ public void setup() throws IOException { } public void testCrossFieldMultiMatchQuery() throws IOException { - QueryShardContext queryShardContext = indexService.newQueryShardContext( - randomInt(20), - null, - () -> { throw new UnsupportedOperationException(); }, - null - ); + QueryShardContext queryShardContext = indexService.newQueryShardContext(randomInt(20), null, () -> { + throw new UnsupportedOperationException(); + }, null); queryShardContext.setAllowUnmappedFields(true); for (float tieBreaker : new float[] { 0.0f, 0.5f }) { Query parsedQuery = multiMatchQuery("banon").field("name.first", 2) @@ -141,14 +138,9 @@ public void testBlendTerms() { Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") }; float[] boosts = new float[] { 2, 3 }; Query expected = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f); - 
Query actual = MultiMatchQuery.blendTerm( - indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), - null, - 1f, - false, - Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3)) - ); + Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(randomInt(20), null, () -> { + throw new UnsupportedOperationException(); + }, null), new BytesRef("baz"), null, 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); assertEquals(expected, actual); } @@ -160,14 +152,9 @@ public void testBlendTermsWithFieldBoosts() { Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") }; float[] boosts = new float[] { 200, 30 }; Query expected = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f); - Query actual = MultiMatchQuery.blendTerm( - indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), - null, - 1f, - false, - Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3)) - ); + Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(randomInt(20), null, () -> { + throw new UnsupportedOperationException(); + }, null), new BytesRef("baz"), null, 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); assertEquals(expected, actual); } @@ -188,14 +175,9 @@ public Query termQuery(Object value, QueryShardContext context) { ), 1f ); - Query actual = MultiMatchQuery.blendTerm( - indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), - null, - 1f, - true, - Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3)) - ); + Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(randomInt(20), null, () -> { + throw new UnsupportedOperationException(); + }, null), new BytesRef("baz"), null, 1f, true, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); assertEquals(expected, actual); } @@ -208,14 +190,9 @@ public Query termQuery(Object value, QueryShardContext context) { }; expectThrows( IllegalArgumentException.class, - () -> MultiMatchQuery.blendTerm( - indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), - null, - 1f, - false, - Arrays.asList(new FieldAndBoost(ft, 1)) - ) + () -> MultiMatchQuery.blendTerm(indexService.newQueryShardContext(randomInt(20), null, () -> { + throw new UnsupportedOperationException(); + }, null), new BytesRef("baz"), null, 1f, false, Arrays.asList(new FieldAndBoost(ft, 1))) ); } @@ -232,24 +209,16 @@ public Query termQuery(Object value, QueryShardContext context) { Query expectedDisjunct1 = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f); Query expectedDisjunct2 = new BoostQuery(new MatchAllDocsQuery(), 3); Query expected = new DisjunctionMaxQuery(Arrays.asList(expectedDisjunct2, expectedDisjunct1), 1.0f); - Query actual = MultiMatchQuery.blendTerm( - indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), - null, - 1f, - false, - Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3)) - ); + Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(randomInt(20), null, () -> { + throw new UnsupportedOperationException(); + }, null), new BytesRef("baz"), null, 
1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); assertEquals(expected, actual); } public void testMultiMatchCrossFieldsWithSynonyms() throws IOException { - QueryShardContext queryShardContext = indexService.newQueryShardContext( - randomInt(20), - null, - () -> { throw new UnsupportedOperationException(); }, - null - ); + QueryShardContext queryShardContext = indexService.newQueryShardContext(randomInt(20), null, () -> { + throw new UnsupportedOperationException(); + }, null); MultiMatchQuery parser = new MultiMatchQuery(queryShardContext); parser.setAnalyzer(new MockSynonymAnalyzer()); @@ -279,12 +248,9 @@ public void testMultiMatchCrossFieldsWithSynonyms() throws IOException { } public void testMultiMatchCrossFieldsWithSynonymsPhrase() throws IOException { - QueryShardContext queryShardContext = indexService.newQueryShardContext( - randomInt(20), - null, - () -> { throw new UnsupportedOperationException(); }, - null - ); + QueryShardContext queryShardContext = indexService.newQueryShardContext(randomInt(20), null, () -> { + throw new UnsupportedOperationException(); + }, null); MultiMatchQuery parser = new MultiMatchQuery(queryShardContext); parser.setAnalyzer(new MockSynonymAnalyzer()); Map fieldNames = new HashMap<>(); @@ -345,12 +311,9 @@ public void testKeywordSplitQueriesOnWhitespace() throws IOException { .endObject() ); mapperService.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); - QueryShardContext queryShardContext = indexService.newQueryShardContext( - randomInt(20), - null, - () -> { throw new UnsupportedOperationException(); }, - null - ); + QueryShardContext queryShardContext = indexService.newQueryShardContext(randomInt(20), null, () -> { + throw new UnsupportedOperationException(); + }, null); MultiMatchQuery parser = new MultiMatchQuery(queryShardContext); Map fieldNames = new HashMap<>(); fieldNames.put("field", 1.0f); diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 8ea64e71fb9dc..ee5307320cbfb 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -46,7 +46,10 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; import org.opensearch.index.IndexSettings; +import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.IndexSettingsModule; import java.io.IOException; @@ -72,6 +75,7 @@ import java.util.stream.Stream; import static java.util.Collections.emptySet; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.hamcrest.Matchers.equalTo; @@ -1770,6 +1774,78 @@ public void testUpdateAllocationIdsFromClusterManagerWithRemoteTranslogEnabled() assertFalse(tracker.pendingInSync.contains(newSyncingAllocationId.getId())); } + public void testSegmentReplicationCheckpointTracking() { + Settings settings = Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + final long 
initialClusterStateVersion = randomNonNegativeLong(); + final int numberOfActiveAllocationsIds = randomIntBetween(2, 16); + final int numberOfInitializingIds = randomIntBetween(2, 16); + final Tuple<Set<AllocationId>, Set<AllocationId>> activeAndInitializingAllocationIds = randomActiveAndInitializingAllocationIds( + numberOfActiveAllocationsIds, + numberOfInitializingIds + ); + final Set<AllocationId> activeAllocationIds = activeAndInitializingAllocationIds.v1(); + final Set<AllocationId> initializingIds = activeAndInitializingAllocationIds.v2(); + AllocationId primaryId = activeAllocationIds.iterator().next(); + IndexShardRoutingTable routingTable = routingTable(initializingIds, primaryId); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds))); + assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable)); + assertTrue(activeAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + // get insync ids, filter out the primary. + final Set<String> inSyncAllocationIds = tracker.getReplicationGroup() + .getInSyncAllocationIds() + .stream() + .filter(id -> tracker.shardAllocationId.equals(id) == false) + .collect(Collectors.toSet()); + + final ReplicationCheckpoint initialCheckpoint = new ReplicationCheckpoint(tracker.shardId(), 0L, 1, 1, 1L); + final ReplicationCheckpoint secondCheckpoint = new ReplicationCheckpoint(tracker.shardId(), 0L, 2, 2, 50L); + final ReplicationCheckpoint thirdCheckpoint = new ReplicationCheckpoint(tracker.shardId(), 0L, 2, 3, 100L); + + tracker.setLatestReplicationCheckpoint(initialCheckpoint); + tracker.setLatestReplicationCheckpoint(secondCheckpoint); + tracker.setLatestReplicationCheckpoint(thirdCheckpoint); + + Set<SegmentReplicationShardStats> groupStats = tracker.getSegmentReplicationStats(); + assertEquals(inSyncAllocationIds.size(), groupStats.size()); + for (SegmentReplicationShardStats shardStat : groupStats) { + assertEquals(3, shardStat.getCheckpointsBehindCount()); + assertEquals(100L, shardStat.getBytesBehindCount()); + } + + // simulate replicas moved up to date.
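+ // advancing a replica's visible checkpoint removes the timers for checkpoints it has caught up to.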
+ final Map<String, ReplicationTracker.CheckpointState> checkpoints = tracker.checkpoints; + for (String id : inSyncAllocationIds) { + final ReplicationTracker.CheckpointState checkpointState = checkpoints.get(id); + assertEquals(3, checkpointState.checkpointTimers.size()); + tracker.updateVisibleCheckpointForShard(id, initialCheckpoint); + assertEquals(2, checkpointState.checkpointTimers.size()); + } + + groupStats = tracker.getSegmentReplicationStats(); + assertEquals(inSyncAllocationIds.size(), groupStats.size()); + for (SegmentReplicationShardStats shardStat : groupStats) { + assertEquals(2, shardStat.getCheckpointsBehindCount()); + assertEquals(99L, shardStat.getBytesBehindCount()); + } + + for (String id : inSyncAllocationIds) { + final ReplicationTracker.CheckpointState checkpointState = checkpoints.get(id); + assertEquals(2, checkpointState.checkpointTimers.size()); + tracker.updateVisibleCheckpointForShard(id, thirdCheckpoint); + assertEquals(0, checkpointState.checkpointTimers.size()); + } + + groupStats = tracker.getSegmentReplicationStats(); + assertEquals(inSyncAllocationIds.size(), groupStats.size()); + for (SegmentReplicationShardStats shardStat : groupStats) { + assertEquals(0, shardStat.getCheckpointsBehindCount()); + assertEquals(0L, shardStat.getBytesBehindCount()); + } + } + public void testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOException { Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); diff --git a/server/src/test/java/org/opensearch/index/shard/GlobalCheckpointListenersTests.java b/server/src/test/java/org/opensearch/index/shard/GlobalCheckpointListenersTests.java index 28e29ec0d0249..1b7bdaf710de4 100644 --- a/server/src/test/java/org/opensearch/index/shard/GlobalCheckpointListenersTests.java +++ b/server/src/test/java/org/opensearch/index/shard/GlobalCheckpointListenersTests.java @@ -625,11 +625,9 @@ public void testFailingListenerAfterTimeout() throws InterruptedException { }).when(mockLogger).warn(any(String.class), any(RuntimeException.class)); final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, scheduler, mockLogger); final TimeValue timeout = TimeValue.timeValueMillis(randomIntBetween(1, 50)); - globalCheckpointListeners.add( - NO_OPS_PERFORMED, - maybeMultipleInvocationProtectingListener((g, e) -> { throw new RuntimeException("failure"); }), - timeout - ); + globalCheckpointListeners.add(NO_OPS_PERFORMED, maybeMultipleInvocationProtectingListener((g, e) -> { + throw new RuntimeException("failure"); + }), timeout); latch.await(); final ArgumentCaptor<String> message = ArgumentCaptor.forClass(String.class); final ArgumentCaptor<RuntimeException> t = ArgumentCaptor.forClass(RuntimeException.class); diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardOperationPermitsTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardOperationPermitsTests.java index 9d73fa3524a8c..6e130c1938b4f 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardOperationPermitsTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardOperationPermitsTests.java @@ -226,18 +226,12 @@ public void testOperationsIfClosed() { public void testBlockIfClosed() { permits.close(); - expectThrows( - IndexShardClosedException.class, - () -> permits.blockOperations(randomInt(10), TimeUnit.MINUTES, () -> { throw new IllegalArgumentException("fake error"); }) - ); - expectThrows( -
IndexShardClosedException.class, - () -> permits.asyncBlockOperations( - wrap(() -> { throw new IllegalArgumentException("fake error"); }), - randomInt(10), - TimeUnit.MINUTES - ) - ); + expectThrows(IndexShardClosedException.class, () -> permits.blockOperations(randomInt(10), TimeUnit.MINUTES, () -> { + throw new IllegalArgumentException("fake error"); + })); + expectThrows(IndexShardClosedException.class, () -> permits.asyncBlockOperations(wrap(() -> { + throw new IllegalArgumentException("fake error"); + }), randomInt(10), TimeUnit.MINUTES)); } public void testOperationsDelayedIfBlock() throws ExecutionException, InterruptedException, TimeoutException { diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index e9d22dfe4b4d6..4926b9dcd6f9b 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -2113,11 +2113,9 @@ public void testRelocatedSegRepError() throws IOException, InterruptedException IndexShardTestCase.updateRoutingEntry(shard, routing); ReplicationFailedException segRepException = expectThrows( ReplicationFailedException.class, - () -> shard.relocated( - routing.getTargetRelocatingShard().allocationId().getId(), - primaryContext -> {}, - () -> { throw new ReplicationFailedException("Segment replication failed"); } - ) + () -> shard.relocated(routing.getTargetRelocatingShard().allocationId().getId(), primaryContext -> {}, () -> { + throw new ReplicationFailedException("Segment replication failed"); + }) ); assertTrue(segRepException.getMessage().equals("Segment replication failed")); closeShards(shard); @@ -2900,11 +2898,9 @@ public void testReaderWrapperWorksWithGlobalOrdinals() throws IOException { new NoneCircuitBreakerService(), shard.mapperService() ); - IndexFieldData.Global ifd = indexFieldDataService.getForField( - foo, - "test", - () -> { throw new UnsupportedOperationException("search lookup not available"); } - ); + IndexFieldData.Global ifd = indexFieldDataService.getForField(foo, "test", () -> { + throw new UnsupportedOperationException("search lookup not available"); + }); FieldDataStats before = shard.fieldData().stats("foo"); assertThat(before.getMemorySizeInBytes(), equalTo(0L)); FieldDataStats after = null; diff --git a/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java b/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java index 8d89cc2383aeb..98f86758ea2ca 100644 --- a/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java @@ -138,7 +138,9 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest SearchOperationListener throwingListener = (SearchOperationListener) Proxy.newProxyInstance( SearchOperationListener.class.getClassLoader(), new Class[] { SearchOperationListener.class }, - (a, b, c) -> { throw new RuntimeException(); } + (a, b, c) -> { + throw new RuntimeException(); + } ); int throwingListeners = 0; final List indexingOperationListeners = new ArrayList<>(Arrays.asList(listener, listener)); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index d554af0ffc488..014a37249612b 100644 --- 
a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -8,7 +8,6 @@ package org.opensearch.index.shard; -import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.SegmentInfos; import org.junit.Assert; import org.opensearch.ExceptionsHelper; @@ -155,7 +154,6 @@ public void testSegmentInfosAndReplicationCheckpointTuple() throws Exception { private void assertReplicationCheckpoint(IndexShard shard, SegmentInfos segmentInfos, ReplicationCheckpoint checkpoint) throws IOException { assertNotNull(segmentInfos); - assertEquals(checkpoint.getSeqNo(), shard.getEngine().getMaxSeqNoFromSegmentInfos(segmentInfos)); assertEquals(checkpoint.getSegmentInfosVersion(), segmentInfos.getVersion()); assertEquals(checkpoint.getSegmentsGen(), segmentInfos.getGeneration()); } @@ -259,7 +257,7 @@ public void testPublishCheckpointOnPrimaryMode() throws IOException { refreshListener.afterRefresh(true); // verify checkpoint is published - verify(mock, times(1)).publish(any()); + verify(mock, times(1)).publish(any(), any()); closeShards(shard); } @@ -281,7 +279,7 @@ public void testPublishCheckpointAfterRelocationHandOff() throws IOException { refreshListener.afterRefresh(true); // verify checkpoint is not published - verify(mock, times(0)).publish(any()); + verify(mock, times(0)).publish(any(), any()); closeShards(shard); } @@ -308,7 +306,7 @@ public void testRejectCheckpointOnShardRoutingPrimary() throws IOException { assertEquals(false, primaryShard.getReplicationTracker().isPrimaryMode()); assertEquals(true, primaryShard.routingEntry().primary()); - spy.onNewCheckpoint(new ReplicationCheckpoint(primaryShard.shardId(), 0L, 0L, 0L, 0L), spyShard); + spy.onNewCheckpoint(new ReplicationCheckpoint(primaryShard.shardId(), 0L, 0L, 0L), spyShard); // Verify that checkpoint is not processed as shard routing is primary. verify(spy, times(0)).startReplication(any(), any(), any()); @@ -570,21 +568,15 @@ public void testReplicaReceivesLowerGeneration() throws Exception { numDocs = randomIntBetween(numDocs + 1, numDocs + 10); shards.indexDocs(numDocs); flushShard(primary, false); - assertLatestCommitGen(4, primary); replicateSegments(primary, List.of(replica_1)); assertEqualCommittedSegments(primary, replica_1); - assertLatestCommitGen(4, primary); - assertLatestCommitGen(5, replica_1); - assertLatestCommitGen(3, replica_2); shards.promoteReplicaToPrimary(replica_2).get(); primary.close("demoted", false); primary.store().close(); IndexShard oldPrimary = shards.addReplicaWithExistingPath(primary.shardPath(), primary.routingEntry().currentNodeId()); shards.recoverReplica(oldPrimary); - assertLatestCommitGen(5, oldPrimary); - assertLatestCommitGen(5, replica_2); numDocs = randomIntBetween(numDocs + 1, numDocs + 10); shards.indexDocs(numDocs); @@ -1024,8 +1016,6 @@ private void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCoun // assigned seqNos start at 0, so assert max & local seqNos are 1 less than our persisted doc count. assertEquals(expectedPersistedDocCount - 1, indexShard.seqNoStats().getMaxSeqNo()); assertEquals(expectedPersistedDocCount - 1, indexShard.seqNoStats().getLocalCheckpoint()); - // processed cp should be 1 less than our searchable doc count. 
- assertEquals(expectedSearchableDocCount - 1, indexShard.getProcessedLocalCheckpoint()); } private void resolveCheckpointInfoResponseListener(ActionListener listener, IndexShard primary) { @@ -1081,14 +1071,6 @@ private IndexShard failAndPromoteRandomReplica(ReplicationGroup shards) throws I return newPrimary; } - private void assertLatestCommitGen(long expected, IndexShard... shards) throws IOException { - for (IndexShard indexShard : shards) { - try (final GatedCloseable commit = indexShard.acquireLastIndexCommit(false)) { - assertEquals(expected, commit.get().getGeneration()); - } - } - } - private void assertEqualCommittedSegments(IndexShard primary, IndexShard... replicas) throws IOException { for (IndexShard replica : replicas) { final SegmentInfos replicaInfos = replica.store().readLastCommittedSegmentsInfo(); diff --git a/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java b/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java index 443325b8716c0..e445d4b78de9d 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java @@ -148,7 +148,7 @@ public void testGetRootPaths() throws IOException { } public void testLoadFileCachePath() throws IOException { - Settings searchNodeSettings = Settings.builder().put("node.roles", "search").put(PATH_SHARED_DATA_SETTING.getKey(), "").build(); + Settings searchNodeSettings = Settings.builder().put("node.roles", "search").build(); try (NodeEnvironment env = newNodeEnvironment(searchNodeSettings)) { ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0); diff --git a/server/src/test/java/org/opensearch/index/similarity/ScriptedSimilarityTests.java b/server/src/test/java/org/opensearch/index/similarity/ScriptedSimilarityTests.java index ef935ba64f1ca..f4282b8221c05 100644 --- a/server/src/test/java/org/opensearch/index/similarity/ScriptedSimilarityTests.java +++ b/server/src/test/java/org/opensearch/index/similarity/ScriptedSimilarityTests.java @@ -111,10 +111,9 @@ public double execute( ) { StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace(); - if (Arrays.stream(stackTraceElements) - .anyMatch( - ste -> { return ste.getClassName().endsWith(".TermScorer") && ste.getMethodName().equals("score"); } - ) == false) { + if (Arrays.stream(stackTraceElements).anyMatch(ste -> { + return ste.getClassName().endsWith(".TermScorer") && ste.getMethodName().equals("score"); + }) == false) { // this might happen when computing max scores return Float.MAX_VALUE; } @@ -210,10 +209,9 @@ public double execute( ) { StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace(); - if (Arrays.stream(stackTraceElements) - .anyMatch( - ste -> { return ste.getClassName().endsWith(".TermScorer") && ste.getMethodName().equals("score"); } - ) == false) { + if (Arrays.stream(stackTraceElements).anyMatch(ste -> { + return ste.getClassName().endsWith(".TermScorer") && ste.getMethodName().equals("score"); + }) == false) { // this might happen when computing max scores return Float.MAX_VALUE; } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java index 97575248b4ad3..15f1585bd1477 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java @@ -15,7 +15,6 @@ import 
org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.support.PlainBlobMetadata; -import org.opensearch.common.collect.Set; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -25,6 +24,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 8b7a2e8e06207..956279c3ea048 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -21,7 +21,6 @@ import org.junit.Before; import org.opensearch.common.UUIDs; import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.collect.Set; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; @@ -34,6 +33,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; @@ -162,7 +162,7 @@ private Map getDummyMetadata(String prefix, int commitGeneration * Prepares metadata file bytes with header and footer * @param segmentFilesMap: actual metadata content * @return ByteArrayIndexInput: metadata file bytes with header and footer - * @throws IOException + * @throws IOException IOException */ private ByteArrayIndexInput createMetadataFileBytes(Map segmentFilesMap) throws IOException { BytesStreamOutput output = new BytesStreamOutput(); diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java index 997f8702c836d..72ac9837537e1 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java @@ -10,15 +10,16 @@ import org.apache.lucene.store.IndexInput; import org.junit.Before; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory; import org.opensearch.index.store.remote.utils.cache.CacheUsage; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; -import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; public class FileCacheTests extends OpenSearchTestCase { // need concurrency level to be static to make these tests more deterministic because capacity per segment is dependent on @@ -35,14 +36,27 @@ public void init() throws Exception { path = createTempDir("FileCacheTests"); } - private FileCache createFileCache(long capaticy) { - return FileCacheFactory.createConcurrentLRUFileCache(capaticy, CONCURRENCY_LEVEL); + private FileCache createFileCache(long capacity) { + return FileCacheFactory.createConcurrentLRUFileCache(capacity, CONCURRENCY_LEVEL); } private Path createPath(String middle) { return path.resolve(middle).resolve(FAKE_PATH_SUFFIX); } + 
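A note on the FileCacheTests changes in this file: the new putAndDecRef helper below pairs every put() with an immediate decRef(), because put() registers an entry holding one active reference, and only entries whose reference count has dropped to zero are eligible for eviction. A toy, self-contained model of that contract (illustrative classes, not the production FileCache/LRUCache):

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Toy ref-counted cache: put() pins the new entry with one reference,
    // decRef() unpins it, and eviction (oldest-first) skips pinned entries.
    class ToyRefCountedCache<K> {
        private static final class Entry {
            final long weight;
            int refCount = 1; // put() takes the initial reference
            Entry(long weight) { this.weight = weight; }
        }

        private final long capacity;
        private long usage;
        private final Map<K, Entry> map = new LinkedHashMap<>();

        ToyRefCountedCache(long capacity) { this.capacity = capacity; }

        void put(K key, long weight) {
            Entry old = map.put(key, new Entry(weight));
            if (old != null) usage -= old.weight;
            usage += weight;
            // evict oldest unpinned entries until usage fits the capacity again
            map.entrySet().removeIf(e -> {
                if (usage <= capacity || e.getValue().refCount > 0) return false;
                usage -= e.getValue().weight;
                return true;
            });
        }

        void incRef(K key) { Entry e = map.get(key); if (e != null) e.refCount++; }
        void decRef(K key) { Entry e = map.get(key); if (e != null && e.refCount > 0) e.refCount--; }
        long usage() { return usage; }
    }

Under this model, put(key, w) followed by decRef(key) yields a cached-but-evictable entry, which is exactly what the bulk-insertion loops in these tests need.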
@SuppressForbidden(reason = "creating a test file for cache") + private void createFile(String nodeId, String indexName, String shardId, String fileName) throws IOException { + Path folderPath = path.resolve(NodeEnvironment.CACHE_FOLDER) + .resolve(nodeId) + .resolve(indexName) + .resolve(shardId) + .resolve(RemoteSnapshotDirectoryFactory.LOCAL_STORE_LOCATION); + Path filePath = folderPath.resolve(fileName); + Files.createDirectories(folderPath); + Files.createFile(filePath); + Files.write(filePath, "test-data".getBytes()); + } + public void testCreateCacheWithSmallSegments() { assertThrows(IllegalStateException.class, () -> { FileCacheFactory.createConcurrentLRUFileCache(1000, CONCURRENCY_LEVEL); }); } @@ -73,36 +87,23 @@ public void testPutThrowException() { }); } - public void testComputeIfPresent() { + public void testCompute() { FileCache fileCache = createFileCache(GIGA_BYTES); Path path = createPath("0"); fileCache.put(path, new FakeIndexInput(8 * MEGA_BYTES)); fileCache.incRef(path); - fileCache.computeIfPresent(path, (p, i) -> null); + fileCache.compute(path, (p, i) -> null); // item will be removed assertEquals(fileCache.size(), 0); } - public void testComputeIfPresentThrowException() { + public void testComputeThrowException() { assertThrows(NullPointerException.class, () -> { FileCache fileCache = createFileCache(GIGA_BYTES); - fileCache.computeIfPresent(null, null); + fileCache.compute(null, null); }); } - public void testPutAll() { - FileCache fileCache = createFileCache(GIGA_BYTES); - Map blockMaps = new HashMap<>(); - for (int i = 0; i < 4; i++) { - blockMaps.put(createPath(Integer.toString(i)), new FakeIndexInput(8 * MEGA_BYTES)); - } - fileCache.putAll(blockMaps); - // verify all blocks are put into file cache - for (int i = 0; i < 4; i++) { - assertNotNull(fileCache.get(createPath(Integer.toString(i)))); - } - } - public void testRemove() { FileCache fileCache = createFileCache(GIGA_BYTES); for (int i = 0; i < 4; i++) { @@ -124,33 +125,21 @@ public void testRemoveThrowException() { }); } - public void testRemoveAll() { - FileCache fileCache = createFileCache(GIGA_BYTES); - List blockPathList = new ArrayList<>(); - for (int i = 0; i < 4; i++) { - Path blockPath = createPath(Integer.toString(i)); - fileCache.put(blockPath, new FakeIndexInput(8 * MEGA_BYTES)); - blockPathList.add(blockPath); - } - fileCache.removeAll(blockPathList); - assertEquals(fileCache.size(), 0); - } - public void testIncDecRef() { FileCache fileCache = createFileCache(GIGA_BYTES); for (int i = 0; i < 4; i++) { fileCache.put(createPath(Integer.toString(i)), new FakeIndexInput(8 * MEGA_BYTES)); - fileCache.incRef(createPath(Integer.toString(i))); } // try to evict previous IndexInput for (int i = 1000; i < 3000; i++) { - fileCache.put(createPath(Integer.toString(i)), new FakeIndexInput(8 * MEGA_BYTES)); + putAndDecRef(fileCache, i, 8 * MEGA_BYTES); } // IndexInput with refcount greater than 0 will not be evicted for (int i = 0; i < 4; i++) { assertNotNull(fileCache.get(createPath(Integer.toString(i)))); + fileCache.decRef(createPath(Integer.toString(i))); } // decrease ref @@ -160,7 +149,7 @@ public void testIncDecRef() { // try to evict previous IndexInput again for (int i = 3000; i < 5000; i++) { - fileCache.put(createPath(Integer.toString(i)), new FakeIndexInput(8 * MEGA_BYTES)); + putAndDecRef(fileCache, i, 8 * MEGA_BYTES); } for (int i = 0; i < 4; i++) { @@ -201,7 +190,7 @@ public void testSize() { public void testPrune() { FileCache fileCache = createFileCache(GIGA_BYTES); for (int i = 0; i < 4; 
i++) { - fileCache.put(createPath(Integer.toString(i)), new FakeIndexInput(8 * MEGA_BYTES)); + putAndDecRef(fileCache, i, 8 * MEGA_BYTES); } // before prune assertEquals(fileCache.size(), 4); @@ -212,11 +201,10 @@ public void testPrune() { } public void testUsage() { - // edge case, all Indexinput will be evicted as soon as they are put into file cache - FileCache fileCache = createFileCache(MEGA_BYTES); - fileCache.put(createPath("0"), new FakeIndexInput(8 * MEGA_BYTES)); + FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache(16 * MEGA_BYTES, 1); + putAndDecRef(fileCache, 0, 16 * MEGA_BYTES); - CacheUsage expectedCacheUsage = new CacheUsage(0, 0); + CacheUsage expectedCacheUsage = new CacheUsage(16 * MEGA_BYTES, 0); CacheUsage realCacheUsage = fileCache.usage(); assertEquals(expectedCacheUsage.activeUsage(), realCacheUsage.activeUsage()); assertEquals(expectedCacheUsage.usage(), realCacheUsage.usage()); @@ -238,13 +226,31 @@ public void testStats() { // do some eviction here for (int i = 0; i < 2000; i++) { - fileCache.put(createPath(Integer.toString(i)), new FakeIndexInput(8 * MEGA_BYTES)); + putAndDecRef(fileCache, i, 8 * MEGA_BYTES); } assertTrue(fileCache.stats().evictionCount() > 0); assertTrue(fileCache.stats().evictionWeight() > 0); } + public void testCacheRestore() throws IOException { + String nodeId = "0"; + String indexName = "test-index"; + String shardId = "0"; + createFile(nodeId, indexName, shardId, "test.0"); + FileCache fileCache = createFileCache(GIGA_BYTES); + assertEquals(0, fileCache.usage().usage()); + Path fileCachePath = path.resolve(NodeEnvironment.CACHE_FOLDER).resolve(nodeId).resolve(indexName).resolve(shardId); + fileCache.restoreFromDirectory(List.of(fileCachePath)); + assertTrue(fileCache.usage().usage() > 0); + } + + private void putAndDecRef(FileCache cache, int path, long indexInputSize) { + final Path key = createPath(Integer.toString(path)); + cache.put(key, new FakeIndexInput(indexInputSize)); + cache.decRef(key); + } + final class FakeIndexInput extends CachedIndexInput { private final long length; diff --git a/server/src/test/java/org/opensearch/index/store/remote/utils/ConcurrentInvocationLinearizerTests.java b/server/src/test/java/org/opensearch/index/store/remote/utils/ConcurrentInvocationLinearizerTests.java deleted file mode 100644 index 5538b77184604..0000000000000 --- a/server/src/test/java/org/opensearch/index/store/remote/utils/ConcurrentInvocationLinearizerTests.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
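The file removed below exercised ConcurrentInvocationLinearizer, whose job is to collapse concurrent calls for the same key into a single invocation whose result, or failure, is shared by all callers; with the class gone, that behavior is covered indirectly through TransferManager. A generic sketch of the technique using only standard-library types (not the removed class's API):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    // One in-flight computation per key: the first caller triggers the load,
    // concurrent callers for the same key share the same future, and the
    // entry is dropped on completion so failures are not cached.
    class InvocationLinearizer<K, V> {
        private final ConcurrentHashMap<K, CompletableFuture<V>> inFlight = new ConcurrentHashMap<>();

        CompletableFuture<V> linearize(K key, Function<K, V> loader) {
            return inFlight.computeIfAbsent(
                key,
                k -> CompletableFuture.supplyAsync(() -> loader.apply(k))
                    .whenComplete((v, e) -> inFlight.remove(k))
            );
        }
    }

The deleted tests below assert exactly these properties: the loader runs once per key, waiting callers observe the shared result or exception, and the internal map is empty afterwards.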
- */ - -package org.opensearch.index.store.remote.utils; - -import org.hamcrest.MatcherAssert; -import org.junit.After; -import org.junit.Before; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.IOException; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; - -import static org.hamcrest.Matchers.anEmptyMap; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; - -public class ConcurrentInvocationLinearizerTests extends OpenSearchTestCase { - private ExecutorService executorService; - - @Before - public void setup() { - executorService = Executors.newSingleThreadExecutor(); - } - - public void testLinearizeShouldNotInvokeMethodMoreThanOnce() throws Exception { - final ConcurrentInvocationLinearizer invocationLinearizer = new ConcurrentInvocationLinearizer<>(); - final CountDownLatch startLatch = new CountDownLatch(1); - final CountDownLatch finishLatch = new CountDownLatch(1); - - final Future> first = executorService.submit(() -> invocationLinearizer.linearizeInternal("input", s -> { - startLatch.countDown(); - try { - finishLatch.await(); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - return "expected"; - })); - - startLatch.await(); // Wait for first caller to start work - final Future second = invocationLinearizer.linearizeInternal("input", s -> { throw new AssertionError(); }); - final Future third = invocationLinearizer.linearizeInternal("input", s -> { throw new AssertionError(); }); - finishLatch.countDown(); // Unblock first caller - MatcherAssert.assertThat(first.get().get(), equalTo("expected")); - MatcherAssert.assertThat(second.get(), equalTo("expected")); - MatcherAssert.assertThat(third.get(), equalTo("expected")); - MatcherAssert.assertThat(invocationLinearizer.getInvokeOnceCache(), is(anEmptyMap())); - } - - public void testLinearizeSharesFailures() throws Exception { - final ConcurrentInvocationLinearizer invocationLinearizer = new ConcurrentInvocationLinearizer<>(); - final CountDownLatch startLatch = new CountDownLatch(1); - final CountDownLatch finishLatch = new CountDownLatch(1); - - final Future> first = executorService.submit(() -> invocationLinearizer.linearizeInternal("input", s -> { - startLatch.countDown(); - try { - finishLatch.await(); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - throw new IOException("io exception"); - })); - - startLatch.await(); // Wait for first caller to start work - final Future second = invocationLinearizer.linearizeInternal("input", s -> { throw new AssertionError(); }); - finishLatch.countDown(); // Unblock first caller - final ExecutionException e1 = assertThrows(ExecutionException.class, () -> first.get().get()); - MatcherAssert.assertThat(e1.getCause(), instanceOf(IOException.class)); - final ExecutionException e2 = assertThrows(ExecutionException.class, second::get); - MatcherAssert.assertThat(e2.getCause(), instanceOf(IOException.class)); - MatcherAssert.assertThat(invocationLinearizer.getInvokeOnceCache(), is(anEmptyMap())); - } - - public void testLinearizeShouldLeaveCacheEmptyEvenWhenFutureFail() { - ConcurrentInvocationLinearizer invocationLinearizer = new ConcurrentInvocationLinearizer<>(); - assertThrows( - RuntimeException.class, - () -> invocationLinearizer.linearize("input", s -> { throw new 
RuntimeException("exception"); }) - ); - MatcherAssert.assertThat("Expected nothing to be cached on failure", invocationLinearizer.getInvokeOnceCache(), is(anEmptyMap())); - } - - public void testExceptionHandling() { - final ConcurrentInvocationLinearizer invocationLinearizer = new ConcurrentInvocationLinearizer<>(); - assertThrows( - RuntimeException.class, - () -> invocationLinearizer.linearize("input", s -> { throw new RuntimeException("exception"); }) - ); - assertThrows(IOException.class, () -> invocationLinearizer.linearize("input", s -> { throw new IOException("exception"); })); - assertThrows(AssertionError.class, () -> invocationLinearizer.linearize("input", s -> { throw new AssertionError("exception"); })); - final RuntimeException e = assertThrows( - RuntimeException.class, - () -> invocationLinearizer.linearize("input", s -> { throw sneakyThrow(new TestCheckedException()); }) - ); - MatcherAssert.assertThat(e.getCause(), instanceOf(TestCheckedException.class)); - } - - @After - public void cleanUp() { - executorService.shutdownNow(); - terminate(executorService); - } - - // Some unholy hackery with generics to trick the compiler into throwing an undeclared checked exception - private static E sneakyThrow(Throwable e) throws E { - throw (E) e; - } - - private static class TestCheckedException extends Exception {} -} diff --git a/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java b/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java index 4e607cb587cc6..f3049c504f295 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java @@ -32,12 +32,17 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class) public class TransferManagerTests extends OpenSearchTestCase { - private final FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache(1024 * 1024, 8); + private static final int EIGHT_MB = 1024 * 1024 * 8; + private final FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache(EIGHT_MB * 2, 1); private MMapDirectory directory; private BlobContainer blobContainer; private TransferManager transferManager; @@ -47,7 +52,7 @@ public void setUp() throws Exception { super.setUp(); directory = new MMapDirectory(createTempDir(), SimpleFSLockFactory.INSTANCE); blobContainer = mock(BlobContainer.class); - doAnswer(i -> new ByteArrayInputStream(new byte[] { 0, 1, 2, 3, 4, 5, 6, 7 })).when(blobContainer).readBlob("blob", 0, 8); + doAnswer(i -> new ByteArrayInputStream(createData())).when(blobContainer).readBlob(eq("blob"), anyLong(), anyLong()); transferManager = new TransferManager(blobContainer, fileCache); } @@ -56,20 +61,29 @@ public void tearDown() throws Exception { super.tearDown(); } + private static byte[] createData() { + final byte[] data = new byte[EIGHT_MB]; + data[EIGHT_MB - 1] = 7; + return data; + } + public void testSingleAccess() throws Exception { - try (IndexInput i = fetchBlob()) { - i.seek(7); - MatcherAssert.assertThat(i.readByte(), equalTo((byte) 
7)); + try (IndexInput i = fetchBlobWithName("file")) { + assertIndexInputIsFunctional(i); + MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo((long) EIGHT_MB)); } + MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo(0L)); + MatcherAssert.assertThat(fileCache.usage().usage(), equalTo((long) EIGHT_MB)); } public void testConcurrentAccess() throws Exception { // Kick off multiple threads that all concurrently request the same resource + final String blobname = "file"; final ExecutorService testRunner = Executors.newFixedThreadPool(8); try { final List<Future<IndexInput>> futures = new ArrayList<>(); for (int i = 0; i < 8; i++) { - futures.add(testRunner.submit(this::fetchBlob)); + futures.add(testRunner.submit(() -> fetchBlobWithName(blobname))); } // Wait for all threads to complete for (Future<IndexInput> future : futures) { @@ -80,19 +94,92 @@ public void testConcurrentAccess() throws Exception { // result in EOFExceptions and/or NPEs. for (Future<IndexInput> future : futures) { try (IndexInput i = future.get()) { - i.seek(7); - MatcherAssert.assertThat(i.readByte(), equalTo((byte) 7)); + assertIndexInputIsFunctional(i); } } } finally { - testRunner.shutdown(); - assertTrue(testRunner.awaitTermination(1, TimeUnit.SECONDS)); + assertTrue(terminate(testRunner)); + } + } + + public void testFetchBlobWithConcurrentCacheEvictions() throws Exception { + // Submit 256 tasks to an executor with 16 threads that will each randomly + // request one of eight blobs. Given that the cache can only hold two + // blobs this will lead to a huge amount of contention and thrashing. + final ExecutorService testRunner = Executors.newFixedThreadPool(16); + try { + final List<Future<?>> futures = new ArrayList<>(); + for (int i = 0; i < 256; i++) { + // request an index input and immediately close it + final String blobname = "blob-" + randomIntBetween(0, 7); + futures.add(testRunner.submit(() -> { + try { + try (IndexInput indexInput = fetchBlobWithName(blobname)) { + assertIndexInputIsFunctional(indexInput); + } + } catch (Exception e) { + throw new AssertionError(e); + } + })); + } + // Wait for all threads to complete + for (Future<?> future : futures) { + future.get(10, TimeUnit.SECONDS); + } + } finally { + assertTrue(terminate(testRunner)); } + MatcherAssert.assertThat("Expected many evictions to happen", fileCache.stats().evictionCount(), greaterThan(0L)); } - private IndexInput fetchBlob() throws InterruptedException, IOException { + public void testUsageExceedsCapacity() throws Exception { + // Fetch resources that exceed the configured capacity of the cache and assert that the + // returned IndexInputs are still functional.
+ try (IndexInput i1 = fetchBlobWithName("1"); IndexInput i2 = fetchBlobWithName("2"); IndexInput i3 = fetchBlobWithName("3")) { + assertIndexInputIsFunctional(i1); + assertIndexInputIsFunctional(i2); + assertIndexInputIsFunctional(i3); + MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo((long) EIGHT_MB * 3)); + MatcherAssert.assertThat(fileCache.usage().usage(), equalTo((long) EIGHT_MB * 3)); + } + MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo(0L)); + MatcherAssert.assertThat(fileCache.usage().usage(), equalTo((long) EIGHT_MB * 3)); + // Fetch another resource which will trigger an eviction + try (IndexInput i1 = fetchBlobWithName("1")) { + assertIndexInputIsFunctional(i1); + MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo((long) EIGHT_MB)); + MatcherAssert.assertThat(fileCache.usage().usage(), equalTo((long) EIGHT_MB)); + } + MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo(0L)); + MatcherAssert.assertThat(fileCache.usage().usage(), equalTo((long) EIGHT_MB)); + } + + public void testDownloadFails() throws Exception { + doThrow(new IOException("Expected test exception")).when(blobContainer).readBlob(eq("failure-blob"), anyLong(), anyLong()); + expectThrows( + IOException.class, + () -> transferManager.fetchBlob( + BlobFetchRequest.builder() + .blobName("failure-blob") + .position(0) + .fileName("file") + .directory(directory) + .length(EIGHT_MB) + .build() + ) + ); + MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo(0L)); + MatcherAssert.assertThat(fileCache.usage().usage(), equalTo(0L)); + } + + private IndexInput fetchBlobWithName(String blobname) throws IOException { return transferManager.fetchBlob( - BlobFetchRequest.builder().blobName("blob").position(0).fileName("file").directory(directory).length(8).build() + BlobFetchRequest.builder().blobName("blob").position(0).fileName(blobname).directory(directory).length(EIGHT_MB).build() ); } + + private static void assertIndexInputIsFunctional(IndexInput indexInput) throws IOException { + indexInput.seek(EIGHT_MB - 1); + MatcherAssert.assertThat(indexInput.readByte(), equalTo((byte) 7)); + } } diff --git a/server/src/test/java/org/opensearch/index/store/remote/utils/cache/LRUCacheTests.java b/server/src/test/java/org/opensearch/index/store/remote/utils/cache/LRUCacheTests.java new file mode 100644 index 0000000000000..47cb8000b6c83 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/remote/utils/cache/LRUCacheTests.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
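The two new test classes that follow use a shared abstract suite: RefCountedCacheTestCase holds all behavioral tests against the RefCountedCache interface, and each implementation (LRUCache, SegmentedCache) gets a trivial subclass that only supplies the instance under test. The shape of the pattern, reduced to a toy interface (names are illustrative, not the real cache API):

    // Shared behavioral suite; concrete classes only pick the implementation.
    interface Counter {
        void add(long delta);
        long total();
    }

    abstract class CounterTestCase { // would extend OpenSearchTestCase in this codebase
        private final Counter counter;

        protected CounterTestCase(Counter counter) {
            this.counter = counter;
        }

        public void testAddAccumulates() {
            counter.add(2);
            counter.add(3);
            if (counter.total() != 5) throw new AssertionError("expected 5, got " + counter.total());
        }
    }

    class SimpleCounterTests extends CounterTestCase {
        SimpleCounterTests() {
            super(new Counter() {
                private long total;
                public void add(long delta) { total += delta; }
                public long total() { return total; }
            });
        }
    }

This keeps the two implementations honest against one behavioral contract, so any future divergence shows up as a failing shared test.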
+ */ + + package org.opensearch.index.store.remote.utils.cache; + + public class LRUCacheTests extends RefCountedCacheTestCase { + public LRUCacheTests() { + super(new LRUCache<>(CAPACITY, n -> {}, value -> value)); + } +} diff --git a/server/src/test/java/org/opensearch/index/store/remote/utils/cache/RefCountedCacheTestCase.java b/server/src/test/java/org/opensearch/index/store/remote/utils/cache/RefCountedCacheTestCase.java new file mode 100644 index 0000000000000..59657eebc4480 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/remote/utils/cache/RefCountedCacheTestCase.java @@ -0,0 +1,203 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store.remote.utils.cache; + +import org.opensearch.test.OpenSearchTestCase; + +abstract class RefCountedCacheTestCase extends OpenSearchTestCase { + static final int CAPACITY = 100; + + private final RefCountedCache<String, Long> refCountedCache; + + protected RefCountedCacheTestCase(RefCountedCache<String, Long> refCountedCache) { + this.refCountedCache = refCountedCache; + } + + public void testBasicGetAndPutAndRemove() { + assertNull(refCountedCache.get("1")); + refCountedCache.put("1", 10L); + assertEquals(10L, (long) refCountedCache.get("1")); + refCountedCache.remove("1"); + assertNull(refCountedCache.get("1")); + } + + public void testUsageWithIncrementAndDecrement() { + refCountedCache.put("1", 10L); + assertEquals(10L, refCountedCache.usage().usage()); + assertEquals(10L, refCountedCache.usage().activeUsage()); + + refCountedCache.decRef("1"); + assertEquals(10L, refCountedCache.usage().usage()); + assertEquals(0L, refCountedCache.usage().activeUsage()); + + refCountedCache.incRef("1"); + assertEquals(10L, refCountedCache.usage().usage()); + assertEquals(10L, refCountedCache.usage().activeUsage()); + } + + public void testEviction() { + for (int i = 1; i <= 5; i++) { + final String key = Integer.toString(i); + refCountedCache.put(key, 25L); + refCountedCache.decRef(key); + } + assertNull(refCountedCache.get("1")); + assertNull(refCountedCache.get("2")); + assertNotNull(refCountedCache.get("3")); + assertNotNull(refCountedCache.get("4")); + assertNotNull(refCountedCache.get("5")); + + assertEquals(75L, refCountedCache.usage().usage()); + assertEquals(75L, refCountedCache.usage().activeUsage()); + } + + public void testComputeRemoveWhenExists() { + refCountedCache.put("1", 25L); + refCountedCache.decRef("1"); + assertEquals(0, refCountedCache.usage().activeUsage()); + assertEquals(25L, refCountedCache.usage().usage()); + + assertNull(refCountedCache.compute("1", (k, v) -> null)); + assertNull(refCountedCache.get("1")); + assertEquals(0, refCountedCache.usage().activeUsage()); + assertEquals(0L, refCountedCache.usage().usage()); + } + + public void testComputeRemoveWhenNotExists() { + assertUsage(0, 0); + assertNull(refCountedCache.compute("1", (k, v) -> null)); + assertNull(refCountedCache.get("1")); + assertUsage(0, 0); + } + + public void testComputeRemapExists() { + assertUsage(0, 0); + refCountedCache.put("1", 25L); + refCountedCache.decRef("1"); + assertUsage(25, 0); + + final long newValue = refCountedCache.compute("1", (k, v) -> v + 5); + assertEquals(30L, newValue); + assertUsage(30, 30); + + refCountedCache.decRef("1"); + assertUsage(30, 0); + + assertEquals(30L, (long) refCountedCache.get("1")); + } + + public void testComputeRemapNotExists() {
assertUsage(0, 0); + final long newValue = refCountedCache.compute("1", (k, v) -> 30L); + assertEquals(30L, newValue); + assertUsage(30, 30); + + refCountedCache.decRef("1"); + assertUsage(30, 0); + + assertEquals(30L, (long) refCountedCache.get("1")); + } + + public void testActiveUsageGreaterThanCapacity() { + for (int i = 1; i <= 5; i++) { + final String key = Integer.toString(i); + refCountedCache.put(key, 25L); + } + assertEquals(125L, refCountedCache.usage().usage()); + assertEquals(125L, refCountedCache.usage().activeUsage()); + } + + public void testReferenceCountingItemsThatDoNotExist() { + assertNull(refCountedCache.get("1")); + assertUsage(0, 0); + refCountedCache.incRef("1"); + assertNull(refCountedCache.get("1")); + assertUsage(0, 0); + refCountedCache.decRef("1"); + assertNull(refCountedCache.get("1")); + assertEquals(0L, refCountedCache.usage().usage()); + assertEquals(0L, refCountedCache.usage().activeUsage()); + } + + public void testPrune() { + refCountedCache.put("1", 10L); + refCountedCache.decRef("1"); + refCountedCache.put("2", 10L); + refCountedCache.decRef("2"); + refCountedCache.put("3", 10L); + + assertEquals(20L, refCountedCache.prune()); + assertNull(refCountedCache.get("1")); + assertNull(refCountedCache.get("2")); + assertEquals(10L, (long) refCountedCache.get("3")); + } + + public void testStats() { + assertEquals(0, refCountedCache.stats().hitCount()); + refCountedCache.put("1", 1L); + refCountedCache.get("1"); + assertEquals(1, refCountedCache.stats().hitCount()); + + assertEquals(0, refCountedCache.stats().replaceCount()); + refCountedCache.put("1", 2L); + assertEquals(1, refCountedCache.stats().replaceCount()); + assertEquals(1, refCountedCache.stats().hitCount()); + + assertEquals(0, refCountedCache.stats().evictionCount()); + refCountedCache.put("2", 80L); + refCountedCache.decRef("2"); + refCountedCache.put("3", 80L); + assertEquals(1, refCountedCache.stats().evictionCount()); + + assertEquals(0, refCountedCache.stats().missCount()); + assertNull(refCountedCache.get("2")); + assertEquals(1, refCountedCache.stats().missCount()); + + assertEquals(0, refCountedCache.stats().removeCount()); + refCountedCache.remove("3"); + assertEquals(1, refCountedCache.stats().removeCount()); + } + + public void testComputeStats() { + refCountedCache.compute("1", (k, v) -> null); + assertEquals(0, refCountedCache.stats().missCount()); + assertEquals(0, refCountedCache.stats().hitCount()); + assertEquals(0, refCountedCache.stats().replaceCount()); + assertEquals(0, refCountedCache.stats().removeCount()); + + refCountedCache.compute("1", (k, v) -> 10L); + assertEquals(1, refCountedCache.stats().missCount()); + assertEquals(0, refCountedCache.stats().hitCount()); + assertEquals(0, refCountedCache.stats().replaceCount()); + assertEquals(0, refCountedCache.stats().removeCount()); + + refCountedCache.compute("1", (k, v) -> 10L); + assertEquals(1, refCountedCache.stats().missCount()); + assertEquals(1, refCountedCache.stats().hitCount()); + assertEquals(0, refCountedCache.stats().replaceCount()); + assertEquals(0, refCountedCache.stats().removeCount()); + + refCountedCache.compute("1", (k, v) -> 20L); + assertEquals(1, refCountedCache.stats().missCount()); + assertEquals(2, refCountedCache.stats().hitCount()); + assertEquals(1, refCountedCache.stats().replaceCount()); + assertEquals(0, refCountedCache.stats().removeCount()); + + refCountedCache.compute("1", (k, v) -> null); + assertEquals(1, refCountedCache.stats().missCount()); + assertEquals(2, 
refCountedCache.stats().hitCount()); + assertEquals(1, refCountedCache.stats().replaceCount()); + assertEquals(1, refCountedCache.stats().removeCount()); + } + + private void assertUsage(long usage, long activeUsage) { + assertEquals(usage, refCountedCache.usage().usage()); + assertEquals(activeUsage, refCountedCache.usage().activeUsage()); + } +} diff --git a/server/src/test/java/org/opensearch/index/store/remote/utils/cache/RefCountedCacheTests.java b/server/src/test/java/org/opensearch/index/store/remote/utils/cache/RefCountedCacheTests.java deleted file mode 100644 index 044e1e99627c0..0000000000000 --- a/server/src/test/java/org/opensearch/index/store/remote/utils/cache/RefCountedCacheTests.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.store.remote.utils.cache; - -import org.junit.Before; -import org.mockito.Mockito; -import org.opensearch.common.cache.RemovalListener; -import org.opensearch.test.OpenSearchTestCase; - -import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; - -public class RefCountedCacheTests extends OpenSearchTestCase { - private static final long SIZE = 100; - private RemovalListener removalListener; - - @Before - public void setUp() throws Exception { - super.setUp(); - this.removalListener = Mockito.mock(RemovalListener.class); - } - - public void testLRUCache() { - executeRefCountedCacheTests(new LRUCache<>(SIZE, removalListener, value -> value)); - } - - public void testSegmentedCache() { - executeRefCountedCacheTests( - SegmentedCache.builder() - .capacity(SIZE) - .weigher(value -> value) - .listener(removalListener) - .concurrencyLevel(1) - .build() - ); - } - - void executeRefCountedCacheTests(RefCountedCache refCountedCache) { - // basic get and put operation - assertNull(refCountedCache.get("1")); - refCountedCache.put("1", 10L); - assertEquals(10L, (long) refCountedCache.get("1")); - - // cache usage with ref ++ and -- - assertEquals(10L, refCountedCache.usage().usage()); - assertEquals(0L, refCountedCache.usage().activeUsage()); - refCountedCache.incRef("1"); - assertEquals(10L, refCountedCache.usage().usage()); - assertEquals(10L, refCountedCache.usage().activeUsage()); - refCountedCache.decRef("1"); - assertEquals(10L, refCountedCache.usage().usage()); - assertEquals(0L, refCountedCache.usage().activeUsage()); - - // put all delegation - Map toPutIntoCache = new HashMap<>() { - { - put("2", 20L); - put("3", 30L); - } - }; - refCountedCache.putAll(toPutIntoCache); - toPutIntoCache.forEach((k, v) -> { assertEquals(v, refCountedCache.get(k)); }); - assertEquals(60L, refCountedCache.usage().usage()); - assertEquals(0L, refCountedCache.usage().activeUsage()); - - // since all entries has ref count = 0 first added one will be evicted first once usage >= capacity - refCountedCache.put("4", 40L); - refCountedCache.put("5", 10L); - assertNull(refCountedCache.get("1")); - assertNull(refCountedCache.get("2")); - Arrays.asList("3", "4", "5").forEach(k -> assertNotNull(refCountedCache.get(k))); - assertEquals(80L, refCountedCache.usage().usage()); - assertEquals(0L, refCountedCache.usage().activeUsage()); - - // simple compute if present when present - refCountedCache.computeIfPresent("3", (k, v) -> { return v + 5; }); - assertEquals(35L, (long) refCountedCache.get("3")); - assertEquals(85L, 
refCountedCache.usage().usage()); - assertEquals(0L, refCountedCache.usage().activeUsage()); - - // simple compute if present when not present - refCountedCache.computeIfPresent("1", (k, v) -> { - fail("should not reach here"); - return v + 5; - }); - assertNull(refCountedCache.get("1")); - - // inc ref all entries to prevent cache evictions - refCountedCache.incRef("3"); - refCountedCache.incRef("4"); - refCountedCache.incRef("5"); - assertEquals(85L, refCountedCache.usage().usage()); - assertEquals(85L, refCountedCache.usage().activeUsage()); - - // adding cache entry while > capacity won't put entry to cache - refCountedCache.put("6", 15L); - assertNull(refCountedCache.get("6")); - assertEquals(85L, refCountedCache.usage().usage()); - assertEquals(85L, refCountedCache.usage().activeUsage()); - - // dec ref to add 6 instead of 3 - refCountedCache.decRef("3"); - refCountedCache.put("6", 15L); - assertNull(refCountedCache.get("3")); - assertEquals(15L, (long) refCountedCache.get("6")); - assertEquals(65L, refCountedCache.usage().usage()); - assertEquals(50L, refCountedCache.usage().activeUsage()); - - // check stats - assertEquals(4, refCountedCache.stats().evictionCount()); - assertEquals(9, refCountedCache.stats().hitCount()); - assertEquals(7, refCountedCache.stats().missCount()); - assertEquals(0, refCountedCache.stats().removeCount()); - assertEquals(1, refCountedCache.stats().replaceCount()); - - // remove one entry - refCountedCache.remove("6"); - assertNull(refCountedCache.get("6")); - assertEquals(50L, refCountedCache.usage().usage()); - assertEquals(50L, refCountedCache.usage().activeUsage()); - assertEquals(1, refCountedCache.stats().removeCount()); - } -} diff --git a/server/src/test/java/org/opensearch/index/store/remote/utils/cache/SegmentedCacheTests.java b/server/src/test/java/org/opensearch/index/store/remote/utils/cache/SegmentedCacheTests.java new file mode 100644 index 0000000000000..4081bf4d61907 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/remote/utils/cache/SegmentedCacheTests.java @@ -0,0 +1,17 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store.remote.utils.cache; + +public class SegmentedCacheTests extends RefCountedCacheTestCase { + public SegmentedCacheTests() { + super( + SegmentedCache.builder().capacity(CAPACITY).weigher(value -> value).listener(n -> {}).concurrencyLevel(1).build() + ); + } +} diff --git a/server/src/test/java/org/opensearch/index/translog/listener/TranslogListenerTests.java b/server/src/test/java/org/opensearch/index/translog/listener/TranslogListenerTests.java index 1f28e32a6dbec..3a27d26a630d6 100644 --- a/server/src/test/java/org/opensearch/index/translog/listener/TranslogListenerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/listener/TranslogListenerTests.java @@ -98,7 +98,9 @@ public void onFailure(String reason, Exception ex) { TranslogEventListener throwingListener = (TranslogEventListener) Proxy.newProxyInstance( TranslogEventListener.class.getClassLoader(), new Class[] { TranslogEventListener.class }, - (a, b, c) -> { throw new RuntimeException(); } + (a, b, c) -> { + throw new RuntimeException(); + } ); final List translogEventListeners = new LinkedList<>(Arrays.asList(listener, throwingListener, listener)); diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java index a50089831b3e9..ff4058f953642 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -148,7 +148,9 @@ public void testWriteFileChunksConcurrently() throws Exception { r.content(), r.lastChunk(), r.totalTranslogOps(), - ActionListener.wrap(ignored -> {}, e -> { throw new AssertionError(e); }) + ActionListener.wrap(ignored -> {}, e -> { + throw new AssertionError(e); + }) ); } } catch (Exception e) { diff --git a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java index bd3106454f49b..8cb7a5f6d8929 100644 --- a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java @@ -9,7 +9,6 @@ package org.opensearch.indices.replication; import org.junit.Assert; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -33,8 +32,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -76,13 +74,7 @@ public void setUp() throws Exception { ShardId testShardId = primary.shardId(); // This mirrors the creation of the ReplicationCheckpoint inside CopyState - testCheckpoint = new ReplicationCheckpoint( - testShardId, - primary.getOperationPrimaryTerm(), - 0L, - primary.getProcessedLocalCheckpoint(), - 0L - ); + testCheckpoint = new ReplicationCheckpoint(testShardId, primary.getOperationPrimaryTerm(), 0L, 0L); IndexService mockIndexService = mock(IndexService.class); when(mockIndicesService.indexService(testShardId.getIndex())).thenReturn(mockIndexService); 
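The OngoingSegmentReplicationsTests hunks just below change testShardAlreadyReplicatingToNode (a second prepareForReplication call for the same checkpoint now shares the cached CopyState instead of throwing) and add testCancelForMissingIds; both rely on CopyState being reference counted, with each active handler holding one reference and the state released only when the count returns to zero. A generic sketch of that pattern (modeled loosely on Lucene-style ref counting, not the actual CopyState implementation):

    import java.util.concurrent.atomic.AtomicInteger;

    // Generic ref-counted resource: creation takes the first reference, each
    // additional user calls incRef(), and closeInternal() runs exactly once,
    // when the final decRef() brings the count to zero.
    abstract class RefCountedResource {
        private final AtomicInteger refCount = new AtomicInteger(1);

        final void incRef() {
            if (refCount.getAndIncrement() <= 0) {
                refCount.getAndDecrement(); // undo; the resource is already closed
                throw new IllegalStateException("resource is already closed");
            }
        }

        final void decRef() {
            final int rc = refCount.decrementAndGet();
            if (rc == 0) {
                closeInternal();
            } else if (rc < 0) {
                throw new IllegalStateException("decRef() called after close");
            }
        }

        final int refCount() {
            return refCount.get();
        }

        protected abstract void closeInternal(); // release segments, files, buffers
    }

The assertions below (copyState.refCount() moving 1 -> 2 -> 1 -> 0 as handlers are added and cleared) follow directly from this contract.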
when(mockIndexService.getShard(testShardId.id())).thenReturn(primary); @@ -284,7 +276,8 @@ public void testShardAlreadyReplicatingToNode() throws IOException { listener.onResponse(null); }; replications.prepareForReplication(request, segmentSegmentFileChunkWriter); - assertThrows(OpenSearchException.class, () -> { replications.prepareForReplication(request, segmentSegmentFileChunkWriter); }); + CopyState copyState = replications.prepareForReplication(request, segmentSegmentFileChunkWriter); + assertEquals(1, copyState.refCount()); } public void testStartReplicationWithNoFilesToFetch() throws IOException { @@ -365,4 +358,45 @@ public void testCancelAllReplicationsForShard() throws IOException { assertEquals(0, replications.cachedCopyStateSize()); closeShards(replica_2); } + + public void testCancelForMissingIds() throws IOException { + // This tests when primary has multiple ongoing replications. + IndexShard replica_2 = newShard(primary.shardId(), false); + recoverReplica(replica_2, primary, true); + + OngoingSegmentReplications replications = new OngoingSegmentReplications(mockIndicesService, recoverySettings); + final String replicaAllocationId = replica.routingEntry().allocationId().getId(); + final CheckpointInfoRequest request = new CheckpointInfoRequest(1L, replicaAllocationId, primaryDiscoveryNode, testCheckpoint); + + final CopyState copyState = replications.prepareForReplication(request, mock(FileChunkWriter.class)); + assertEquals(1, copyState.refCount()); + + final String replica_2AllocationId = replica_2.routingEntry().allocationId().getId(); + final CheckpointInfoRequest secondRequest = new CheckpointInfoRequest( + 1L, + replica_2AllocationId, + replicaDiscoveryNode, + testCheckpoint + ); + replications.prepareForReplication(secondRequest, mock(FileChunkWriter.class)); + + assertEquals(2, copyState.refCount()); + assertEquals(2, replications.size()); + assertTrue(replications.getHandlers().containsKey(replicaAllocationId)); + assertTrue(replications.getHandlers().containsKey(replica_2AllocationId)); + assertEquals(1, replications.cachedCopyStateSize()); + + replications.clearOutOfSyncIds(primary.shardId(), Set.of(replica_2AllocationId)); + assertEquals(1, copyState.refCount()); + assertEquals(1, replications.size()); + assertTrue(replications.getHandlers().containsKey(replica_2AllocationId)); + assertEquals(1, replications.cachedCopyStateSize()); + + // cancel the primary's ongoing replications. 
+ replications.clearOutOfSyncIds(primary.shardId(), Collections.emptySet()); + assertEquals(0, copyState.refCount()); + assertEquals(0, replications.size()); + assertEquals(0, replications.cachedCopyStateSize()); + closeShards(replica_2); + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index 323445bee1274..d925956bd95ef 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -50,6 +50,8 @@ public class PrimaryShardReplicationSourceTests extends IndexShardTestCase { private IndexShard indexShard; private DiscoveryNode sourceNode; + private RecoverySettings recoverySettings; + @Override public void setUp() throws Exception { super.setUp(); @@ -73,6 +75,7 @@ public void setUp() throws Exception { indexShard = newStartedShard(true); + this.recoverySettings = recoverySettings; replicationSource = new PrimaryShardReplicationSource( localNode, indexShard.routingEntry().allocationId().toString(), @@ -90,13 +93,7 @@ public void tearDown() throws Exception { } public void testGetCheckpointMetadata() { - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - SEQ_NO, - VERSION - ); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), PRIMARY_TERM, SEGMENTS_GEN, VERSION); replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, mock(ActionListener.class)); CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); assertEquals(1, requestList.length); @@ -107,13 +104,7 @@ public void testGetCheckpointMetadata() { } public void testGetSegmentFiles() { - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - SEQ_NO, - VERSION - ); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), PRIMARY_TERM, SEGMENTS_GEN, VERSION); StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", Version.LATEST); replicationSource.getSegmentFiles( REPLICATION_ID, @@ -130,15 +121,31 @@ public void testGetSegmentFiles() { assertTrue(capturedRequest.request instanceof GetSegmentFilesRequest); } + /** + * This test verifies the transport request timeout value for fetching the segment files. 
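The javadoc above introduces testTransportTimeoutForGetSegmentFilesAction, which pins the GET_SEGMENT_FILES request to the recovery settings' long action timeout. The motivation is simple arithmetic: a single segment file can be on the order of a gigabyte, and at throttled recovery speeds the transfer alone can outlive a short per-request timeout. A back-of-the-envelope sketch (assumed throughput figure, illustrative only):

    // Illustrative only: estimate how long a large segment transfer takes.
    public class SegmentTransferTimeoutSketch {
        public static void main(String[] args) {
            long fileSizeBytes = (long) Math.pow(10, 9);     // ~1 GB, as in the test below
            long throughputBytesPerSec = 10L * 1024 * 1024;  // assume a throttled 10 MiB/s link
            long transferSeconds = fileSizeBytes / throughputBytesPerSec;
            System.out.println("expected transfer time ~" + transferSeconds + "s"); // ~95s
        }
    }

Anything in that range (and slower links or larger files push it much higher) rules out a seconds-scale default RPC timeout, hence the assertion against recoverySettings.internalActionLongTimeout().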
+ */ + public void testTransportTimeoutForGetSegmentFilesAction() { + long fileSize = (long) (Math.pow(10, 9)); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), PRIMARY_TERM, SEGMENTS_GEN, VERSION); + StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", fileSize, "checksum", Version.LATEST); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + Arrays.asList(testMetadata), + mock(Store.class), + mock(ActionListener.class) + ); + CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); + assertEquals(1, requestList.length); + CapturingTransport.CapturedRequest capturedRequest = requestList[0]; + assertEquals(SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES, capturedRequest.action); + assertEquals(sourceNode, capturedRequest.node); + assertEquals(recoverySettings.internalActionLongTimeout(), capturedRequest.options.timeout()); + } + public void testGetSegmentFiles_CancelWhileRequestOpen() throws InterruptedException { CountDownLatch latch = new CountDownLatch(1); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - SEQ_NO, - VERSION - ); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), PRIMARY_TERM, SEGMENTS_GEN, VERSION); StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", Version.LATEST); replicationSource.getSegmentFiles( REPLICATION_ID, diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentFileTransferHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentFileTransferHandlerTests.java index 63dab4f8883e8..26c55c30e2af3 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentFileTransferHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentFileTransferHandlerTests.java @@ -245,16 +245,13 @@ public void testSendFiles_CorruptIndexException() throws Exception { ); doNothing().when(shard).failShard(anyString(), any()); - assertThrows( - CorruptIndexException.class, - () -> { - handler.handleErrorOnSendFiles( - shard.store(), - new CorruptIndexException("test", "test"), - new StoreFileMetadata[] { SEGMENTS_FILE } - ); - } - ); + assertThrows(CorruptIndexException.class, () -> { + handler.handleErrorOnSendFiles( + shard.store(), + new CorruptIndexException("test", "test"), + new StoreFileMetadata[] { SEGMENTS_FILE } + ); + }); verify(shard, times(1)).failShard(any(), any()); } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java index b5d8b2baf40dc..607f9dd91e35e 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java @@ -73,7 +73,7 @@ public void testSendFiles() throws IOException { chunkWriter, threadPool, copyState, - primary.routingEntry().allocationId().getId(), + replica.routingEntry().allocationId().getId(), 5000, 1 ); @@ -111,7 +111,7 @@ public void testSendFiles_emptyRequest() throws IOException { chunkWriter, threadPool, copyState, - primary.routingEntry().allocationId().getId(), + replica.routingEntry().allocationId().getId(), 5000, 1 ); @@ -191,7 +191,7 @@ public void testReplicationAlreadyRunning() throws 
IOException { chunkWriter, threadPool, copyState, - primary.routingEntry().allocationId().getId(), + replica.routingEntry().allocationId().getId(), 5000, 1 ); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java index 6183f1e5d9dfb..66df13b89b4c3 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java @@ -55,13 +55,7 @@ public void setUp() throws Exception { when(mockIndexService.getShard(testShardId.id())).thenReturn(mockIndexShard); // This mirrors the creation of the ReplicationCheckpoint inside CopyState - testCheckpoint = new ReplicationCheckpoint( - testShardId, - mockIndexShard.getOperationPrimaryTerm(), - 0L, - mockIndexShard.getProcessedLocalCheckpoint(), - 0L - ); + testCheckpoint = new ReplicationCheckpoint(testShardId, mockIndexShard.getOperationPrimaryTerm(), 0L, 0L); testThreadPool = new TestThreadPool("test", Settings.EMPTY); CapturingTransport transport = new CapturingTransport(); localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 69e1e6f8de09b..bae0afb5bcc3b 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -65,7 +65,7 @@ public void setUp() throws Exception { primaryShard = newStartedShard(true, settings); replicaShard = newShard(false, settings, new NRTReplicationEngineFactory()); recoverReplica(replicaShard, primaryShard, true, getReplicationFunc(replicaShard)); - checkpoint = new ReplicationCheckpoint(replicaShard.shardId(), 0L, 0L, 0L, 0L); + checkpoint = new ReplicationCheckpoint(replicaShard.shardId(), 0L, 0L, 0L); SegmentReplicationSourceFactory replicationSourceFactory = mock(SegmentReplicationSourceFactory.class); replicationSource = mock(SegmentReplicationSource.class); when(replicationSourceFactory.get(replicaShard)).thenReturn(replicationSource); @@ -76,14 +76,12 @@ public void setUp() throws Exception { initialCheckpoint.getShardId(), initialCheckpoint.getPrimaryTerm(), initialCheckpoint.getSegmentsGen(), - initialCheckpoint.getSeqNo(), initialCheckpoint.getSegmentInfosVersion() + 1 ); newPrimaryCheckpoint = new ReplicationCheckpoint( initialCheckpoint.getShardId(), initialCheckpoint.getPrimaryTerm() + 1, initialCheckpoint.getSegmentsGen(), - initialCheckpoint.getSeqNo(), initialCheckpoint.getSegmentInfosVersion() + 1 ); } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 7e3fca9008bfb..599e73b548ddb 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -55,7 +55,6 @@ import java.util.Arrays; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.doThrow; 
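The SegmentReplicationTargetTests hunks below adjust how finalizeReplication is stubbed; since it is a void method, Mockito stubbing must lead with the action (doNothing()/doThrow()) rather than when(...).thenThrow(...). A self-contained reminder of the pattern using a toy interface and the standard Mockito API:

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.doNothing;
    import static org.mockito.Mockito.doThrow;
    import static org.mockito.Mockito.mock;

    public class VoidStubbingSketch {
        interface Publisher {
            void publish(Object checkpoint); // void: cannot be stubbed with when().thenReturn()
        }

        public static void main(String[] args) {
            Publisher quiet = mock(Publisher.class);
            doNothing().when(quiet).publish(any()); // explicit no-op stub for a void method
            quiet.publish(new Object());

            Publisher failing = mock(Publisher.class);
            doThrow(new IllegalStateException("boom")).when(failing).publish(any());
            try {
                failing.publish(new Object());
            } catch (IllegalStateException expected) {
                System.out.println("stubbed failure: " + expected.getMessage());
            }
        }
    }

The same shape appears throughout these tests as doThrow(exception).when(spyIndexShard).finalizeReplication(any()).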
import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -97,7 +96,7 @@ public void setUp() throws Exception { indexShard = newStartedShard(false, indexSettings, new NRTReplicationEngineFactory()); spyIndexShard = spy(indexShard); - Mockito.doNothing().when(spyIndexShard).finalizeReplication(any(SegmentInfos.class), anyLong()); + Mockito.doNothing().when(spyIndexShard).finalizeReplication(any(SegmentInfos.class)); testSegmentInfos = spyIndexShard.store().readLastCommittedSegmentsInfo(); buffer = new ByteBuffersDataOutput(); try (ByteBuffersIndexOutput indexOutput = new ByteBuffersIndexOutput(buffer, "", null)) { @@ -107,7 +106,6 @@ public void setUp() throws Exception { spyIndexShard.shardId(), spyIndexShard.getPendingPrimaryTerm(), testSegmentInfos.getGeneration(), - spyIndexShard.seqNoStats().getLocalCheckpoint(), testSegmentInfos.version ); } @@ -147,7 +145,7 @@ public void getSegmentFiles( @Override public void onResponse(Void replicationResponse) { try { - verify(spyIndexShard, times(1)).finalizeReplication(any(), anyLong()); + verify(spyIndexShard, times(1)).finalizeReplication(any()); segrepTarget.markAsDone(); } catch (IOException ex) { Assert.fail(); @@ -277,7 +275,7 @@ public void getSegmentFiles( ); segrepTarget = new SegmentReplicationTarget(repCheckpoint, spyIndexShard, segrepSource, segRepListener); - doThrow(exception).when(spyIndexShard).finalizeReplication(any(), anyLong()); + doThrow(exception).when(spyIndexShard).finalizeReplication(any()); segrepTarget.startReplication(new ActionListener() { @Override @@ -322,7 +320,7 @@ public void getSegmentFiles( ); segrepTarget = new SegmentReplicationTarget(repCheckpoint, spyIndexShard, segrepSource, segRepListener); - doThrow(exception).when(spyIndexShard).finalizeReplication(any(), anyLong()); + doThrow(exception).when(spyIndexShard).finalizeReplication(any()); segrepTarget.startReplication(new ActionListener() { @Override @@ -374,8 +372,8 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { - assert (e instanceof IllegalStateException); - segrepTarget.fail(new ReplicationFailedException(e), false); + assert (e instanceof ReplicationFailedException); + assert (e.getMessage().contains("different segment files")); } }); } diff --git a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java index 704b40b05c49d..2c05fbc9328e5 100644 --- a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java @@ -8,9 +8,9 @@ package org.opensearch.indices.replication.checkpoint; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.replication.ReplicationMode; import org.opensearch.action.support.replication.TransportReplicationAction; @@ -33,6 +33,7 @@ import java.util.Collections; import java.util.concurrent.atomic.AtomicBoolean; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.verify; @@ -103,10 +104,13 @@ public void 
testPublishCheckpointActionOnPrimary() { mockTargetService ); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 1111, 111, 11, 1); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 1111, 11, 1); final PublishCheckpointRequest request = new PublishCheckpointRequest(checkpoint); - expectThrows(OpenSearchException.class, () -> { action.shardOperationOnPrimary(request, indexShard, mock(ActionListener.class)); }); + action.shardOperationOnPrimary(request, indexShard, ActionTestUtils.assertNoFailureListener(result -> { + // we should forward the request containing the current publish checkpoint to the replica + assertThat(result.replicaRequest(), sameInstance(request)); + })); } public void testPublishCheckpointActionOnReplica() { @@ -135,7 +139,7 @@ public void testPublishCheckpointActionOnReplica() { mockTargetService ); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 1111, 111, 11, 1); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 1111, 11, 1); final PublishCheckpointRequest request = new PublishCheckpointRequest(checkpoint); diff --git a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java index 6f2be9db6b2dd..a87a8de206a39 100644 --- a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java @@ -12,7 +12,6 @@ import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.util.Version; -import org.opensearch.common.collect.Map; import org.opensearch.common.collect.Tuple; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.index.shard.IndexShard; @@ -23,6 +22,7 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import java.io.IOException; +import java.util.Map; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -67,13 +67,7 @@ public static IndexShard createMockIndexShard() throws IOException { when(mockShard.store()).thenReturn(mockStore); SegmentInfos testSegmentInfos = new SegmentInfos(Version.LATEST.major); - ReplicationCheckpoint testCheckpoint = new ReplicationCheckpoint( - mockShard.shardId(), - mockShard.getOperationPrimaryTerm(), - 0L, - mockShard.getProcessedLocalCheckpoint(), - 0L - ); + ReplicationCheckpoint testCheckpoint = new ReplicationCheckpoint(mockShard.shardId(), mockShard.getOperationPrimaryTerm(), 0L, 0L); final Tuple, ReplicationCheckpoint> gatedCloseableReplicationCheckpointTuple = new Tuple<>( new GatedCloseable<>(testSegmentInfos, () -> {}), testCheckpoint diff --git a/server/src/test/java/org/opensearch/ingest/CompoundProcessorTests.java b/server/src/test/java/org/opensearch/ingest/CompoundProcessorTests.java index fb0c55fce28f1..b299ac4d66996 100644 --- a/server/src/test/java/org/opensearch/ingest/CompoundProcessorTests.java +++ b/server/src/test/java/org/opensearch/ingest/CompoundProcessorTests.java @@ -75,9 +75,9 @@ public void testEmpty() throws Exception { public void testSingleProcessor() throws Exception { LongSupplier relativeTimeProvider = mock(LongSupplier.class); when(relativeTimeProvider.getAsLong()).thenReturn(0L, TimeUnit.MILLISECONDS.toNanos(1)); - TestProcessor processor = new TestProcessor( - ingestDocument -> { 
assertStats(0, ingestDocument.getFieldValue("compoundProcessor", CompoundProcessor.class), 1, 0, 0, 0); } - ); + TestProcessor processor = new TestProcessor(ingestDocument -> { + assertStats(0, ingestDocument.getFieldValue("compoundProcessor", CompoundProcessor.class), 1, 0, 0, 0); + }); CompoundProcessor compoundProcessor = new CompoundProcessor(relativeTimeProvider, processor); ingestDocument.setFieldValue("compoundProcessor", compoundProcessor); // ugly hack to assert current count = 1 assertThat(compoundProcessor.getProcessors().size(), equalTo(1)); diff --git a/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java b/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java index 1550dd65442a4..a383ab9b97918 100644 --- a/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java +++ b/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java @@ -164,21 +164,10 @@ public void testActsOnImmutableData() throws Exception { } public void testPrecompiledError() { - ScriptService scriptService = MockScriptService.singleContext( - IngestConditionalScript.CONTEXT, - code -> { - throw new ScriptException( - "bad script", - new ParseException("error", 0), - org.opensearch.common.collect.List.of(), - "", - "lang", - null - ); - }, - org.opensearch.common.collect.Map.of() - ); - Script script = new Script(ScriptType.INLINE, "lang", "foo", org.opensearch.common.collect.Map.of()); + ScriptService scriptService = MockScriptService.singleContext(IngestConditionalScript.CONTEXT, code -> { + throw new ScriptException("bad script", new ParseException("error", 0), List.of(), "", "lang", null); + }, Map.of()); + Script script = new Script(ScriptType.INLINE, "lang", "foo", Map.of()); ScriptException e = expectThrows(ScriptException.class, () -> new ConditionalProcessor(null, null, script, scriptService, null)); assertThat(e.getMessage(), equalTo("bad script")); } @@ -186,17 +175,10 @@ public void testPrecompiledError() { public void testRuntimeCompileError() { AtomicBoolean fail = new AtomicBoolean(false); Map storedScripts = new HashMap<>(); - storedScripts.put("foo", new StoredScriptSource("lang", "", org.opensearch.common.collect.Map.of())); + storedScripts.put("foo", new StoredScriptSource("lang", "", Map.of())); ScriptService scriptService = MockScriptService.singleContext(IngestConditionalScript.CONTEXT, code -> { if (fail.get()) { - throw new ScriptException( - "bad script", - new ParseException("error", 0), - org.opensearch.common.collect.List.of(), - "", - "lang", - null - ); + throw new ScriptException("bad script", new ParseException("error", 0), List.of(), "", "lang", null); } else { return params -> new IngestConditionalScript(params) { @Override @@ -206,12 +188,12 @@ public boolean execute(Map ctx) { }; } }, storedScripts); - Script script = new Script(ScriptType.STORED, null, "foo", org.opensearch.common.collect.Map.of()); + Script script = new Script(ScriptType.STORED, null, "foo", Map.of()); ConditionalProcessor processor = new ConditionalProcessor(null, null, script, scriptService, null); fail.set(true); // must change the script source or the cached version will be used - storedScripts.put("foo", new StoredScriptSource("lang", "changed", org.opensearch.common.collect.Map.of())); - IngestDocument ingestDoc = new IngestDocument(org.opensearch.common.collect.Map.of(), org.opensearch.common.collect.Map.of()); + storedScripts.put("foo", new StoredScriptSource("lang", "changed", Map.of())); + IngestDocument ingestDoc = new 
IngestDocument(Map.of(), Map.of()); processor.execute(ingestDoc, (doc, e) -> { assertThat(e.getMessage(), equalTo("bad script")); }); } @@ -224,11 +206,11 @@ public boolean execute(Map ctx) { throw new IllegalArgumentException("runtime problem"); } }, - org.opensearch.common.collect.Map.of() + Map.of() ); - Script script = new Script(ScriptType.INLINE, "lang", "foo", org.opensearch.common.collect.Map.of()); + Script script = new Script(ScriptType.INLINE, "lang", "foo", Map.of()); ConditionalProcessor processor = new ConditionalProcessor(null, null, script, scriptService, null); - IngestDocument ingestDoc = new IngestDocument(org.opensearch.common.collect.Map.of(), org.opensearch.common.collect.Map.of()); + IngestDocument ingestDoc = new IngestDocument(Map.of(), Map.of()); processor.execute(ingestDoc, (doc, e) -> { assertThat(e.getMessage(), equalTo("runtime problem")); }); } diff --git a/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java b/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java index a6ea02a5423c4..8358dadf9cc3a 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java @@ -471,7 +471,7 @@ public void testListAppendFieldValueWithDuplicate() { @SuppressWarnings("unchecked") List list = (List) object; assertThat(list.size(), equalTo(3)); - assertThat(list, equalTo(org.opensearch.common.collect.List.of("foo", "bar", "baz"))); + assertThat(list, equalTo(List.of("foo", "bar", "baz"))); } public void testListAppendFieldValueWithoutDuplicate() { @@ -481,7 +481,7 @@ public void testListAppendFieldValueWithoutDuplicate() { @SuppressWarnings("unchecked") List list = (List) object; assertThat(list.size(), equalTo(4)); - assertThat(list, equalTo(org.opensearch.common.collect.List.of("foo", "bar", "baz", "foo2"))); + assertThat(list, equalTo(List.of("foo", "bar", "baz", "foo2"))); } public void testListAppendFieldValues() { @@ -499,7 +499,7 @@ public void testListAppendFieldValues() { } public void testListAppendFieldValuesWithoutDuplicates() { - ingestDocument.appendFieldValue("list2", org.opensearch.common.collect.List.of("foo", "bar", "baz", "foo2"), false); + ingestDocument.appendFieldValue("list2", List.of("foo", "bar", "baz", "foo2"), false); Object object = ingestDocument.getSourceAndMetadata().get("list2"); assertThat(object, instanceOf(List.class)); @SuppressWarnings("unchecked") diff --git a/server/src/test/java/org/opensearch/ingest/PipelineProcessorTests.java b/server/src/test/java/org/opensearch/ingest/PipelineProcessorTests.java index 86e4dd261d408..9f8dda15eeb65 100644 --- a/server/src/test/java/org/opensearch/ingest/PipelineProcessorTests.java +++ b/server/src/test/java/org/opensearch/ingest/PipelineProcessorTests.java @@ -172,25 +172,16 @@ public void testPipelineProcessorWithPipelineChain() throws Exception { pipeline2Id, null, null, - new CompoundProcessor( - true, - Arrays.asList( - new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key1, randomInt()); }), - pipeline2Processor - ), - Collections.emptyList() - ), + new CompoundProcessor(true, Arrays.asList(new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key1, randomInt()); + }), pipeline2Processor), Collections.emptyList()), relativeTimeProvider ); relativeTimeProvider = mock(LongSupplier.class); when(relativeTimeProvider.getAsLong()).thenReturn(0L, TimeUnit.MILLISECONDS.toNanos(2)); - Pipeline pipeline3 = new Pipeline( - pipeline3Id, - null, - null, - new 
CompoundProcessor(new TestProcessor(ingestDocument -> { throw new RuntimeException("error"); })), - relativeTimeProvider - ); + Pipeline pipeline3 = new Pipeline(pipeline3Id, null, null, new CompoundProcessor(new TestProcessor(ingestDocument -> { + throw new RuntimeException("error"); + })), relativeTimeProvider); when(ingestService.getPipeline(pipeline1Id)).thenReturn(pipeline1); when(ingestService.getPipeline(pipeline2Id)).thenReturn(pipeline2); when(ingestService.getPipeline(pipeline3Id)).thenReturn(pipeline3); diff --git a/server/src/test/java/org/opensearch/ingest/TrackingResultProcessorTests.java b/server/src/test/java/org/opensearch/ingest/TrackingResultProcessorTests.java index 343b82e7c034b..16e60cec8c941 100644 --- a/server/src/test/java/org/opensearch/ingest/TrackingResultProcessorTests.java +++ b/server/src/test/java/org/opensearch/ingest/TrackingResultProcessorTests.java @@ -280,14 +280,17 @@ public void testActualCompoundProcessorWithFalseConditional() throws Exception { new HashMap<>(ScriptModule.CORE_CONTEXTS) ); - CompoundProcessor compoundProcessor = new CompoundProcessor( - new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key1, randomInt()); }), + CompoundProcessor compoundProcessor = new CompoundProcessor(new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key1, randomInt()); + }), new ConditionalProcessor( randomAlphaOfLength(10), null, new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Collections.emptyMap()), scriptService, - new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key2, randomInt()); }) + new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key2, randomInt()); + }) ), new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key3, randomInt()); }) ); @@ -331,16 +334,11 @@ public void testActualPipelineProcessor() throws Exception { String key2 = randomAlphaOfLength(10); String key3 = randomAlphaOfLength(10); - Pipeline pipeline = new Pipeline( - pipelineId, - null, - null, - new CompoundProcessor( - new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key1, randomInt()); }), - new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key2, randomInt()); }), - new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key3, randomInt()); }) - ) - ); + Pipeline pipeline = new Pipeline(pipelineId, null, null, new CompoundProcessor(new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key1, randomInt()); + }), new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key2, randomInt()); }), new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key3, randomInt()); + }))); when(ingestService.getPipeline(pipelineId)).thenReturn(pipeline); PipelineProcessor pipelineProcessor = factory.create(Collections.emptyMap(), null, null, pipelineConfig); @@ -406,29 +404,24 @@ public void testActualPipelineProcessorWithTrueConditional() throws Exception { new HashMap<>(ScriptModule.CORE_CONTEXTS) ); - Pipeline pipeline1 = new Pipeline( - pipelineId1, - null, - null, - new CompoundProcessor( - new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key1, randomInt()); }), - new ConditionalProcessor( - randomAlphaOfLength(10), - null, - new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Collections.emptyMap()), - scriptService, - factory.create(Collections.emptyMap(), "pipeline1", null, pipelineConfig2) - ), - new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key3, 
randomInt()); }) - ) - ); + Pipeline pipeline1 = new Pipeline(pipelineId1, null, null, new CompoundProcessor(new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key1, randomInt()); + }), + new ConditionalProcessor( + randomAlphaOfLength(10), + null, + new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Collections.emptyMap()), + scriptService, + factory.create(Collections.emptyMap(), "pipeline1", null, pipelineConfig2) + ), + new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key3, randomInt()); + }) + )); - Pipeline pipeline2 = new Pipeline( - pipelineId2, - null, - null, - new CompoundProcessor(new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key2, randomInt()); })) - ); + Pipeline pipeline2 = new Pipeline(pipelineId2, null, null, new CompoundProcessor(new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key2, randomInt()); + }))); when(ingestService.getPipeline(pipelineId1)).thenReturn(pipeline1); when(ingestService.getPipeline(pipelineId2)).thenReturn(pipeline2); @@ -503,29 +496,24 @@ public void testActualPipelineProcessorWithFalseConditional() throws Exception { new HashMap<>(ScriptModule.CORE_CONTEXTS) ); - Pipeline pipeline1 = new Pipeline( - pipelineId1, - null, - null, - new CompoundProcessor( - new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key1, randomInt()); }), - new ConditionalProcessor( - randomAlphaOfLength(10), - null, - new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Collections.emptyMap()), - scriptService, - factory.create(Collections.emptyMap(), null, null, pipelineConfig2) - ), - new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key3, randomInt()); }) - ) - ); + Pipeline pipeline1 = new Pipeline(pipelineId1, null, null, new CompoundProcessor(new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key1, randomInt()); + }), + new ConditionalProcessor( + randomAlphaOfLength(10), + null, + new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Collections.emptyMap()), + scriptService, + factory.create(Collections.emptyMap(), null, null, pipelineConfig2) + ), + new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key3, randomInt()); + }) + )); - Pipeline pipeline2 = new Pipeline( - pipelineId2, - null, - null, - new CompoundProcessor(new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key2, randomInt()); })) - ); + Pipeline pipeline2 = new Pipeline(pipelineId2, null, null, new CompoundProcessor(new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key2, randomInt()); + }))); when(ingestService.getPipeline(pipelineId1)).thenReturn(pipeline1); when(ingestService.getPipeline(pipelineId2)).thenReturn(pipeline2); @@ -579,20 +567,18 @@ public void testActualPipelineProcessorWithHandledFailure() throws Exception { String key2 = randomAlphaOfLength(10); String key3 = randomAlphaOfLength(10); - Pipeline pipeline = new Pipeline( - pipelineId, - null, - null, + Pipeline pipeline = new Pipeline(pipelineId, null, null, new CompoundProcessor(new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key1, randomInt()); + }), new CompoundProcessor( - new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key1, randomInt()); }), - new CompoundProcessor( - false, - Collections.singletonList(new TestProcessor(ingestDocument -> { throw exception; })), - Collections.singletonList(new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key2, 
randomInt()); })) - ), - new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key3, randomInt()); }) - ) - ); + false, + Collections.singletonList(new TestProcessor(ingestDocument -> { throw exception; })), + Collections.singletonList(new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key2, randomInt()); + })) + ), + new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key3, randomInt()); }) + )); when(ingestService.getPipeline(pipelineId)).thenReturn(pipeline); PipelineProcessor pipelineProcessor = factory.create(Collections.emptyMap(), null, null, pipelineConfig); @@ -650,7 +636,9 @@ public void testActualPipelineProcessorWithUnhandledFailure() throws Exception { null, new CompoundProcessor( new TestProcessor(ingestDocument -> ingestDocument.setFieldValue(key1, randomInt())), - new TestProcessor(ingestDocument -> { throw exception; }) + new TestProcessor(ingestDocument -> { + throw exception; + }) ) ); when(ingestService.getPipeline(pipelineId)).thenReturn(pipeline); @@ -730,12 +718,9 @@ public void testActualPipelineProcessorRepeatedInvocation() throws Exception { String key1 = randomAlphaOfLength(10); PipelineProcessor pipelineProcessor = factory.create(Collections.emptyMap(), null, null, pipelineConfig); - Pipeline pipeline = new Pipeline( - pipelineId, - null, - null, - new CompoundProcessor(new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key1, randomInt()); })) - ); + Pipeline pipeline = new Pipeline(pipelineId, null, null, new CompoundProcessor(new TestProcessor(ingestDocument -> { + ingestDocument.setFieldValue(key1, randomInt()); + }))); when(ingestService.getPipeline(pipelineId)).thenReturn(pipeline); // calls the same pipeline twice diff --git a/server/src/test/java/org/opensearch/lucene/misc/search/similarity/LegacyBM25SimilarityTests.java b/server/src/test/java/org/opensearch/lucene/misc/search/similarity/LegacyBM25SimilarityTests.java index 209b0d959c8b0..c58847bb56a3b 100644 --- a/server/src/test/java/org/opensearch/lucene/misc/search/similarity/LegacyBM25SimilarityTests.java +++ b/server/src/test/java/org/opensearch/lucene/misc/search/similarity/LegacyBM25SimilarityTests.java @@ -36,10 +36,9 @@ public class LegacyBM25SimilarityTests extends BaseSimilarityTestCase { public void testIllegalK1() { - IllegalArgumentException expected = expectThrows( - IllegalArgumentException.class, - () -> { new LegacyBM25Similarity(Float.POSITIVE_INFINITY, 0.75f); } - ); + IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> { + new LegacyBM25Similarity(Float.POSITIVE_INFINITY, 0.75f); + }); assertTrue(expected.getMessage().contains("illegal k1 value")); expected = expectThrows(IllegalArgumentException.class, () -> { new LegacyBM25Similarity(-1, 0.75f); }); diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java index c7586777387b8..1787d21af38ea 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java @@ -922,10 +922,9 @@ public void testNoExtensionConstructors() { class TestExtension implements TestExtensionPoint { private TestExtension() {} } - IllegalStateException e = expectThrows( - IllegalStateException.class, - () -> { PluginsService.createExtension(TestExtension.class, TestExtensionPoint.class, plugin); } - ); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> { + 
PluginsService.createExtension(TestExtension.class, TestExtensionPoint.class, plugin); + }); assertThat( e, @@ -950,10 +949,9 @@ public TestExtension(TestPlugin plugin) { } } - IllegalStateException e = expectThrows( - IllegalStateException.class, - () -> { PluginsService.createExtension(TestExtension.class, TestExtensionPoint.class, plugin); } - ); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> { + PluginsService.createExtension(TestExtension.class, TestExtensionPoint.class, plugin); + }); assertThat( e, @@ -971,10 +969,9 @@ public TestExtension(TestPlugin plugin) { public void testBadSingleParameterConstructor() { TestPlugin plugin = new TestPlugin(); - IllegalStateException e = expectThrows( - IllegalStateException.class, - () -> { PluginsService.createExtension(BadSingleParameterConstructorExtension.class, TestExtensionPoint.class, plugin); } - ); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> { + PluginsService.createExtension(BadSingleParameterConstructorExtension.class, TestExtensionPoint.class, plugin); + }); assertThat( e, @@ -996,10 +993,9 @@ public void testBadSingleParameterConstructor() { public void testTooManyParametersExtensionConstructors() { TestPlugin plugin = new TestPlugin(); - IllegalStateException e = expectThrows( - IllegalStateException.class, - () -> { PluginsService.createExtension(TooManyParametersConstructorExtension.class, TestExtensionPoint.class, plugin); } - ); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> { + PluginsService.createExtension(TooManyParametersConstructorExtension.class, TestExtensionPoint.class, plugin); + }); assertThat( e, @@ -1019,10 +1015,9 @@ public void testTooManyParametersExtensionConstructors() { public void testThrowingConstructor() { TestPlugin plugin = new TestPlugin(); - IllegalStateException e = expectThrows( - IllegalStateException.class, - () -> { PluginsService.createExtension(ThrowingConstructorExtension.class, TestExtensionPoint.class, plugin); } - ); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> { + PluginsService.createExtension(ThrowingConstructorExtension.class, TestExtensionPoint.class, plugin); + }); assertThat( e, diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index 6a8999a205be2..2ceab293f1320 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -101,7 +101,7 @@ public void setUp() throws Exception { when(clusterApplierService.threadPool()).thenReturn(threadPool); final ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterApplierService()).thenReturn(clusterApplierService); - Map typesRegistry = org.opensearch.common.collect.Map.of( + Map typesRegistry = Map.of( TestRepository.TYPE, TestRepository::new, MeteredRepositoryTypeA.TYPE, @@ -391,7 +391,7 @@ public void close() { private static class MeteredRepositoryTypeA extends MeteredBlobStoreRepository { private static final String TYPE = "type-a"; - private static final RepositoryStats STATS = new RepositoryStats(org.opensearch.common.collect.Map.of("GET", 10L)); + private static final RepositoryStats STATS = new RepositoryStats(Map.of("GET", 10L)); private MeteredRepositoryTypeA(RepositoryMetadata metadata, ClusterService clusterService) { super( @@ -400,7 +400,7 @@ 
private MeteredRepositoryTypeA(RepositoryMetadata metadata, ClusterService clust mock(NamedXContentRegistry.class), clusterService, mock(RecoverySettings.class), - org.opensearch.common.collect.Map.of("bucket", "bucket-a") + Map.of("bucket", "bucket-a") ); } @@ -422,7 +422,7 @@ public BlobPath basePath() { private static class MeteredRepositoryTypeB extends MeteredBlobStoreRepository { private static final String TYPE = "type-b"; - private static final RepositoryStats STATS = new RepositoryStats(org.opensearch.common.collect.Map.of("LIST", 20L)); + private static final RepositoryStats STATS = new RepositoryStats(Map.of("LIST", 20L)); private MeteredRepositoryTypeB(RepositoryMetadata metadata, ClusterService clusterService) { super( @@ -431,7 +431,7 @@ private MeteredRepositoryTypeB(RepositoryMetadata metadata, ClusterService clust mock(NamedXContentRegistry.class), clusterService, mock(RecoverySettings.class), - org.opensearch.common.collect.Map.of("bucket", "bucket-b") + Map.of("bucket", "bucket-b") ); } diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java index 4ad3e1ef85f70..cf0b06a3f7d16 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java @@ -37,6 +37,7 @@ import org.opensearch.test.OpenSearchTestCase; import java.util.List; +import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import static org.hamcrest.Matchers.equalTo; @@ -60,19 +61,14 @@ public void testStatsAreEvictedOnceTheyAreOlderThanRetentionPeriod() { fakeRelativeClock.set(retentionTimeInMillis * 2); int statsToBeRetainedCount = randomInt(10); for (int i = 0; i < statsToBeRetainedCount; i++) { - RepositoryStatsSnapshot repoStats = createRepositoryStats( - new RepositoryStats(org.opensearch.common.collect.Map.of("GET", 10L)) - ); + RepositoryStatsSnapshot repoStats = createRepositoryStats(new RepositoryStats(Map.of("GET", 10L))); repositoriesStatsArchive.archive(repoStats); } List archivedStats = repositoriesStatsArchive.getArchivedStats(); assertThat(archivedStats.size(), equalTo(statsToBeRetainedCount)); for (RepositoryStatsSnapshot repositoryStatsSnapshot : archivedStats) { - assertThat( - repositoryStatsSnapshot.getRepositoryStats().requestCounts, - equalTo(org.opensearch.common.collect.Map.of("GET", 10L)) - ); + assertThat(repositoryStatsSnapshot.getRepositoryStats().requestCounts, equalTo(Map.of("GET", 10L))); } } @@ -129,7 +125,7 @@ private RepositoryStatsSnapshot createRepositoryStats(RepositoryStats repository UUIDs.randomBase64UUID(), randomAlphaOfLength(10), randomAlphaOfLength(10), - org.opensearch.common.collect.Map.of("bucket", randomAlphaOfLength(10)), + Map.of("bucket", randomAlphaOfLength(10)), System.currentTimeMillis(), null ); diff --git a/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java index 53f124a91f0ac..58ea86dc569d5 100644 --- a/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java @@ -104,6 +104,7 @@ public void testSnapshotAndRestore() throws IOException, InterruptedException { .put("location", repo) .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + 
.put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path") .build(); int numDocs = indexDocs(directory); diff --git a/server/src/test/java/org/opensearch/rest/RestControllerTests.java b/server/src/test/java/org/opensearch/rest/RestControllerTests.java index bd4c7c9a4f824..c6f013985dea9 100644 --- a/server/src/test/java/org/opensearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/opensearch/rest/RestControllerTests.java @@ -118,11 +118,9 @@ public void setup() { new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY) ) ); - restController.registerHandler( - RestRequest.Method.GET, - "/error", - (request, channel, client) -> { throw new IllegalArgumentException("test error"); } - ); + restController.registerHandler(RestRequest.Method.GET, "/error", (request, channel, client) -> { + throw new IllegalArgumentException("test error"); + }); httpServerTransport.start(); } diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationActionTests.java index ec2b635d2d608..7a0d80d9538ad 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationActionTests.java @@ -11,43 +11,56 @@ import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.AllocationId; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Randomness; import org.opensearch.common.Table; +import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.Index; +import org.opensearch.index.SegmentReplicationPerGroupStats; +import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.test.OpenSearchTestCase; +import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; +import java.util.UUID; +import static java.util.Arrays.asList; import static org.hamcrest.CoreMatchers.equalTo; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class RestCatSegmentReplicationActionTests extends OpenSearchTestCase { - public void testSegmentReplicationAction() { + public void testSegmentReplicationAction() throws IOException { final RestCatSegmentReplicationAction action = new RestCatSegmentReplicationAction(); final int totalShards = randomIntBetween(1, 32); final int successfulShards = Math.max(0, totalShards - randomIntBetween(1, 2)); final int failedShards = totalShards - successfulShards; - final Map> shardSegmentReplicationStates = new HashMap<>(); - final List segmentReplicationStates = new ArrayList<>(); + final Map> shardSegmentReplicationStates = new HashMap<>(); + final List groupStats = new ArrayList<>(); + final long rejectedRequestCount = 5L; for (int i = 0; i < successfulShards; i++) { + final ShardId 
shardId = new ShardId(new Index("index", "_na_"), i); final SegmentReplicationState state = mock(SegmentReplicationState.class); final ShardRouting shardRouting = mock(ShardRouting.class); when(state.getShardRouting()).thenReturn(shardRouting); - when(shardRouting.shardId()).thenReturn(new ShardId(new Index("index", "_na_"), i)); + + when(shardRouting.shardId()).thenReturn(shardId); + final AllocationId aId = mock(AllocationId.class); + when(aId.getId()).thenReturn(UUID.randomUUID().toString()); + when(shardRouting.allocationId()).thenReturn(aId); when(state.getReplicationId()).thenReturn(randomLongBetween(0, 1000)); final ReplicationTimer timer = mock(ReplicationTimer.class); final long startTime = randomLongBetween(0, new Date().getTime()); @@ -60,19 +73,30 @@ public void testSegmentReplicationAction() { when(state.getSourceDescription()).thenReturn("Source"); final DiscoveryNode targetNode = mock(DiscoveryNode.class); when(targetNode.getHostName()).thenReturn(randomAlphaOfLength(8)); + when(targetNode.getName()).thenReturn(UUID.randomUUID().toString()); when(state.getTargetNode()).thenReturn(targetNode); ReplicationLuceneIndex index = createTestIndex(); when(state.getIndex()).thenReturn(index); - // - - segmentReplicationStates.add(state); + final SegmentReplicationShardStats segmentReplicationShardStats = new SegmentReplicationShardStats( + state.getShardRouting().allocationId().getId(), + 0L, + 0L, + 0L, + 0L + ); + segmentReplicationShardStats.setCurrentReplicationState(state); + final SegmentReplicationPerGroupStats perGroupStats = new SegmentReplicationPerGroupStats( + shardId, + Set.of(segmentReplicationShardStats), + rejectedRequestCount + ); + groupStats.add(perGroupStats); } - final List shuffle = new ArrayList<>(segmentReplicationStates); - Randomness.shuffle(shuffle); - shardSegmentReplicationStates.put("index", shuffle); + Randomness.shuffle(groupStats); + shardSegmentReplicationStates.put("index", groupStats); final List shardFailures = new ArrayList<>(); final SegmentReplicationStatsResponse response = new SegmentReplicationStatsResponse( @@ -88,18 +112,15 @@ public void testSegmentReplicationAction() { List headers = table.getHeaders(); - final List expectedHeaders = Arrays.asList( - "index", + final List expectedHeaders = asList( "shardId", - "time", - "stage", - "source_description", - "target_host", "target_node", - "files_fetched", - "files_percent", - "bytes_fetched", - "bytes_percent" + "target_host", + "checkpoints_behind", + "bytes_behind", + "current_lag", + "last_completed_lag", + "rejected_requests" ); for (int i = 0; i < expectedHeaders.size(); i++) { @@ -109,19 +130,20 @@ public void testSegmentReplicationAction() { assertThat(table.getRows().size(), equalTo(successfulShards)); for (int i = 0; i < successfulShards; i++) { - final SegmentReplicationState state = segmentReplicationStates.get(i); - final List expectedValues = Arrays.asList( - "index", - i, - new TimeValue(state.getTimer().time()), - state.getStage().name().toLowerCase(Locale.ROOT), - state.getSourceDescription(), - state.getTargetNode().getHostName(), - state.getTargetNode().getName(), - state.getIndex().recoveredFileCount(), - percent(state.getIndex().recoveredFilesPercent()), - state.getIndex().recoveredBytes(), - percent(state.getIndex().recoveredBytesPercent()) + final SegmentReplicationPerGroupStats perGroupStats = groupStats.get(i); + final Set replicaStats = perGroupStats.getReplicaStats(); + assertEquals(1, replicaStats.size()); + final SegmentReplicationShardStats shardStats = 
replicaStats.stream().findFirst().get(); + final SegmentReplicationState currentReplicationState = shardStats.getCurrentReplicationState(); + final List expectedValues = asList( + perGroupStats.getShardId(), + currentReplicationState.getTargetNode().getName(), + currentReplicationState.getTargetNode().getHostName(), + shardStats.getCheckpointsBehindCount(), + new ByteSizeValue(shardStats.getBytesBehindCount()), + new TimeValue(shardStats.getCurrentReplicationTimeMillis()), + new TimeValue(shardStats.getLastCompletedReplicationTimeMillis()), + rejectedRequestCount ); final List cells = table.getRows().get(i); diff --git a/server/src/test/java/org/opensearch/script/ScriptServiceTests.java b/server/src/test/java/org/opensearch/script/ScriptServiceTests.java index f5fc62719564f..0485fc9935f15 100644 --- a/server/src/test/java/org/opensearch/script/ScriptServiceTests.java +++ b/server/src/test/java/org/opensearch/script/ScriptServiceTests.java @@ -391,10 +391,9 @@ public void testDeleteScript() throws Exception { assertNull(scriptMetadata.getStoredScript("_id")); ScriptMetadata errorMetadata = scriptMetadata; - ResourceNotFoundException e = expectThrows( - ResourceNotFoundException.class, - () -> { ScriptMetadata.deleteStoredScript(errorMetadata, "_id"); } - ); + ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, () -> { + ScriptMetadata.deleteStoredScript(errorMetadata, "_id"); + }); assertEquals("stored script [_id] does not exist and cannot be deleted", e.getMessage()); } @@ -425,24 +424,17 @@ public void testGetStoredScript() throws Exception { public void testMaxSizeLimit() throws Exception { buildScriptService(Settings.builder().put(ScriptService.SCRIPT_MAX_SIZE_IN_BYTES.getKey(), 4).build()); scriptService.compile(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(contexts.values())); - IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, - () -> { - scriptService.compile( - new Script(ScriptType.INLINE, "test", "10+10", Collections.emptyMap()), - randomFrom(contexts.values()) - ); - } - ); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> { + scriptService.compile(new Script(ScriptType.INLINE, "test", "10+10", Collections.emptyMap()), randomFrom(contexts.values())); + }); assertEquals("exceeded max allowed inline script size in bytes [4] with size [5] for script [10+10]", iae.getMessage()); clusterSettings.applySettings(Settings.builder().put(ScriptService.SCRIPT_MAX_SIZE_IN_BYTES.getKey(), 6).build()); scriptService.compile(new Script(ScriptType.INLINE, "test", "10+10", Collections.emptyMap()), randomFrom(contexts.values())); clusterSettings.applySettings(Settings.builder().put(ScriptService.SCRIPT_MAX_SIZE_IN_BYTES.getKey(), 5).build()); scriptService.compile(new Script(ScriptType.INLINE, "test", "10+10", Collections.emptyMap()), randomFrom(contexts.values())); - iae = expectThrows( - IllegalArgumentException.class, - () -> { clusterSettings.applySettings(Settings.builder().put(ScriptService.SCRIPT_MAX_SIZE_IN_BYTES.getKey(), 2).build()); } - ); + iae = expectThrows(IllegalArgumentException.class, () -> { + clusterSettings.applySettings(Settings.builder().put(ScriptService.SCRIPT_MAX_SIZE_IN_BYTES.getKey(), 2).build()); + }); assertEquals( "script.max_size_in_bytes cannot be set to [2], stored script [test1] exceeds the new value with a size of [3]", iae.getMessage() @@ -537,10 +529,9 @@ public void testUseContextSettingValue() { 
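// Hedged editor's note: the expectThrows rewrites in this file (and in many files below)
// look behavior-neutral -- the throwing statement moves from a wrapped argument into a
// block lambda, plausibly formatter fallout from the Spotless bump recorded elsewhere in
// this patch. Generic shape, with a hypothetical stand-in for the throwing call:
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
    somethingThatThrows(); // hypothetical placeholder for the real statement
});
assertEquals("expected message", e.getMessage()); // the follow-up assertion is unchanged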
assertEquals(ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING.get(s), ScriptService.USE_CONTEXT_RATE_VALUE); - IllegalArgumentException illegal = expectThrows( - IllegalArgumentException.class, - () -> { ScriptService.SCRIPT_MAX_COMPILATIONS_RATE_SETTING.getAsMap(s); } - ); + IllegalArgumentException illegal = expectThrows(IllegalArgumentException.class, () -> { + ScriptService.SCRIPT_MAX_COMPILATIONS_RATE_SETTING.getAsMap(s); + }); assertEquals("parameter must contain a positive integer and a timevalue, i.e. 10/1m, but was [use-context]", illegal.getMessage()); assertSettingDeprecationsAndWarnings(new Setting[] { SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING }); diff --git a/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java b/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java index 5944e2a35b14a..60330f189362e 100644 --- a/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java +++ b/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java @@ -43,7 +43,7 @@ public void testDeletePitResponseToXContent() throws IOException { public void testDeletePitResponseToAndFromXContent() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); DeletePitResponse originalResponse = createDeletePitResponseTestItem(); - ; + BytesReference originalBytes = toShuffledXContent(originalResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); DeletePitResponse parsedResponse; try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { diff --git a/server/src/test/java/org/opensearch/search/DocValueFormatTests.java b/server/src/test/java/org/opensearch/search/DocValueFormatTests.java index bd0fbfe69960c..961b7bc3dec3f 100644 --- a/server/src/test/java/org/opensearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/opensearch/search/DocValueFormatTests.java @@ -76,6 +76,7 @@ public void testSerialization() throws Exception { DocValueFormat vf = in.readNamedWriteable(DocValueFormat.class); assertEquals(DocValueFormat.Decimal.class, vf.getClass()); assertEquals("###.##", ((DocValueFormat.Decimal) vf).pattern); + assertEquals(decimalFormat, vf); DateFormatter formatter = DateFormatter.forPattern("epoch_second"); DocValueFormat.DateTime dateFormat = new DocValueFormat.DateTime(formatter, ZoneOffset.ofHours(1), Resolution.MILLISECONDS); @@ -87,6 +88,7 @@ public void testSerialization() throws Exception { assertEquals("epoch_second", ((DocValueFormat.DateTime) vf).formatter.pattern()); assertEquals(ZoneOffset.ofHours(1), ((DocValueFormat.DateTime) vf).timeZone); assertEquals(Resolution.MILLISECONDS, ((DocValueFormat.DateTime) vf).resolution); + assertEquals(dateFormat, vf); DocValueFormat.DateTime nanosDateFormat = new DocValueFormat.DateTime(formatter, ZoneOffset.ofHours(1), Resolution.NANOSECONDS); out = new BytesStreamOutput(); @@ -97,6 +99,7 @@ public void testSerialization() throws Exception { assertEquals("epoch_second", ((DocValueFormat.DateTime) vf).formatter.pattern()); assertEquals(ZoneOffset.ofHours(1), ((DocValueFormat.DateTime) vf).timeZone); assertEquals(Resolution.NANOSECONDS, ((DocValueFormat.DateTime) vf).resolution); + assertEquals(nanosDateFormat, vf); out = new BytesStreamOutput(); out.writeNamedWriteable(DocValueFormat.GEOHASH); @@ -230,4 +233,23 @@ public void testDecimalParse() { assertEquals(0.859d, parser.parseDouble("0.859", true, null), 0.0d); assertEquals(0.8598023539251286d, parser.parseDouble("0.8598023539251286", true, null), 0.0d); } + 
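// Hedged editor's note: the assertEquals(decimalFormat, vf) style assertions added in this
// file only pass because the DocValueFormat variants now define value-based equals/hashCode;
// a wire round-trip yields a fresh instance, not the same reference. Sketch of the pattern,
// assuming the same registry and stream setup this test already uses:
BytesStreamOutput out = new BytesStreamOutput();
out.writeNamedWriteable(decimalFormat);
StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry);
DocValueFormat roundTripped = in.readNamedWriteable(DocValueFormat.class);
assertEquals(decimalFormat, roundTripped); // value equality, not reference identity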
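// Hedged editor's note on the geo-tile test added below: GEOTILE renders an encoded cell as
// "zoom/xTile/yTile" (standard web-mercator tile addressing), so lon=30, lat=70 at zoom 15
// maps to tile x=19114, y=7333. Assuming GeoTileUtils.longEncode(longitude, latitude, precision):
long encoded = longEncode(30, 70, 15);
assertEquals("15/19114/7333", DocValueFormat.GEOTILE.format(encoded));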
+ public void testLongParse() { + assertEquals(DocValueFormat.RAW.format(0), 0L); + assertEquals(DocValueFormat.RAW.format(-1), -1L); + assertEquals(DocValueFormat.RAW.format(1), 1L); + assertEquals(DocValueFormat.RAW.format(0d), 0d); + assertEquals(DocValueFormat.RAW.format(9.5d), 9.5d); + assertEquals(DocValueFormat.RAW.format(-1d), -1d); + } + + public void testGeoTileParse() { + assertEquals(DocValueFormat.GEOTILE.format(longEncode(0, 0, 0)), "0/0/0"); + assertEquals(DocValueFormat.GEOTILE.format(longEncode(30, 70, 15)), "15/19114/7333"); + assertEquals(DocValueFormat.GEOTILE.format(longEncode(179.999, 89.999, 29)), "29/536869420/0"); + assertEquals(DocValueFormat.GEOTILE.format(longEncode(-179.999, -89.999, 29)), "29/1491/536870911"); + assertEquals(DocValueFormat.GEOTILE.format(longEncode(1, 1, 2)), "2/2/1"); + assertEquals(DocValueFormat.GEOTILE.format(longEncode(13, 95, 1)), "1/1/0"); + assertEquals(DocValueFormat.GEOTILE.format(longEncode(13, -95, 1)), "1/1/1"); + } } diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 1f824d40eb638..7c341c9d3cc43 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -740,13 +740,9 @@ public void testOpenScrollContextsConcurrently() throws Exception { public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin { @Override public List> getQueries() { - return singletonList( - new QuerySpec<>( - "fail_on_rewrite_query", - FailOnRewriteQueryBuilder::new, - parseContext -> { throw new UnsupportedOperationException("No query parser for this plugin"); } - ) - ); + return singletonList(new QuerySpec<>("fail_on_rewrite_query", FailOnRewriteQueryBuilder::new, parseContext -> { + throw new UnsupportedOperationException("No query parser for this plugin"); + })); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollectorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollectorTests.java index 051056f7e0fdc..21e08b034f1d0 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollectorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollectorTests.java @@ -73,16 +73,7 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }, (deferringCollector, finalCollector) -> { deferringCollector.prepareSelectedBuckets(0, 8, 9); - equalTo( - org.opensearch.common.collect.Map.of( - 0L, - org.opensearch.common.collect.List.of(0, 1, 2, 3, 4, 5, 6, 7), - 1L, - org.opensearch.common.collect.List.of(8), - 2L, - org.opensearch.common.collect.List.of(9) - ) - ); + equalTo(Map.of(0L, List.of(0, 1, 2, 3, 4, 5, 6, 7), 1L, List.of(8), 2L, List.of(9))); }); } @@ -99,19 +90,7 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }, (deferringCollector, finalCollector) -> { deferringCollector.prepareSelectedBuckets(0, 8, 9); - assertThat( - finalCollector.collection, - equalTo( - org.opensearch.common.collect.Map.of( - 0L, - org.opensearch.common.collect.List.of(4, 5, 6, 7), - 1L, - org.opensearch.common.collect.List.of(8), - 2L, - org.opensearch.common.collect.List.of(9) - ) - ) - ); + assertThat(finalCollector.collection, equalTo(Map.of(0L, List.of(4, 5, 6, 7), 1L, List.of(8), 2L, List.of(9)))); }); } @@ -129,19 +108,7 
@@ public void collect(int doc, long owningBucketOrd) throws IOException { }, (deferringCollector, finalCollector) -> { deferringCollector.prepareSelectedBuckets(0, 8, 9); - assertThat( - finalCollector.collection, - equalTo( - org.opensearch.common.collect.Map.of( - 0L, - org.opensearch.common.collect.List.of(0, 1, 2, 3), - 1L, - org.opensearch.common.collect.List.of(8), - 2L, - org.opensearch.common.collect.List.of(9) - ) - ) - ); + assertThat(finalCollector.collection, equalTo(Map.of(0L, List.of(0, 1, 2, 3), 1L, List.of(8), 2L, List.of(9)))); }); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index 25003e0b84567..eabc4b7764eed 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -89,7 +89,9 @@ public void testUnmappedFieldWithTerms() throws Exception { Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset, () -> new CompositeAggregationBuilder("name", Arrays.asList(new TermsValuesSourceBuilder("unmapped").field("unmapped"))), - (result) -> { assertEquals(0, result.getBuckets().size()); } + (result) -> { + assertEquals(0, result.getBuckets().size()); + } ); testSearchCase( @@ -114,7 +116,9 @@ public void testUnmappedFieldWithTerms() throws Exception { "name", Arrays.asList(new TermsValuesSourceBuilder("unmapped").field("unmapped").missingBucket(true)) ).aggregateAfter(Collections.singletonMap("unmapped", null)), - (result) -> { assertEquals(0, result.getBuckets().size()); } + (result) -> { + assertEquals(0, result.getBuckets().size()); + } ); testSearchCase( @@ -127,7 +131,9 @@ public void testUnmappedFieldWithTerms() throws Exception { new TermsValuesSourceBuilder("unmapped").field("unmapped") ) ), - (result) -> { assertEquals(0, result.getBuckets().size()); } + (result) -> { + assertEquals(0, result.getBuckets().size()); + } ); testSearchCase( diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/InternalFilterTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/InternalFilterTests.java index b856a81515b52..110773a82404d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/InternalFilterTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/InternalFilterTests.java @@ -92,7 +92,7 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext } }; PipelineTree tree = new PipelineTree( - org.opensearch.common.collect.Map.of(inner.getName(), new PipelineTree(emptyMap(), singletonList(mockPipeline))), + Map.of(inner.getName(), new PipelineTree(emptyMap(), singletonList(mockPipeline))), emptyList() ); InternalFilter reduced = (InternalFilter) test.reducePipelines(test, emptyReduceContextBuilder().forFinalReduction(), tree); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/InternalFiltersTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/InternalFiltersTests.java index 176bfefdefc1e..038efc9f7c4f4 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/InternalFiltersTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/InternalFiltersTests.java @@ 
-156,7 +156,7 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext } }; PipelineTree tree = new PipelineTree( - org.opensearch.common.collect.Map.of(inner.getName(), new PipelineTree(emptyMap(), singletonList(mockPipeline))), + Map.of(inner.getName(), new PipelineTree(emptyMap(), singletonList(mockPipeline))), emptyList() ); InternalFilters reduced = (InternalFilters) test.reducePipelines(test, emptyReduceContextBuilder().forFinalReduction(), tree); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java index 0f49e02febabe..315f148ad5a02 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java @@ -305,7 +305,7 @@ public void testAsSubAggWithIncreasedRounding() throws IOException { int n = 0; for (long d = start; d < end; d += anHour) { docs.add( - org.opensearch.common.collect.List.of( + List.of( new SortedNumericDocValuesField(AGGREGABLE_DATE, d), new SortedSetDocValuesField("k1", aBytes), new SortedSetDocValuesField("k1", d < useC ? bBytes : cBytes), @@ -373,12 +373,7 @@ public void testAsSubAggInManyBuckets() throws IOException { List> docs = new ArrayList<>(); int n = 0; for (long d = start; d < end; d += anHour) { - docs.add( - org.opensearch.common.collect.List.of( - new SortedNumericDocValuesField(AGGREGABLE_DATE, d), - new SortedNumericDocValuesField("n", n % 100) - ) - ); + docs.add(List.of(new SortedNumericDocValuesField(AGGREGABLE_DATE, d), new SortedNumericDocValuesField("n", n % 100))); n++; } /* diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java index ff9122aa42326..f3cda87342c18 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java @@ -48,6 +48,7 @@ import java.io.IOException; import java.util.Collections; +import java.util.List; import java.util.function.Consumer; public abstract class DateHistogramAggregatorTestCase extends AggregatorTestCase { @@ -63,7 +64,7 @@ protected final void asSubAggTestCase(Aggregatio throws IOException { CheckedBiConsumer buildIndex = (iw, dft) -> { iw.addDocument( - org.opensearch.common.collect.List.of( + List.of( new SortedNumericDocValuesField(AGGREGABLE_DATE, dft.parse("2020-02-01T00:00:00Z")), new SortedSetDocValuesField("k1", new BytesRef("a")), new SortedSetDocValuesField("k2", new BytesRef("a")), @@ -71,7 +72,7 @@ protected final void asSubAggTestCase(Aggregatio ) ); iw.addDocument( - org.opensearch.common.collect.List.of( + List.of( new SortedNumericDocValuesField(AGGREGABLE_DATE, dft.parse("2020-03-01T00:00:00Z")), new SortedSetDocValuesField("k1", new BytesRef("a")), new SortedSetDocValuesField("k2", new BytesRef("a")), @@ -79,7 +80,7 @@ protected final void asSubAggTestCase(Aggregatio ) ); iw.addDocument( - org.opensearch.common.collect.List.of( + List.of( new SortedNumericDocValuesField(AGGREGABLE_DATE, dft.parse("2021-02-01T00:00:00Z")), new SortedSetDocValuesField("k1", new 
BytesRef("a")), new SortedSetDocValuesField("k2", new BytesRef("a")), @@ -87,7 +88,7 @@ protected final void asSubAggTestCase(Aggregatio ) ); iw.addDocument( - org.opensearch.common.collect.List.of( + List.of( new SortedNumericDocValuesField(AGGREGABLE_DATE, dft.parse("2021-03-01T00:00:00Z")), new SortedSetDocValuesField("k1", new BytesRef("a")), new SortedSetDocValuesField("k2", new BytesRef("b")), @@ -95,7 +96,7 @@ protected final void asSubAggTestCase(Aggregatio ) ); iw.addDocument( - org.opensearch.common.collect.List.of( + List.of( new SortedNumericDocValuesField(AGGREGABLE_DATE, dft.parse("2020-02-01T00:00:00Z")), new SortedSetDocValuesField("k1", new BytesRef("b")), new SortedSetDocValuesField("k2", new BytesRef("b")), diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index 597175d89bcfe..7bd39c72ae325 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -193,14 +193,14 @@ public void testAsSubAgg() throws IOException { InternalDateHistogram adh = a.getAggregations().get("dh"); assertThat( adh.getBuckets().stream().map(bucket -> bucket.getKey().toString()).collect(toList()), - equalTo(org.opensearch.common.collect.List.of("2020-01-01T00:00Z", "2021-01-01T00:00Z")) + equalTo(List.of("2020-01-01T00:00Z", "2021-01-01T00:00Z")) ); StringTerms.Bucket b = terms.getBucketByKey("b"); InternalDateHistogram bdh = b.getAggregations().get("dh"); assertThat( bdh.getBuckets().stream().map(bucket -> bucket.getKey().toString()).collect(toList()), - equalTo(org.opensearch.common.collect.List.of("2020-01-01T00:00Z")) + equalTo(List.of("2020-01-01T00:00Z")) ); }); builder = new TermsAggregationBuilder("k2").field("k2").subAggregation(builder); @@ -211,7 +211,7 @@ public void testAsSubAgg() throws IOException { InternalDateHistogram ak1adh = ak1a.getAggregations().get("dh"); assertThat( ak1adh.getBuckets().stream().map(bucket -> bucket.getKey().toString()).collect(toList()), - equalTo(org.opensearch.common.collect.List.of("2020-01-01T00:00Z", "2021-01-01T00:00Z")) + equalTo(List.of("2020-01-01T00:00Z", "2021-01-01T00:00Z")) ); StringTerms.Bucket b = terms.getBucketByKey("b"); @@ -220,13 +220,13 @@ public void testAsSubAgg() throws IOException { InternalDateHistogram bk1adh = bk1a.getAggregations().get("dh"); assertThat( bk1adh.getBuckets().stream().map(bucket -> bucket.getKey().toString()).collect(toList()), - equalTo(org.opensearch.common.collect.List.of("2021-01-01T00:00Z")) + equalTo(List.of("2021-01-01T00:00Z")) ); StringTerms.Bucket bk1b = bk1.getBucketByKey("b"); InternalDateHistogram bk1bdh = bk1b.getAggregations().get("dh"); assertThat( bk1bdh.getBuckets().stream().map(bucket -> bucket.getKey().toString()).collect(toList()), - equalTo(org.opensearch.common.collect.List.of("2020-01-01T00:00Z")) + equalTo(List.of("2020-01-01T00:00Z")) ); }); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java index e7b22a9a57476..bb9ed263ca3f6 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java +++ 
b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java @@ -303,10 +303,9 @@ public void testIncorrectFieldType() throws Exception { try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - expectThrows( - IllegalArgumentException.class, - () -> { searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, keywordField("field")); } - ); + expectThrows(IllegalArgumentException.class, () -> { + searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, keywordField("field")); + }); } } @@ -420,7 +419,7 @@ public void testAsSubAgg() throws IOException { List> docs = new ArrayList<>(); for (int n = 0; n < 10000; n++) { docs.add( - org.opensearch.common.collect.List.of( + List.of( new SortedNumericDocValuesField("outer", n % 100), new SortedNumericDocValuesField("inner", n / 100), new SortedNumericDocValuesField("n", n) diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java index 41bd0d77bff00..3ee9765e445fd 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java @@ -492,14 +492,14 @@ public void testAsSubAgg() throws IOException { List> docs = new ArrayList<>(); for (int n = 0; n < 10000; n++) { BytesRef outerRange = RangeType.LONG.encodeRanges( - org.opensearch.common.collect.Set.of(new RangeFieldMapper.Range(RangeType.LONG, n % 100, n % 100 + 10, true, true)) + Set.of(new RangeFieldMapper.Range(RangeType.LONG, n % 100, n % 100 + 10, true, true)) ); BytesRef innerRange = RangeType.LONG.encodeRanges( - org.opensearch.common.collect.Set.of(new RangeFieldMapper.Range(RangeType.LONG, n / 100, n / 100 + 10, true, true)) + Set.of(new RangeFieldMapper.Range(RangeType.LONG, n / 100, n / 100 + 10, true, true)) ); docs.add( - org.opensearch.common.collect.List.of( + List.of( new BinaryDocValuesField("outer", outerRange), new BinaryDocValuesField("inner", innerRange), new SortedNumericDocValuesField("n", n) diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java index b1d62f3402bc3..33f2a0b56b5ba 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java @@ -493,22 +493,12 @@ public void testAsSubAggregation() throws IOException { AggregationBuilder builder = new TermsAggregationBuilder("t").field("t") .subAggregation(new VariableWidthHistogramAggregationBuilder("v").field("v").setNumBuckets(2)); CheckedConsumer buildIndex = iw -> { - iw.addDocument( - org.opensearch.common.collect.List.of(new SortedNumericDocValuesField("t", 1), new SortedNumericDocValuesField("v", 1)) - ); - iw.addDocument( - org.opensearch.common.collect.List.of(new SortedNumericDocValuesField("t", 1), new SortedNumericDocValuesField("v", 10)) - ); - iw.addDocument( - org.opensearch.common.collect.List.of(new SortedNumericDocValuesField("t", 1), new SortedNumericDocValuesField("v", 11)) - ); + 
iw.addDocument(List.of(new SortedNumericDocValuesField("t", 1), new SortedNumericDocValuesField("v", 1))); + iw.addDocument(List.of(new SortedNumericDocValuesField("t", 1), new SortedNumericDocValuesField("v", 10))); + iw.addDocument(List.of(new SortedNumericDocValuesField("t", 1), new SortedNumericDocValuesField("v", 11))); - iw.addDocument( - org.opensearch.common.collect.List.of(new SortedNumericDocValuesField("t", 2), new SortedNumericDocValuesField("v", 20)) - ); - iw.addDocument( - org.opensearch.common.collect.List.of(new SortedNumericDocValuesField("t", 2), new SortedNumericDocValuesField("v", 30)) - ); + iw.addDocument(List.of(new SortedNumericDocValuesField("t", 2), new SortedNumericDocValuesField("v", 20))); + iw.addDocument(List.of(new SortedNumericDocValuesField("t", 2), new SortedNumericDocValuesField("v", 30))); }; Consumer verify = terms -> { /* @@ -520,14 +510,14 @@ public void testAsSubAggregation() throws IOException { InternalVariableWidthHistogram v1 = t1.getAggregations().get("v"); assertThat( v1.getBuckets().stream().map(InternalVariableWidthHistogram.Bucket::centroid).collect(toList()), - equalTo(org.opensearch.common.collect.List.of(1.0, 10.5)) + equalTo(List.of(1.0, 10.5)) ); LongTerms.Bucket t2 = terms.getBucketByKey("1"); InternalVariableWidthHistogram v2 = t2.getAggregations().get("v"); assertThat( v2.getBuckets().stream().map(InternalVariableWidthHistogram.Bucket::centroid).collect(toList()), - equalTo(org.opensearch.common.collect.List.of(20.0, 30)) + equalTo(List.of(20.0, 30)) ); }; Exception e = expectThrows( @@ -550,10 +540,12 @@ public void testSmallShardSize() throws Exception { IllegalArgumentException.class, () -> testSearchCase( DEFAULT_QUERY, - org.opensearch.common.collect.List.of(), + List.of(), true, aggregation -> aggregation.field(NUMERIC_FIELD).setNumBuckets(2).setShardSize(2), - histogram -> { fail(); } + histogram -> { + fail(); + } ) ); assertThat(e.getMessage(), equalTo("3/4 of shard_size must be at least buckets but was [1<2] for [_name]")); @@ -568,7 +560,7 @@ public void testHugeShardSize() throws Exception { aggregation -> aggregation.field(NUMERIC_FIELD).setShardSize(1000000000), histogram -> assertThat( histogram.getBuckets().stream().map(InternalVariableWidthHistogram.Bucket::getKey).collect(toList()), - equalTo(org.opensearch.common.collect.List.of(1.0, 2.0, 3.0)) + equalTo(List.of(1.0, 2.0, 3.0)) ) ); } @@ -578,10 +570,12 @@ public void testSmallInitialBuffer() throws Exception { IllegalArgumentException.class, () -> testSearchCase( DEFAULT_QUERY, - org.opensearch.common.collect.List.of(), + List.of(), true, aggregation -> aggregation.field(NUMERIC_FIELD).setInitialBuffer(1), - histogram -> { fail(); } + histogram -> { + fail(); + } ) ); assertThat(e.getMessage(), equalTo("initial_buffer must be at least buckets but was [1<10] for [_name]")); @@ -597,7 +591,7 @@ public void testOutOfOrderInitialBuffer() throws Exception { histogram -> { assertThat( histogram.getBuckets().stream().map(InternalVariableWidthHistogram.Bucket::getKey).collect(toList()), - equalTo(org.opensearch.common.collect.List.of(1.0, 2.0, 3.0)) + equalTo(List.of(1.0, 2.0, 3.0)) ); } ); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorTests.java index e888972b8e447..c544dcce45cce 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorTests.java +++ 
b/server/src/test/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorTests.java @@ -120,7 +120,7 @@ public void testMatchAllDocs() throws IOException { }, internalMissing -> { assertEquals(numDocs, internalMissing.getDocCount()); assertTrue(AggregationInspectionHelper.hasValue(internalMissing)); - }, org.opensearch.common.collect.List.of(aggFieldType, anotherFieldType)); + }, List.of(aggFieldType, anotherFieldType)); } public void testMatchSparse() throws IOException { @@ -145,7 +145,7 @@ public void testMatchSparse() throws IOException { testCase(newMatchAllQuery(), builder, writer -> writer.addDocuments(docs), internalMissing -> { assertEquals(finalDocsMissingAggField, internalMissing.getDocCount()); assertTrue(AggregationInspectionHelper.hasValue(internalMissing)); - }, org.opensearch.common.collect.List.of(aggFieldType, anotherFieldType)); + }, List.of(aggFieldType, anotherFieldType)); } public void testMatchSparseRangeField() throws IOException { @@ -225,7 +225,7 @@ public void testMissingParam() throws IOException { }, internalMissing -> { assertEquals(0, internalMissing.getDocCount()); assertFalse(AggregationInspectionHelper.hasValue(internalMissing)); - }, org.opensearch.common.collect.List.of(aggFieldType, anotherFieldType)); + }, List.of(aggFieldType, anotherFieldType)); } public void testMultiValuedField() throws IOException { @@ -241,7 +241,7 @@ public void testMultiValuedField() throws IOException { if (randomBoolean()) { final long randomLong = randomLong(); docs.add( - org.opensearch.common.collect.Set.of( + Set.of( new SortedNumericDocValuesField(aggFieldType.name(), randomLong), new SortedNumericDocValuesField(aggFieldType.name(), randomLong + 1) ) @@ -256,7 +256,7 @@ public void testMultiValuedField() throws IOException { testCase(newMatchAllQuery(), builder, writer -> writer.addDocuments(docs), internalMissing -> { assertEquals(finalDocsMissingAggField, internalMissing.getDocCount()); assertTrue(AggregationInspectionHelper.hasValue(internalMissing)); - }, org.opensearch.common.collect.List.of(aggFieldType, anotherFieldType)); + }, List.of(aggFieldType, anotherFieldType)); } public void testSingleValuedFieldWithValueScript() throws IOException { @@ -289,12 +289,12 @@ private void valueScriptTestCase(Script script) throws IOException { testCase(newMatchAllQuery(), builder, writer -> writer.addDocuments(docs), internalMissing -> { assertEquals(finalDocsMissingField, internalMissing.getDocCount()); assertTrue(AggregationInspectionHelper.hasValue(internalMissing)); - }, org.opensearch.common.collect.List.of(aggFieldType, anotherFieldType)); + }, List.of(aggFieldType, anotherFieldType)); } public void testMultiValuedFieldWithFieldScriptWithParams() throws IOException { final long threshold = 10; - final Map params = org.opensearch.common.collect.Map.of("field", "agg_field", "threshold", threshold); + final Map params = Map.of("field", "agg_field", "threshold", threshold); fieldScriptTestCase(new Script(ScriptType.INLINE, MockScriptEngine.NAME, FIELD_SCRIPT_PARAMS, params), threshold); } @@ -320,7 +320,7 @@ private void fieldScriptTestCase(Script script, long threshold) throws IOExcepti docsBelowThreshold++; } docs.add( - org.opensearch.common.collect.Set.of( + Set.of( new SortedNumericDocValuesField(aggFieldType.name(), firstValue), new SortedNumericDocValuesField(aggFieldType.name(), secondValue) ) @@ -362,7 +362,7 @@ protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldTy @Override protected List getSupportedValuesSourceTypes() { 
- return org.opensearch.common.collect.List.of( + return List.of( CoreValuesSourceType.NUMERIC, CoreValuesSourceType.BYTES, CoreValuesSourceType.GEOPOINT, diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java index e6d7230aa6cb0..9a2ef3de1dfe4 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java @@ -220,13 +220,9 @@ public void testKeywordField() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> testCase( - aggregationBuilder, - new MatchAllDocsQuery(), - iw -> { iw.addDocument(singleton(new SortedSetDocValuesField("string", new BytesRef("foo")))); }, - range -> fail("Should have thrown exception"), - fieldType - ) + () -> testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new SortedSetDocValuesField("string", new BytesRef("foo")))); + }, range -> fail("Should have thrown exception"), fieldType) ); assertEquals("Field [not_a_number] of type [keyword] is not supported for aggregation [date_range]", e.getMessage()); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregationBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregationBuilderTests.java index ef5af0b496113..a88d96a7a7614 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregationBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregationBuilderTests.java @@ -128,6 +128,6 @@ public void testNumericKeys() throws IOException { ); assertThat(builder.getName(), equalTo("test")); assertThat(builder.field(), equalTo("f")); - assertThat(builder.ranges, equalTo(org.opensearch.common.collect.List.of(new RangeAggregator.Range("1", null, 0d)))); + assertThat(builder.ranges, equalTo(List.of(new RangeAggregator.Range("1", null, 0d)))); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java index f1be4d1ede930..8b9bd388eb641 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java @@ -245,13 +245,9 @@ public void testUnsupportedType() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> testCase( - aggregationBuilder, - new MatchAllDocsQuery(), - iw -> { iw.addDocument(singleton(new SortedSetDocValuesField("string", new BytesRef("foo")))); }, - range -> fail("Should have thrown exception"), - fieldType - ) + () -> testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new SortedSetDocValuesField("string", new BytesRef("foo")))); + }, range -> fail("Should have thrown exception"), fieldType) ); assertEquals("Field [not_a_number] of type [keyword] is not supported for aggregation [range]", e.getMessage()); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilderTests.java 
b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilderTests.java index 505fb7382ab3b..c0b2b55fefeda 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilderTests.java @@ -91,10 +91,9 @@ protected MultiTermsAggregationBuilder createTestAggregatorBuilder() { } public void testInvalidTermsParams() { - IllegalArgumentException exception = expectThrows( - IllegalArgumentException.class, - () -> { new MultiTermsAggregationBuilder("_name").terms(Collections.singletonList(randomFieldConfig())); } - ); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { + new MultiTermsAggregationBuilder("_name").terms(Collections.singletonList(randomFieldConfig())); + }); assertEquals( "multi term aggregation must has at least 2 terms. Found [1] in [_name] Use terms aggregation for single term aggregation", exception.getMessage() diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java index f3922a65ff264..75ad9e12e0776 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java @@ -127,7 +127,7 @@ protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldTy @Override protected ScriptService getMockScriptService() { - final Map, Object>> scripts = org.opensearch.common.collect.Map.of( + final Map, Object>> scripts = Map.of( VALUE_SCRIPT_NAME, vars -> ((Number) vars.get("_value")).doubleValue() + 1, FIELD_SCRIPT_NAME, diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java index 678bc2fc6f536..debe4d343e874 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java @@ -376,25 +376,13 @@ public void testInsideTerms() throws IOException { StringTerms.Bucket even = terms.getBucketByKey("even"); InternalRareTerms evenRare = even.getAggregations().get("rare"); - assertEquals( - evenRare.getBuckets().stream().map(InternalRareTerms.Bucket::getKeyAsString).collect(toList()), - org.opensearch.common.collect.List.of("2") - ); - assertEquals( - evenRare.getBuckets().stream().map(InternalRareTerms.Bucket::getDocCount).collect(toList()), - org.opensearch.common.collect.List.of(2L) - ); + assertEquals(evenRare.getBuckets().stream().map(InternalRareTerms.Bucket::getKeyAsString).collect(toList()), List.of("2")); + assertEquals(evenRare.getBuckets().stream().map(InternalRareTerms.Bucket::getDocCount).collect(toList()), List.of(2L)); StringTerms.Bucket odd = terms.getBucketByKey("odd"); InternalRareTerms oddRare = odd.getAggregations().get("rare"); - assertEquals( - oddRare.getBuckets().stream().map(InternalRareTerms.Bucket::getKeyAsString).collect(toList()), - org.opensearch.common.collect.List.of("1") - ); - assertEquals( - oddRare.getBuckets().stream().map(InternalRareTerms.Bucket::getDocCount).collect(toList()), - 
org.opensearch.common.collect.List.of(1L) - ); + assertEquals(oddRare.getBuckets().stream().map(InternalRareTerms.Bucket::getKeyAsString).collect(toList()), List.of("1")); + assertEquals(oddRare.getBuckets().stream().map(InternalRareTerms.Bucket::getDocCount).collect(toList()), List.of(1L)); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java index bbf7f6cfd9cc2..e08ac840e1785 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java @@ -113,16 +113,9 @@ public void testRangeField() throws IOException { MappedFieldType fieldType = new RangeFieldMapper.RangeFieldType(fieldName, RangeType.DOUBLE); RangeFieldMapper.Range range = new RangeFieldMapper.Range(RangeType.DOUBLE, 1.0D, 5.0D, true, true); BytesRef encodedRange = RangeType.DOUBLE.encodeRanges(Collections.singleton(range)); - expectThrows( - IllegalArgumentException.class, - () -> testCase( - new DocValuesFieldExistsQuery(fieldName), - iw -> { iw.addDocument(singleton(new BinaryDocValuesField(fieldName, encodedRange))); }, - hdr -> {}, - fieldType, - fieldName - ) - ); + expectThrows(IllegalArgumentException.class, () -> testCase(new DocValuesFieldExistsQuery(fieldName), iw -> { + iw.addDocument(singleton(new BinaryDocValuesField(fieldName, encodedRange))); + }, hdr -> {}, fieldType, fieldName)); } public void testNoMatchingField() throws IOException { diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetricTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetricTests.java index 69c53d1a526e8..1a10a2fa35f84 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetricTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetricTests.java @@ -279,12 +279,7 @@ protected InternalScriptedMetric mutateInstance(InternalScriptedMetric instance) public void testOldSerialization() throws IOException { // A single element list looks like a fully reduced agg - InternalScriptedMetric original = new InternalScriptedMetric( - "test", - org.opensearch.common.collect.List.of("foo"), - new Script("test"), - null - ); + InternalScriptedMetric original = new InternalScriptedMetric("test", List.of("foo"), new Script("test"), null); original.mergePipelineTreeForBWCSerialization(PipelineTree.EMPTY); InternalScriptedMetric roundTripped = (InternalScriptedMetric) copyNamedWriteable( original, @@ -295,12 +290,7 @@ public void testOldSerialization() throws IOException { assertThat(roundTripped, equalTo(original)); // A multi-element list looks like a non-reduced agg - InternalScriptedMetric unreduced = new InternalScriptedMetric( - "test", - org.opensearch.common.collect.List.of("foo", "bar"), - new Script("test"), - null - ); + InternalScriptedMetric unreduced = new InternalScriptedMetric("test", List.of("foo", "bar"), new Script("test"), null); unreduced.mergePipelineTreeForBWCSerialization(PipelineTree.EMPTY); Exception e = expectThrows( IllegalArgumentException.class, diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/MinAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/MinAggregatorTests.java index 
05b3c5c7e57db..f0273b46ef97e 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/MinAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/MinAggregatorTests.java @@ -302,13 +302,9 @@ public void testUnsupportedType() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> testCase( - aggregationBuilder, - new MatchAllDocsQuery(), - iw -> { iw.addDocument(singleton(new SortedSetDocValuesField("string", new BytesRef("foo")))); }, - (Consumer) min -> { fail("Should have thrown exception"); }, - fieldType - ) + () -> testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new SortedSetDocValuesField("string", new BytesRef("foo")))); + }, (Consumer) min -> { fail("Should have thrown exception"); }, fieldType) ); assertEquals("Field [not_a_number] of type [keyword] is not supported for aggregation [min]", e.getMessage()); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java index f2a61521eff1f..6a4ecf01577d8 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java @@ -593,12 +593,9 @@ public void testInitScriptMakesArray() throws IOException { .mapScript(MAP_SCRIPT) .combineScript(COMBINE_SCRIPT) .reduceScript(REDUCE_SCRIPT); - testCase( - aggregationBuilder, - new MatchAllDocsQuery(), - iw -> { iw.addDocument(new Document()); }, - (InternalScriptedMetric r) -> { assertEquals(1, r.aggregation()); } - ); + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { iw.addDocument(new Document()); }, (InternalScriptedMetric r) -> { + assertEquals(1, r.aggregation()); + }); } public void testAsSubAgg() throws IOException { diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/StatsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/StatsAggregatorTests.java index d8d736595164a..c215c0959b342 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/StatsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/StatsAggregatorTests.java @@ -470,7 +470,7 @@ protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldTy @Override protected ScriptService getMockScriptService() { - final Map, Object>> scripts = org.opensearch.common.collect.Map.of( + final Map, Object>> scripts = Map.of( VALUE_SCRIPT_NAME, vars -> ((Number) vars.get("_value")).doubleValue() + 1, FIELD_SCRIPT_NAME, diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/SumAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/SumAggregatorTests.java index 8c0087ca0b87d..72b09d7509b02 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/SumAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/SumAggregatorTests.java @@ -418,7 +418,7 @@ protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldTy @Override protected ScriptService getMockScriptService() { - final Map, Object>> scripts = org.opensearch.common.collect.Map.of( + final Map, Object>> scripts = Map.of( VALUE_SCRIPT_NAME, vars -> ((Number) vars.get("_value")).doubleValue() + 1, 
FIELD_SCRIPT_NAME, diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java index 0aff1efff88ef..153ce14c0e385 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java @@ -49,6 +49,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Set; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -66,10 +67,7 @@ public void testLeafValues() throws IOException { .endObject() .endObject(); - List fieldAndFormats = org.opensearch.common.collect.List.of( - new FieldAndFormat("field", null), - new FieldAndFormat("object.field", null) - ); + List fieldAndFormats = List.of(new FieldAndFormat("field", null), new FieldAndFormat("object.field", null)); Map fields = fetchFields(mapperService, source, fieldAndFormats); assertThat(fields.size(), equalTo(2)); @@ -100,7 +98,7 @@ public void testObjectValues() throws IOException { DocumentField rangeField = fields.get("float_range"); assertNotNull(rangeField); assertThat(rangeField.getValues().size(), equalTo(1)); - assertThat(rangeField.getValue(), equalTo(org.opensearch.common.collect.Map.of("gte", 0.0f, "lte", 2.718f))); + assertThat(rangeField.getValue(), equalTo(Map.of("gte", 0.0f, "lte", 2.718f))); } public void testNonExistentField() throws IOException { @@ -255,7 +253,7 @@ public void testDateFormat() throws IOException { Map fields = fetchFields( mapperService, source, - org.opensearch.common.collect.List.of(new FieldAndFormat("field", null), new FieldAndFormat("date_field", "yyyy/MM/dd")) + List.of(new FieldAndFormat("field", null), new FieldAndFormat("date_field", "yyyy/MM/dd")) ); assertThat(fields.size(), equalTo(2)); @@ -440,7 +438,7 @@ public void testTextSubFields() throws IOException { private static Map fetchFields(MapperService mapperService, XContentBuilder source, String fieldPattern) throws IOException { - List fields = org.opensearch.common.collect.List.of(new FieldAndFormat(fieldPattern, null)); + List fields = List.of(new FieldAndFormat(fieldPattern, null)); return fetchFields(mapperService, source, fields); } @@ -451,7 +449,7 @@ private static Map fetchFields(MapperService mapperServic sourceLookup.setSource(BytesReference.bytes(source)); FieldFetcher fieldFetcher = FieldFetcher.create(createQueryShardContext(mapperService), null, fields); - return fieldFetcher.fetch(sourceLookup, org.opensearch.common.collect.Set.of()); + return fieldFetcher.fetch(sourceLookup, Set.of()); } public MapperService createMapperService() throws IOException { diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightFieldTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightFieldTests.java index 6fb53c2ed1da0..d3d7c1aa8d411 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightFieldTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightFieldTests.java @@ -53,19 +53,32 @@ public class HighlightFieldTests extends OpenSearchTestCase { public static HighlightField createTestItem() { String name = frequently() ? 
randomAlphaOfLengthBetween(5, 20) : randomRealisticUnicodeOfCodepointLengthBetween(5, 20); + name = replaceUnicodeControlCharacters(name); Text[] fragments = null; if (frequently()) { int size = randomIntBetween(0, 5); fragments = new Text[size]; for (int i = 0; i < size; i++) { - fragments[i] = new Text( - frequently() ? randomAlphaOfLengthBetween(10, 30) : randomRealisticUnicodeOfCodepointLengthBetween(10, 30) - ); + String fragmentText = frequently() + ? randomAlphaOfLengthBetween(10, 30) + : randomRealisticUnicodeOfCodepointLengthBetween(10, 30); + fragmentText = replaceUnicodeControlCharacters(fragmentText); + fragments[i] = new Text(fragmentText); } } return new HighlightField(name, fragments); } + public void testReplaceUnicodeControlCharacters() { + assertEquals("æÆ ¢¡Èýñ«Ò", replaceUnicodeControlCharacters("æÆ\u0000¢¡Èýñ«Ò")); + assertEquals("test_string_without_control_characters", replaceUnicodeControlCharacters("test_string_without_control_characters")); + assertEquals("æÆ@¢¡Èýñ«Ò", replaceUnicodeControlCharacters("æÆ\u0000¢¡Èýñ«Ò", "@")); + assertEquals( + "test_string_without_control_characters", + replaceUnicodeControlCharacters("test_string_without_control_characters", "@") + ); + } + public void testFromXContent() throws IOException { HighlightField highlightField = createTestItem(); XContentType xcontentType = randomFrom(XContentType.values()); diff --git a/server/src/test/java/org/opensearch/search/profile/ProfileResultTests.java b/server/src/test/java/org/opensearch/search/profile/ProfileResultTests.java index b24e8f31dd020..37d35586996d0 100644 --- a/server/src/test/java/org/opensearch/search/profile/ProfileResultTests.java +++ b/server/src/test/java/org/opensearch/search/profile/ProfileResultTests.java @@ -121,26 +121,8 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws public void testToXContent() throws IOException { List children = new ArrayList<>(); - children.add( - new ProfileResult( - "child1", - "desc1", - org.opensearch.common.collect.Map.of("key1", 100L), - org.opensearch.common.collect.Map.of(), - 100L, - org.opensearch.common.collect.List.of() - ) - ); - children.add( - new ProfileResult( - "child2", - "desc2", - org.opensearch.common.collect.Map.of("key1", 123356L), - org.opensearch.common.collect.Map.of(), - 123356L, - org.opensearch.common.collect.List.of() - ) - ); + children.add(new ProfileResult("child1", "desc1", Map.of("key1", 100L), Map.of(), 100L, List.of())); + children.add(new ProfileResult("child2", "desc2", Map.of("key1", 123356L), Map.of(), 123356L, List.of())); Map breakdown = new LinkedHashMap<>(); breakdown.put("key1", 123456L); breakdown.put("stuff", 10000L); @@ -225,14 +207,7 @@ public void testToXContent() throws IOException { Strings.toString(builder) ); - result = new ProfileResult( - "profileName", - "some description", - org.opensearch.common.collect.Map.of("key1", 12345678L), - org.opensearch.common.collect.Map.of(), - 12345678L, - org.opensearch.common.collect.List.of() - ); + result = new ProfileResult("profileName", "some description", Map.of("key1", 12345678L), Map.of(), 12345678L, List.of()); builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true); result.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals( @@ -248,14 +223,7 @@ public void testToXContent() throws IOException { Strings.toString(builder) ); - result = new ProfileResult( - "profileName", - "some description", - org.opensearch.common.collect.Map.of("key1", 1234567890L), - 
org.opensearch.common.collect.Map.of(), - 1234567890L, - org.opensearch.common.collect.List.of() - ); + result = new ProfileResult("profileName", "some description", Map.of("key1", 1234567890L), Map.of(), 1234567890L, List.of()); builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true); result.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals( diff --git a/server/src/test/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResultTests.java b/server/src/test/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResultTests.java index 2827bcd098bcc..3c1f2e08cf3db 100644 --- a/server/src/test/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResultTests.java +++ b/server/src/test/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResultTests.java @@ -87,7 +87,7 @@ public void testToXContent() throws IOException { breakdown.put("timing2", 4000L); Map debug = new LinkedHashMap<>(); debug.put("stuff", "stuff"); - debug.put("other_stuff", org.opensearch.common.collect.List.of("foo", "bar")); + debug.put("other_stuff", List.of("foo", "bar")); ProfileResult profileResult = new ProfileResult("someType", "someDescription", breakdown, debug, 6000L, Collections.emptyList()); profileResults.add(profileResult); AggregationProfileShardResult aggProfileResults = new AggregationProfileShardResult(profileResults); diff --git a/server/src/test/java/org/opensearch/search/suggest/SuggestBuilderTests.java b/server/src/test/java/org/opensearch/search/suggest/SuggestBuilderTests.java index 709cf0c6a763f..e34c48d59ef6e 100644 --- a/server/src/test/java/org/opensearch/search/suggest/SuggestBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/SuggestBuilderTests.java @@ -102,11 +102,9 @@ public void testFromXContent() throws IOException { public void testEqualsAndHashcode() throws IOException { for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) { // explicit about type parameters, see: https://bugs.eclipse.org/bugs/show_bug.cgi?id=481649 - EqualsHashCodeTestUtils.checkEqualsAndHashCode( - randomSuggestBuilder(), - original -> { return copyWriteable(original, namedWriteableRegistry, SuggestBuilder::new); }, - this::createMutation - ); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(randomSuggestBuilder(), original -> { + return copyWriteable(original, namedWriteableRegistry, SuggestBuilder::new); + }, this::createMutation); } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index a6677fbc0c99b..b9087db60c271 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -169,6 +169,7 @@ import org.opensearch.gateway.TransportNodesListGatewayStartedShards; import org.opensearch.index.Index; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.seqno.RetentionLeaseSyncer; @@ -1978,6 +1979,7 @@ public void onFailure(final Exception e) { new UpdateHelper(scriptService), actionFilters, new IndexingPressureService(settings, clusterService), + new SegmentReplicationPressureService(settings, clusterService, mock(IndicesService.class)), new SystemIndices(emptyMap()) ); actions.put( 
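The hunks above (and throughout this change) mechanically replace the deprecated `org.opensearch.common.collect.List`/`Map`/`Set` shims with the `java.util` factory methods they were backporting. A minimal sketch of why the swap is behavior-preserving, assuming nothing beyond the JDK; the class name is illustrative and not part of this change:

```java
import java.util.List;
import java.util.Map;
import java.util.Set;

public class CollectionFactoryDemo {
    public static void main(String[] args) {
        // was: org.opensearch.common.collect.List.of("a", "b")
        List<String> list = List.of("a", "b");
        // was: org.opensearch.common.collect.Map.of("key1", 100L)
        Map<String, Long> map = Map.of("key1", 100L);
        // was: org.opensearch.common.collect.Set.of(404, 405, 500)
        Set<Integer> set = Set.of(404, 405, 500);

        // Both the old shims and the JDK factories return unmodifiable
        // collections, so the tests keep the same semantics after the swap.
        try {
            list.add("c");
        } catch (UnsupportedOperationException expected) {
            System.out.println("still immutable after the migration");
        }
        System.out.println(list + " " + map + " " + set);
    }
}
```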
diff --git a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java index 4076e7229ebf7..c340042f99d86 100644 --- a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java @@ -63,6 +63,8 @@ import java.io.InputStream; import java.nio.ByteBuffer; import java.util.Collections; +import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; @@ -254,8 +256,8 @@ public void testSendsErrorResponseToHandshakeFromCompatibleVersion() throws Exce ); final InboundMessage requestMessage = unreadableInboundHandshake(remoteVersion, requestHeader); requestHeader.actionName = TransportHandshaker.HANDSHAKE_ACTION_NAME; - requestHeader.headers = Tuple.tuple(org.opensearch.common.collect.Map.of(), org.opensearch.common.collect.Map.of()); - requestHeader.features = org.opensearch.common.collect.Set.of(); + requestHeader.headers = Tuple.tuple(Map.of(), Map.of()); + requestHeader.features = Set.of(); handler.inboundMessage(channel, requestMessage); final BytesReference responseBytesReference = channel.getMessageCaptor().get(); @@ -294,8 +296,8 @@ public void testClosesChannelOnErrorInHandshakeWithIncompatibleVersion() throws ); final InboundMessage requestMessage = unreadableInboundHandshake(remoteVersion, requestHeader); requestHeader.actionName = TransportHandshaker.HANDSHAKE_ACTION_NAME; - requestHeader.headers = Tuple.tuple(org.opensearch.common.collect.Map.of(), org.opensearch.common.collect.Map.of()); - requestHeader.features = org.opensearch.common.collect.Set.of(); + requestHeader.headers = Tuple.tuple(Map.of(), Map.of()); + requestHeader.features = Set.of(); handler.inboundMessage(channel, requestMessage); assertTrue(isClosed.get()); assertNull(channel.getMessageCaptor().get()); @@ -332,7 +334,7 @@ public void testLogsSlowInboundProcessing() throws Exception { }); requestHeader.actionName = TransportHandshaker.HANDSHAKE_ACTION_NAME; requestHeader.headers = Tuple.tuple(Collections.emptyMap(), Collections.emptyMap()); - requestHeader.features = org.opensearch.common.collect.Set.of(); + requestHeader.features = Set.of(); handler.inboundMessage(channel, requestMessage); assertNotNull(channel.getMessageCaptor().get()); mockAppender.assertAllExpectationsMatched(); diff --git a/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java index 33f5a8d9e852f..0488f249cb09e 100644 --- a/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java @@ -168,12 +168,9 @@ public void testException() throws InterruptedException { }); TransportActionProxy.registerProxyAction(serviceB, "internal:test", SimpleTestResponse::new); serviceB.connectToNode(nodeC); - serviceC.registerRequestHandler( - "internal:test", - ThreadPool.Names.SAME, - SimpleTestRequest::new, - (request, channel, task) -> { throw new OpenSearchException("greetings from TS_C"); } - ); + serviceC.registerRequestHandler("internal:test", ThreadPool.Names.SAME, SimpleTestRequest::new, (request, channel, task) -> { + throw new OpenSearchException("greetings from TS_C"); + }); TransportActionProxy.registerProxyAction(serviceC, "internal:test", SimpleTestResponse::new); 
CountDownLatch latch = new CountDownLatch(1); diff --git a/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java b/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java index c0af5d6e76c59..357d7fffa8df6 100644 --- a/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java @@ -218,10 +218,9 @@ public void testNodeConnectWithDifferentNodeId() { emptySet(), handleB.discoveryNode.getVersion() ); - ConnectTransportException ex = expectThrows( - ConnectTransportException.class, - () -> { handleA.transportService.connectToNode(discoveryNode, TestProfiles.LIGHT_PROFILE); } - ); + ConnectTransportException ex = expectThrows(ConnectTransportException.class, () -> { + handleA.transportService.connectToNode(discoveryNode, TestProfiles.LIGHT_PROFILE); + }); assertThat(ex.getMessage(), containsString("unexpected remote node")); assertFalse(handleA.transportService.nodeConnected(discoveryNode)); } diff --git a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json index 53e5bbd9fa946..4a4fc7d2c81b1 100644 --- a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json +++ b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json @@ -17,7 +17,7 @@ "output_unigrams" : false, "filler_token" : "FILLER" } - } + } } } -} +} \ No newline at end of file diff --git a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis2.json b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis2.json index 19b4d24063b8e..5927549c111fd 100644 --- a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis2.json +++ b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis2.json @@ -9,7 +9,7 @@ "output_unigrams" : false, "filler_token" : "FILLER" } - } + } } } } diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index b8b5406bd39f3..43f3838999080 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -50,12 +50,12 @@ dependencies { api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - api 'net.minidev:json-smart:2.4.8' + api 'net.minidev:json-smart:2.4.9' api "org.mockito:mockito-core:${versions.mockito}" api "com.google.protobuf:protobuf-java:3.21.9" api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" api 'org.eclipse.jetty:jetty-server:9.4.49.v20220914' - api 'org.apache.zookeeper:zookeeper:3.8.0' + api 'org.apache.zookeeper:zookeeper:3.8.1' api "org.apache.commons:commons-text:1.10.0" api "commons-net:commons-net:3.9.0" runtimeOnly "com.google.guava:guava:${versions.guava}" diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 00d907ce6d108..255f554db7a79 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -88,7 +88,7 @@ test { systemProperty 'tests.gradle_index_compat_versions', BuildParams.bwcVersions.indexCompatible.join(',') systemProperty 'tests.gradle_wire_compat_versions', BuildParams.bwcVersions.wireCompatible.join(',') systemProperty 'tests.gradle_unreleased_versions', BuildParams.bwcVersions.unreleased.join(',') - + if 
(BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_18) { jvmArgs += ["-Djava.security.manager=allow"] } diff --git a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java index 1d527140dc038..ff0ad0c69e4e5 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java @@ -60,11 +60,14 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Formatter; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Random; import java.util.Set; +import java.util.TreeMap; import static java.util.Collections.emptyMap; import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -306,6 +309,101 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocat } } + /** + * Utility class to show shard distribution across nodes. + */ + public static class ShardAllocations { + + private static final String separator = "==================================================="; + private static final String ONE_LINE_RETURN = "\n"; + private static final String TWO_LINE_RETURN = "\n\n"; + + /** + Stores the primary/replica shard count against a node. + String: NodeId + int[]: tuple storing the primary shard count at index 0 and the replica count at index 1 + */ + static TreeMap<String, int[]> nodeToShardCountMap = new TreeMap<>(); + + /** + * Helper map containing NodeName to NodeId + */ + static TreeMap<String, String> nameToNodeId = new TreeMap<>(); + + /* + Unassigned array containing primary at 0, replica at 1 + */ + static int[] unassigned = new int[2]; + + static int[] totalShards = new int[2]; + + private final static String printShardAllocationWithHeader(int[] shardCount) { + StringBuffer sb = new StringBuffer(); + Formatter formatter = new Formatter(sb, Locale.getDefault()); + formatter.format("%-20s %-20s\n", "P", shardCount[0]); + formatter.format("%-20s %-20s\n", "R", shardCount[1]); + return sb.toString(); + } + + private static void reset() { + nodeToShardCountMap.clear(); + nameToNodeId.clear(); + totalShards[0] = totalShards[1] = 0; + unassigned[0] = unassigned[1] = 0; + } + + private static void buildMap(ClusterState inputState) { + reset(); + for (RoutingNode node : inputState.getRoutingNodes()) { + if (node.node().getName() != null && node.node().getName().isEmpty() == false) { + nameToNodeId.putIfAbsent(node.node().getName(), node.nodeId()); + } else { + nameToNodeId.putIfAbsent(node.nodeId(), node.nodeId()); + } + nodeToShardCountMap.putIfAbsent(node.nodeId(), new int[] { 0, 0 }); + } + for (ShardRouting shardRouting : inputState.routingTable().allShards()) { + // Fetch the shard's counters to update; unassigned shards share one array + updateMap(nodeToShardCountMap, shardRouting); + } + } + + private static void updateMap(TreeMap<String, int[]> mapToUpdate, ShardRouting shardRouting) { + int[] shard; + shard = shardRouting.assignedToNode() ? 
mapToUpdate.get(shardRouting.currentNodeId()) : unassigned; + // Update shard type count + if (shardRouting.primary()) { + shard[0]++; + totalShards[0]++; + } else { + shard[1]++; + totalShards[1]++; + } + // For assigned shards, put back counter + if (shardRouting.assignedToNode()) mapToUpdate.put(shardRouting.currentNodeId(), shard); + } + + private static String allocation() { + StringBuffer sb = new StringBuffer(); + sb.append(TWO_LINE_RETURN + separator + ONE_LINE_RETURN); + Formatter formatter = new Formatter(sb, Locale.getDefault()); + for (Map.Entry<String, String> entry : nameToNodeId.entrySet()) { + String nodeId = nameToNodeId.get(entry.getKey()); + formatter.format("%-20s\n", entry.getKey().toUpperCase(Locale.getDefault())); + sb.append(printShardAllocationWithHeader(nodeToShardCountMap.get(nodeId))); + } + sb.append(ONE_LINE_RETURN); + formatter.format("%-20s (P)%-5s (R)%-5s\n\n", "Unassigned ", unassigned[0], unassigned[1]); + formatter.format("%-20s (P)%-5s (R)%-5s\n\n", "Total Shards", totalShards[0], totalShards[1]); + return sb.toString(); + } + + public static String printShardDistribution(ClusterState state) { + buildMap(state); + return allocation(); + } + } + /** A mock {@link AllocationService} allowing tests to override time */ protected static class MockAllocationService extends AllocationService { diff --git a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java index 29207cbc4ada1..76325b6a0035b 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java @@ -205,6 +205,28 @@ public static ShardRouting newShardRouting( ); } + public static ShardRouting newShardRouting( + ShardId shardId, + String currentNodeId, + String relocatingNodeId, + boolean primary, + ShardRoutingState state, + RecoverySource recoverySource, + UnassignedInfo unassignedInfo + ) { + return new ShardRouting( + shardId, + currentNodeId, + relocatingNodeId, + primary, + state, + recoverySource, + unassignedInfo, + buildAllocationId(state), + -1 + ); + } + public static ShardRouting relocate(ShardRouting shardRouting, String relocatingNodeId, long expectedShardSize) { return shardRouting.relocate(relocatingNodeId, expectedShardSize); } diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/FieldTypeTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/FieldTypeTestCase.java index 571f1b21dd7e3..7ed0da8509fab 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/FieldTypeTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/FieldTypeTestCase.java @@ -38,6 +38,7 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Set; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -65,7 +66,7 @@ public static List<?> fetchSourceValue(MappedFieldType fieldType, Object sourceV public static List<?> fetchSourceValue(MappedFieldType fieldType, Object sourceValue, String format) throws IOException { String field = fieldType.name(); QueryShardContext context = mock(QueryShardContext.class); - when(context.sourcePath(field)).thenReturn(org.opensearch.common.collect.Set.of(field)); + when(context.sourcePath(field)).thenReturn(Set.of(field)); ValueFetcher fetcher = fieldType.valueFetcher(context, null, format); SourceLookup lookup = new SourceLookup(); diff 
--git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java index db6c53bd0aa77..4b3405e925fd6 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java @@ -155,7 +155,9 @@ protected final MapperService createMapperService(Version version, XContentBuild xContentRegistry(), similarityService, mapperRegistry, - () -> { throw new UnsupportedOperationException(); }, + () -> { + throw new UnsupportedOperationException(); + }, () -> true, scriptService ); @@ -249,9 +251,9 @@ QueryShardContext createQueryShardContext(MapperService mapperService) { inv -> mapperService.simpleMatchToFullName(inv.getArguments()[0].toString()) ); when(queryShardContext.allowExpensiveQueries()).thenReturn(true); - when(queryShardContext.lookup()).thenReturn( - new SearchLookup(mapperService, (ft, s) -> { throw new UnsupportedOperationException("search lookup not available"); }) - ); + when(queryShardContext.lookup()).thenReturn(new SearchLookup(mapperService, (ft, s) -> { + throw new UnsupportedOperationException("search lookup not available"); + })); return queryShardContext; } } diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java index c00a3831c0d8d..151170cd03aff 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java @@ -286,22 +286,22 @@ protected final List fetchFromDocValues(MapperService mapperService, MappedFi throws IOException { BiFunction, IndexFieldData> fieldDataLookup = (mft, lookupSource) -> mft - .fielddataBuilder("test", () -> { throw new UnsupportedOperationException(); }) + .fielddataBuilder("test", () -> { + throw new UnsupportedOperationException(); + }) .build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService()); SetOnce> result = new SetOnce<>(); - withLuceneIndex( - mapperService, - iw -> { iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field(ft.name(), sourceValue))).rootDoc()); }, - iw -> { - SearchLookup lookup = new SearchLookup(mapperService, fieldDataLookup); - ValueFetcher valueFetcher = new DocValueFetcher(format, lookup.doc().getForField(ft)); - IndexSearcher searcher = newSearcher(iw); - LeafReaderContext context = searcher.getIndexReader().leaves().get(0); - lookup.source().setSegmentAndDocument(context, 0); - valueFetcher.setNextReader(context); - result.set(valueFetcher.fetchValues(lookup.source())); - } - ); + withLuceneIndex(mapperService, iw -> { + iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field(ft.name(), sourceValue))).rootDoc()); + }, iw -> { + SearchLookup lookup = new SearchLookup(mapperService, fieldDataLookup); + ValueFetcher valueFetcher = new DocValueFetcher(format, lookup.doc().getForField(ft)); + IndexSearcher searcher = newSearcher(iw); + LeafReaderContext context = searcher.getIndexReader().leaves().get(0); + lookup.source().setSegmentAndDocument(context, 0); + valueFetcher.setNextReader(context); + result.set(valueFetcher.fetchValues(lookup.source())); + }); return result.get(); } diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java 
b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index 8df57ccad85cc..e349fbdd6f1fc 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -237,7 +237,9 @@ protected class ReplicationGroup implements AutoCloseable, Iterable (shardId, primaryAllocationId, primaryTerm, retentionLeases) -> syncRetentionLeases( shardId, retentionLeases, - ActionListener.wrap(r -> {}, e -> { throw new AssertionError("failed to background sync retention lease", e); }) + ActionListener.wrap(r -> {}, e -> { + throw new AssertionError("failed to background sync retention lease", e); + }) ) ); diff --git a/test/framework/src/main/java/org/opensearch/index/seqno/RetentionLeaseUtils.java b/test/framework/src/main/java/org/opensearch/index/seqno/RetentionLeaseUtils.java index bf06b481a2377..8216f1053a60b 100644 --- a/test/framework/src/main/java/org/opensearch/index/seqno/RetentionLeaseUtils.java +++ b/test/framework/src/main/java/org/opensearch/index/seqno/RetentionLeaseUtils.java @@ -53,13 +53,8 @@ public static Map toMapExcludingPeerRecoveryRetentionLea return retentionLeases.leases() .stream() .filter(l -> ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE.equals(l.source()) == false) - .collect( - Collectors.toMap( - RetentionLease::id, - Function.identity(), - (o1, o2) -> { throw new AssertionError("unexpectedly merging " + o1 + " and " + o2); }, - LinkedHashMap::new - ) - ); + .collect(Collectors.toMap(RetentionLease::id, Function.identity(), (o1, o2) -> { + throw new AssertionError("unexpectedly merging " + o1 + " and " + o2); + }, LinkedHashMap::new)); } } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index d39e190a6f124..ab0cf38f77c7d 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -1365,6 +1365,10 @@ public void onReplicationDone(SegmentReplicationState state) { assertTrue(recoveryDiff.missing.isEmpty()); assertTrue(recoveryDiff.different.isEmpty()); assertEquals(recoveryDiff.identical.size(), primaryMetadata.size()); + primaryShard.updateVisibleCheckpointForShard( + replica.routingEntry().allocationId().getId(), + primaryShard.getLatestReplicationCheckpoint() + ); } catch (Exception e) { throw ExceptionsHelper.convertToRuntime(e); } finally { diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java index f082c7a45a207..85ff45b80fad6 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java @@ -358,7 +358,7 @@ public HttpHandler getDelegate() { } synchronized Map getOperationsCount() { - return org.opensearch.common.collect.Map.copyOf(operationCount); + return Map.copyOf(operationCount); } protected synchronized void trackRequest(final String requestType) { diff --git 
a/test/framework/src/main/java/org/opensearch/script/MockScriptService.java b/test/framework/src/main/java/org/opensearch/script/MockScriptService.java index e4887255d0b9f..4fbc4c4d4bc90 100644 --- a/test/framework/src/main/java/org/opensearch/script/MockScriptService.java +++ b/test/framework/src/main/java/org/opensearch/script/MockScriptService.java @@ -79,14 +79,10 @@ public FactoryType compile( @Override public Set> getSupportedContexts() { - return org.opensearch.common.collect.Set.of(context); + return Set.of(context); } }; - return new MockScriptService( - Settings.EMPTY, - org.opensearch.common.collect.Map.of("lang", engine), - org.opensearch.common.collect.Map.of(context.name, context) - ) { + return new MockScriptService(Settings.EMPTY, Map.of("lang", engine), Map.of(context.name, context)) { @Override protected StoredScriptSource getScriptFromClusterState(String id) { return storedLookup.get(id); diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java index fdf26608509f1..1b5abad719fbd 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java @@ -369,12 +369,9 @@ private static class ServiceHolder implements Closeable { boolean registerType ) throws IOException { this.nowInMillis = nowInMillis; - Environment env = InternalSettingsPreparer.prepareEnvironment( - nodeSettings, - emptyMap(), - null, - () -> { throw new AssertionError("node.name must be set"); } - ); + Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings, emptyMap(), null, () -> { + throw new AssertionError("node.name must be set"); + }); PluginsService pluginsService; pluginsService = new PluginsService(nodeSettings, null, env.modulesFile(), env.pluginsFile(), plugins); diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java index ac493bdd68d71..c33fcb69eb9e2 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java @@ -732,7 +732,8 @@ protected static String getRandomRewriteMethod() { .getPreferredName(); } else { rewrite = randomFrom(QueryParsers.TOP_TERMS, QueryParsers.TOP_TERMS_BOOST, QueryParsers.TOP_TERMS_BLENDED_FREQS) - .getPreferredName() + "1"; + .getPreferredName() + + "1"; } return rewrite; } diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 2de50fabbe396..240f6ef9c6e56 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -218,7 +218,7 @@ public final class InternalTestCluster extends TestCluster { nodeAndClient.node.settings() ); - private static final ByteSizeValue DEFAULT_SEARCH_CACHE_SIZE = new ByteSizeValue(100, ByteSizeUnit.MB); + private static final ByteSizeValue DEFAULT_SEARCH_CACHE_SIZE = new ByteSizeValue(2, ByteSizeUnit.GB); public static final int DEFAULT_LOW_NUM_CLUSTER_MANAGER_NODES = 1; public static final int DEFAULT_HIGH_NUM_CLUSTER_MANAGER_NODES = 3; diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java 
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
index 3d74b0df5f283..07eec1f2504fb 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
@@ -626,7 +626,7 @@ protected static void wipeDataStreams() throws IOException {
             // We hit a version of ES that doesn't serialize DeleteDataStreamAction.Request#wildcardExpressionsOriginallySpecified field or
             // that doesn't support data streams so it's safe to ignore
             int statusCode = e.getResponse().getStatusLine().getStatusCode();
-            if (org.opensearch.common.collect.Set.of(404, 405, 500).contains(statusCode) == false) {
+            if (Set.of(404, 405, 500).contains(statusCode) == false) {
                 throw e;
             }
         }
diff --git a/test/framework/src/main/java/org/opensearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/CapturingTransport.java
index f49f500d1431c..5e2c724e4e1e3 100644
--- a/test/framework/src/main/java/org/opensearch/test/transport/CapturingTransport.java
+++ b/test/framework/src/main/java/org/opensearch/test/transport/CapturingTransport.java
@@ -36,6 +36,7 @@
 import org.opensearch.common.util.concurrent.ConcurrentCollections;
 import org.opensearch.transport.Transport;
 import org.opensearch.transport.TransportRequest;
+import org.opensearch.transport.TransportRequestOptions;

 import java.util.ArrayList;
 import java.util.Collection;
@@ -54,12 +55,14 @@ public static class CapturedRequest {
         public final long requestId;
         public final String action;
         public final TransportRequest request;
+        public final TransportRequestOptions options;

-        CapturedRequest(DiscoveryNode node, long requestId, String action, TransportRequest request) {
+        CapturedRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) {
             this.node = node;
             this.requestId = requestId;
             this.action = action;
             this.request = request;
+            this.options = options;
         }
     }

@@ -123,6 +126,16 @@ public void clear() {
     }

     protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) {
-        capturedRequests.add(new CapturingTransport.CapturedRequest(node, requestId, action, request));
+        capturedRequests.add(new CapturingTransport.CapturedRequest(node, requestId, action, request, null));
+    }
+
+    protected void onSendRequest(
+        long requestId,
+        String action,
+        TransportRequest request,
+        DiscoveryNode node,
+        TransportRequestOptions options
+    ) {
+        capturedRequests.add(new CapturingTransport.CapturedRequest(node, requestId, action, request, options));
     }
 }
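// Editor's note: CapturedRequest now records the TransportRequestOptions the sender
// used. A hedged sketch of how a test subclass could assert on them, assuming the
// framework's existing capturedRequests() accessor returns captures in send order
// (the action name is illustrative):
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.test.transport.CapturingTransport;
import org.opensearch.transport.TransportRequest;
import org.opensearch.transport.TransportRequestOptions;

class CapturedOptionsSketch extends CapturingTransport {
    void check(DiscoveryNode node, TransportRequest request, TransportRequestOptions options) {
        // the new five-argument hook records the options alongside the request
        onSendRequest(1L, "internal:example/action", request, node, options);
        assert capturedRequests()[0].options == options;

        // the legacy four-argument hook still works, recording null options
        onSendRequest(2L, "internal:example/action", request, node);
        assert capturedRequests()[1].options == null;
    }
}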
diff --git a/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java
index e1e5bcc968047..5bfc8879ecbc6 100644
--- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java
+++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java
@@ -188,13 +188,23 @@ public DiscoveryNode getNode() {
             public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options)
                 throws TransportException {
                 requests.put(requestId, Tuple.tuple(node, action));
-                onSendRequest(requestId, action, request, node);
+                onSendRequest(requestId, action, request, node, options);
             }
         };
     }

     protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) {}

+    protected void onSendRequest(
+        long requestId,
+        String action,
+        TransportRequest request,
+        DiscoveryNode node,
+        TransportRequestOptions options
+    ) {
+        onSendRequest(requestId, action, request, node);
+    }
+
     @Override
     public void setMessageListener(TransportMessageListener listener) {
         if (this.listener != null) {
diff --git a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java
index ec88cd0201db5..b8f7dbd88570d 100644
--- a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java
@@ -2109,7 +2109,9 @@ public void testRegisterHandlerTwice() {
             "internal:action1",
             randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
             TestRequest::new,
-            (request, message, task) -> { throw new AssertionError("boom"); }
+            (request, message, task) -> {
+                throw new AssertionError("boom");
+            }
         );
         expectThrows(
             IllegalArgumentException.class,
@@ -2117,7 +2119,9 @@
                 "internal:action1",
                 randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
                 TestRequest::new,
-                (request, message, task) -> { throw new AssertionError("boom"); }
+                (request, message, task) -> {
+                    throw new AssertionError("boom");
+                }
             )
         );

@@ -2125,7 +2129,9 @@
             "internal:action1",
             randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
             TestRequest::new,
-            (request, message, task) -> { throw new AssertionError("boom"); }
+            (request, message, task) -> {
+                throw new AssertionError("boom");
+            }
         );
     }
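// Editor's note: MockTransport's new five-argument onSendRequest defaults to delegating
// into the legacy four-argument overload, so subclasses that only override the old hook
// keep working. A hedged sketch of that compatibility path (assumes MockTransport's
// no-arg construction as used elsewhere in the test framework; names are illustrative):
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.test.transport.MockTransport;
import org.opensearch.transport.TransportRequest;
import org.opensearch.transport.TransportRequestOptions;

class LegacyHookSketch extends MockTransport {
    int legacyCalls;

    @Override
    protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) {
        legacyCalls++; // still invoked: the default five-argument hook funnels in here
    }

    void demo(DiscoveryNode node, TransportRequest request, TransportRequestOptions options) {
        onSendRequest(42L, "internal:example/action", request, node, options);
        assert legacyCalls == 1; // backward compatibility preserved
    }
}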