diff --git a/.github/workflows/check-compatibility.yml b/.github/workflows/check-compatibility.yml index dab8afd4ec1f2..4b8ef22351145 100644 --- a/.github/workflows/check-compatibility.yml +++ b/.github/workflows/check-compatibility.yml @@ -52,8 +52,18 @@ jobs: with: name: results.txt + - name: Find Comment + uses: peter-evans/find-comment@v2 + id: fc + with: + issue-number: ${{ github.event.number }} + comment-author: 'github-actions[bot]' + body-includes: 'Compatibility status:' + - name: Add comment on the PR uses: peter-evans/create-or-update-comment@v3 with: + comment-id: ${{ steps.fc.outputs.comment-id }} issue-number: ${{ github.event.number }} body-path: results.txt + edit-mode: replace diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index b86962a5ebb49..67abb5bcb8ffe 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -78,7 +78,7 @@ jobs: - name: Create Comment Success if: ${{ github.event_name == 'pull_request_target' && success() && env.result == 'SUCCESS' }} - uses: peter-evans/create-or-update-comment@v2 + uses: peter-evans/create-or-update-comment@v3 with: issue-number: ${{ env.pr_number }} body: | @@ -104,7 +104,7 @@ jobs: - name: Create Comment Flaky if: ${{ github.event_name == 'pull_request_target' && success() && env.result != 'SUCCESS' }} - uses: peter-evans/create-or-update-comment@v2 + uses: peter-evans/create-or-update-comment@v3 with: issue-number: ${{ env.pr_number }} body: | @@ -116,7 +116,7 @@ jobs: - name: Create Comment Failure if: ${{ github.event_name == 'pull_request_target' && failure() }} - uses: peter-evans/create-or-update-comment@v2 + uses: peter-evans/create-or-update-comment@v3 with: issue-number: ${{ env.pr_number }} body: | diff --git a/.github/workflows/poc-checklist.yml b/.github/workflows/poc-checklist.yml index 2dfb1bbe5cdce..3d014e000a487 100644 --- a/.github/workflows/poc-checklist.yml +++ b/.github/workflows/poc-checklist.yml @@ -11,7 +11,7 @@ jobs: issues: write steps: - name: Add comment - uses: peter-evans/create-or-update-comment@v2 + uses: peter-evans/create-or-update-comment@v3 with: issue-number: ${{ github.event.issue.number }} body: | diff --git a/CHANGELOG.md b/CHANGELOG.md index 18fb4510f8372..a2b9eab4a9add 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,9 +11,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) -- Added encryption-sdk lib to provide encryption and decryption capabilities ([#8466](https://github.com/opensearch-project/OpenSearch/pull/8466) [#9289](https://github.com/opensearch-project/OpenSearch/pull/9289)) -- Added crypto-kms plugin to provide AWS KMS based key providers for encryption/decryption. 
([#8465](https://github.com/opensearch-project/OpenSearch/pull/8465)) - Added Performance collector service and node performance stats tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) + ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 - Bump `forbiddenapis` from 3.3 to 3.4 @@ -42,7 +41,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) - Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) - Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Add Encryption SDK dependencies ([#8466](https://github.com/opensearch-project/OpenSearch/pull/8466)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) @@ -50,8 +48,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) -- [Remote Store] Add Segment download stats to remotestore stats API ([#8718](https://github.com/opensearch-project/OpenSearch/pull/8718)) -- [Remote Store] Add remote segment transfer stats on NodesStats API ([#9168](https://github.com/opensearch-project/OpenSearch/pull/9168) [#9393](https://github.com/opensearch-project/OpenSearch/pull/9393) [#9454](https://github.com/opensearch-project/OpenSearch/pull/9454)) - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) ### Deprecated @@ -80,130 +76,22 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added -- Add server version as REST response header [#6583](https://github.com/opensearch-project/OpenSearch/issues/6583) -- Start replication checkpointTimers on primary before segments upload to remote store. 
([#8221](https://github.com/opensearch-project/OpenSearch/pull/8221)) -- [distribution/archives] [Linux] [x64] Provide the variant of the distributions bundled with JRE ([#8195](https://github.com/opensearch-project/OpenSearch/pull/8195)) -- Add configuration for file cache size to max remote data ratio to prevent oversubscription of file cache ([#8606](https://github.com/opensearch-project/OpenSearch/pull/8606)) -- Disallow compression level to be set for default and best_compression index codecs ([#8737](https://github.com/opensearch-project/OpenSearch/pull/8737)) -- Prioritize replica shard movement during shard relocation ([#8875](https://github.com/opensearch-project/OpenSearch/pull/8875)) -- Introducing Default and Best Compression codecs as their algorithm name ([#9123](https://github.com/opensearch-project/OpenSearch/pull/9123)) -- Make SearchTemplateRequest implement IndicesRequest.Replaceable ([#9122](https://github.com/opensearch-project/OpenSearch/pull/9122)) -- [BWC and API enforcement] Define the initial set of annotations, their meaning and relations between them ([#9223](https://github.com/opensearch-project/OpenSearch/pull/9223)) -- [Segment Replication] Support realtime reads for GET requests ([#9212](https://github.com/opensearch-project/OpenSearch/pull/9212)) -- [Feature] Expose term frequency in Painless script score context ([#9081](https://github.com/opensearch-project/OpenSearch/pull/9081)) -- Add support for reading partial files to HDFS repository ([#9513](https://github.com/opensearch-project/OpenSearch/issues/9513)) -- Add support for extensions to search responses using SearchExtBuilder ([#9379](https://github.com/opensearch-project/OpenSearch/pull/9379)) -- [Remote State] Create service to publish cluster state to remote store ([#9160](https://github.com/opensearch-project/OpenSearch/pull/9160)) -- [BWC and API enforcement] Decorate the existing APIs with proper annotations (part 1) ([#9520](https://github.com/opensearch-project/OpenSearch/pull/9520)) -- Add concurrent segment search related metrics to node and index stats ([#9622](https://github.com/opensearch-project/OpenSearch/issues/9622)) -- Decouple replication lag from logic to fail stale replicas ([#9507](https://github.com/opensearch-project/OpenSearch/pull/9507)) -- Expose DelimitedTermFrequencyTokenFilter to allow providing term frequencies along with terms ([#9479](https://github.com/opensearch-project/OpenSearch/pull/9479)) -- APIs for performing async blob reads and async downloads from the repository using multiple streams ([#9592](https://github.com/opensearch-project/OpenSearch/issues/9592)) -- Introduce cluster default remote translog buffer interval setting ([#9584](https://github.com/opensearch-project/OpenSearch/pull/9584)) -- Add average concurrency metric for concurrent segment search ([#9670](https://github.com/opensearch-project/OpenSearch/issues/9670)) -- [Remote state] Integrate remote cluster state in publish/commit flow ([#9665](https://github.com/opensearch-project/OpenSearch/pull/9665)) -- [Segment Replication] Adding segment replication statistics rolled up at index, node and cluster level ([#9709](https://github.com/opensearch-project/OpenSearch/pull/9709)) -- [Remote Store] Changes to introduce repository registration during bootstrap via node attributes. 
([#9105](https://github.com/opensearch-project/OpenSearch/pull/9105)) ### Dependencies -- Bump `org.apache.logging.log4j:log4j-core` from 2.17.1 to 2.20.0 ([#8307](https://github.com/opensearch-project/OpenSearch/pull/8307)) -- Bump `io.grpc:grpc-context` from 1.46.0 to 1.57.1 ([#8726](https://github.com/opensearch-project/OpenSearch/pull/8726), [#9145](https://github.com/opensearch-project/OpenSearch/pull/9145)) -- Bump `com.netflix.nebula:gradle-info-plugin` from 12.1.5 to 12.1.6 ([#8724](https://github.com/opensearch-project/OpenSearch/pull/8724)) -- Bump `commons-codec:commons-codec` from 1.15 to 1.16.0 ([#8725](https://github.com/opensearch-project/OpenSearch/pull/8725)) -- Bump `org.apache.zookeeper:zookeeper` from 3.8.1 to 3.9.0 ([#8844](https://github.com/opensearch-project/OpenSearch/pull/8844), [#9146](https://github.com/opensearch-project/OpenSearch/pull/9146)) -- Bump `org.gradle.test-retry` from 1.5.3 to 1.5.4 ([#8842](https://github.com/opensearch-project/OpenSearch/pull/8842)) -- Bump `com.netflix.nebula.ospackage-base` from 11.3.0 to 11.4.0 ([#8838](https://github.com/opensearch-project/OpenSearch/pull/8838)) -- Bump `com.google.http-client:google-http-client-gson` from 1.43.2 to 1.43.3 ([#8840](https://github.com/opensearch-project/OpenSearch/pull/8840)) -- OpenJDK Update (July 2023 Patch releases) ([#8868](https://github.com/opensearch-project/OpenSearch/pull/8868) -- Bump `hadoop` libraries from 3.3.4 to 3.3.6 ([#6995](https://github.com/opensearch-project/OpenSearch/pull/6995)) -- Bump `com.gradle.enterprise` from 3.13.3 to 3.14.1 ([#8996](https://github.com/opensearch-project/OpenSearch/pull/8996)) -- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 ([#8995](https://github.com/opensearch-project/OpenSearch/pull/8995), [#9298](https://github.com/opensearch-project/OpenSearch/pull/9298)) -- Bump `com.google.cloud:google-cloud-core-http` from 2.21.0 to 2.21.1 ([#8999](https://github.com/opensearch-project/OpenSearch/pull/8999)) -- Bump `com.maxmind.geoip2:geoip2` from 4.0.1 to 4.1.0 ([#8998](https://github.com/opensearch-project/OpenSearch/pull/8998)) -- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 in /plugins/repository-hdfs ([#8997](https://github.com/opensearch-project/OpenSearch/pull/8997)) -- Bump `netty` from 4.1.94.Final to 4.1.96.Final ([#9030](https://github.com/opensearch-project/OpenSearch/pull/9030)) -- Bump `io.projectreactor.netty:reactor-netty-http` from 1.1.8 to 1.1.9 ([#9147](https://github.com/opensearch-project/OpenSearch/pull/9147)) -- Bump `org.apache.maven:maven-model` from 3.9.3 to 3.9.4 ([#9148](https://github.com/opensearch-project/OpenSearch/pull/9148)) -- Bump `com.azure:azure-storage-blob` from 12.22.3 to 12.23.0 ([#9231](https://github.com/opensearch-project/OpenSearch/pull/9231)) -- Bump `com.diffplug.spotless` from 6.19.0 to 6.20.0 ([#9227](https://github.com/opensearch-project/OpenSearch/pull/9227)) -- Bump `org.xerial.snappy:snappy-java` from 1.1.8.2 to 1.1.10.3 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) -- Bump `com.squareup.okhttp3:okhttp` from 4.9.3 to 4.11.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) -- Bump `com.squareup.okio:okio` from 2.8.0 to 3.5.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) -- Bump `com.google.code.gson:gson` from 2.9.0 to 2.10.1 ([#9230](https://github.com/opensearch-project/OpenSearch/pull/9230)) -- Bump `lycheeverse/lychee-action` from 1.2.0 to 1.8.0 
([#9228](https://github.com/opensearch-project/OpenSearch/pull/9228)) -- Bump `snakeyaml` from 2.0 to 2.1 ([#9269](https://github.com/opensearch-project/OpenSearch/pull/9269)) -- Bump `aws-actions/configure-aws-credentials` from 1 to 2 ([#9302](https://github.com/opensearch-project/OpenSearch/pull/9302)) -- Bump `com.github.luben:zstd-jni` from 1.5.5-3 to 1.5.5-5 ([#9431](https://github.com/opensearch-project/OpenSearch/pull/9431) -- Bump `actions/setup-java` from 2 to 3 ([#9457](https://github.com/opensearch-project/OpenSearch/pull/9457)) -- Bump `com.google.api:gax` from 2.27.0 to 2.32.0 ([#9300](https://github.com/opensearch-project/OpenSearch/pull/9300)) -- Bump `netty` from 4.1.96.Final to 4.1.97.Final ([#9553](https://github.com/opensearch-project/OpenSearch/pull/9553)) -- Bump `io.grpc:grpc-api` from 1.57.1 to 1.57.2 ([#9578](https://github.com/opensearch-project/OpenSearch/pull/9578)) -- Bump `org.apache.ant:ant` from 1.10.13 to 1.10.14 ([#9579](https://github.com/opensearch-project/OpenSearch/pull/9579)) +- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) ### Changed -- Default to mmapfs within hybridfs ([#8508](https://github.com/opensearch-project/OpenSearch/pull/8508)) -- Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303)) -- Make Span exporter configurable ([#8620](https://github.com/opensearch-project/OpenSearch/issues/8620)) -- Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303)) -- [Refactor] StreamIO from common to core.common namespace in core lib ([#8157](https://github.com/opensearch-project/OpenSearch/pull/8157)) -- [Refactor] Remaining HPPC to java.util collections ([#8730](https://github.com/opensearch-project/OpenSearch/pull/8730)) -- Remote Segment Store Repository setting moved from `index.remote_store.repository` to `index.remote_store.segment.repository` and `cluster.remote_store.repository` to `cluster.remote_store.segment.repository` respectively for Index and Cluster level settings ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719)) -- Change InternalSignificantTerms to sum shard-level superset counts only in final reduce ([#8735](https://github.com/opensearch-project/OpenSearch/pull/8735)) -- Exclude 'benchmarks' from codecov report ([#8805](https://github.com/opensearch-project/OpenSearch/pull/8805)) -- Create separate SourceLookup instance per segment slice in SignificantTextAggregatorFactory ([#8807](https://github.com/opensearch-project/OpenSearch/pull/8807)) -- Allow test clusters to run with TLS ([#8900](https://github.com/opensearch-project/OpenSearch/pull/8900)) -- Replace the deprecated IndexReader APIs with new storedFields() & termVectors() ([#7792](https://github.com/opensearch-project/OpenSearch/pull/7792)) -- [Remote Store] Add support to restore only unassigned shards of an index ([#8792](https://github.com/opensearch-project/OpenSearch/pull/8792)) -- Add safeguard limits for file cache during node level allocation ([#8208](https://github.com/opensearch-project/OpenSearch/pull/8208)) -- Performance improvements for BytesRefHash ([#8788](https://github.com/opensearch-project/OpenSearch/pull/8788)) -- Add support for aggregation profiler with concurrent aggregation ([#8801](https://github.com/opensearch-project/OpenSearch/pull/8801)) -- [Remove] Deprecated 
Fractional ByteSizeValue support #9005 ([#9005](https://github.com/opensearch-project/OpenSearch/pull/9005)) -- Add support for aggregation profiler with concurrent aggregation ([#8801](https://github.com/opensearch-project/OpenSearch/pull/8801)) -- [Remote Store] Restrict user override for remote store index level settings ([#8812](https://github.com/opensearch-project/OpenSearch/pull/8812)) -- [Refactor] MediaTypeParser to MediaTypeParserRegistry ([#8636](https://github.com/opensearch-project/OpenSearch/pull/8636)) -- Make MultiBucketConsumerService thread safe to use across slices during search ([#9047](https://github.com/opensearch-project/OpenSearch/pull/9047)) -- Removed blocking wait in TransportGetSnapshotsAction which was exhausting generic threadpool ([#8377](https://github.com/opensearch-project/OpenSearch/pull/8377)) -- Adds support for tracing runnable scenarios ([#8831](https://github.com/opensearch-project/OpenSearch/pull/8831)) -- Change shard_size and shard_min_doc_count evaluation to happen in shard level reduce phase ([#9085](https://github.com/opensearch-project/OpenSearch/pull/9085)) -- Add attributes to startSpan methods ([#9199](https://github.com/opensearch-project/OpenSearch/pull/9199)) -- [Refactor] Task foundation classes to core library - pt 1 ([#9082](https://github.com/opensearch-project/OpenSearch/pull/9082)) -- Add base class for parameterizing the search based tests #9083 ([#9083](https://github.com/opensearch-project/OpenSearch/pull/9083)) -- Add support for wrapping CollectorManager with profiling during concurrent execution ([#9129](https://github.com/opensearch-project/OpenSearch/pull/9129)) -- Rethrow OpenSearch exception for non-concurrent path while using concurrent search ([#9177](https://github.com/opensearch-project/OpenSearch/pull/9177)) -- Improve performance of encoding composite keys in multi-term aggregations ([#9412](https://github.com/opensearch-project/OpenSearch/pull/9412)) -- Fix sort related ITs for concurrent search ([#9177](https://github.com/opensearch-project/OpenSearch/pull/9466) -- Removing the vec file extension from INDEX_STORE_HYBRID_NIO_EXTENSIONS, to ensure the no performance degradation for vector search via Lucene Engine.([#9528](https://github.com/opensearch-project/OpenSearch/pull/9528))) -- Add support to use trace propagated from client ([#9506](https://github.com/opensearch-project/OpenSearch/pull/9506)) -- Separate request-based and settings-based concurrent segment search controls and introduce AggregatorFactory method to determine concurrent search support ([#9469](https://github.com/opensearch-project/OpenSearch/pull/9469)) -- [Remote Store] Rate limiter integration for remote store uploads and downloads([#9448](https://github.com/opensearch-project/OpenSearch/pull/9448/)) -- [Remote Store] Implicitly use replication type SEGMENT for remote store clusters ([#9264](https://github.com/opensearch-project/OpenSearch/pull/9264)) -- Redefine telemetry context restoration and propagation ([#9617](https://github.com/opensearch-project/OpenSearch/pull/9617)) -- Use non-concurrent path for sort request on timeseries index and field([#9562](https://github.com/opensearch-project/OpenSearch/pull/9562)) -- Added sampler based on `Blanket Probabilistic Sampling rate` and `Override for on demand` ([#9621](https://github.com/opensearch-project/OpenSearch/issues/9621)) -- [Remote Store] Add support for Remote Translog Store stats in `_remotestore/stats/` API ([#9263](https://github.com/opensearch-project/OpenSearch/pull/9263)) -- 
Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248)) -- Cleanup Unreferenced file on segment merge failure ([#9503](https://github.com/opensearch-project/OpenSearch/pull/9503)) -- Move ZStd to a plugin ([#9658](https://github.com/opensearch-project/OpenSearch/pull/9658)) -- [Remote Store] Add support for Remote Translog Store upload stats in `_nodes/stats/` API ([#8908](https://github.com/opensearch-project/OpenSearch/pull/8908)) +- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) +- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) +- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) ### Deprecated ### Removed -- Remove provision to create Remote Indices without Remote Translog Store ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719)) ### Fixed -- Fix flaky ResourceAwareTasksTests.testBasicTaskResourceTracking test ([#8993](https://github.com/opensearch-project/OpenSearch/pull/8993)) -- Fix null_pointer_exception when creating or updating ingest pipeline ([#9259](https://github.com/opensearch-project/OpenSearch/pull/9259)) -- Fix memory leak when using Zstd Dictionary ([#9403](https://github.com/opensearch-project/OpenSearch/pull/9403)) -- Fix range reads in respository-s3 ([9512](https://github.com/opensearch-project/OpenSearch/issues/9512)) -- Handle null partSize in OnDemandBlockSnapshotIndexInput ([#9291](https://github.com/opensearch-project/OpenSearch/issues/9291)) -- Fix condition to remove index create block ([#9437](https://github.com/opensearch-project/OpenSearch/pull/9437)) -- Add support to clear archived index setting ([#9019](https://github.com/opensearch-project/OpenSearch/pull/9019)) -- [Segment Replication] Fixed bug where replica shard temporarily serves stale data during an engine reset ([#9495](https://github.com/opensearch-project/OpenSearch/pull/9495)) -- Disable shard/segment level search_after short cutting if track_total_hits != false ([#9683](https://github.com/opensearch-project/OpenSearch/pull/9683)) -- [Segment Replication] Fixed bug where bytes behind metric is not accurate ([#9686](https://github.com/opensearch-project/OpenSearch/pull/9686)) ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.10...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x diff --git a/benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java new file mode 100644 index 0000000000000..64c0a9e1d7aa6 --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java @@ -0,0 +1,147 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common; + +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.Random; +import java.util.function.Supplier; + +@Fork(value = 3) +@Warmup(iterations = 3, time = 1) +@Measurement(iterations = 1, time = 1) +@BenchmarkMode(Mode.Throughput) +public class ArrayRoundingBenchmark { + + @Benchmark + public void round(Blackhole bh, Options opts) { + Rounding.Prepared rounding = opts.supplier.get(); + for (long key : opts.queries) { + bh.consume(rounding.round(key)); + } + } + + @State(Scope.Benchmark) + public static class Options { + @Param({ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "12", + "14", + "16", + "18", + "20", + "22", + "24", + "26", + "29", + "32", + "37", + "41", + "45", + "49", + "54", + "60", + "64", + "74", + "83", + "90", + "98", + "108", + "118", + "128", + "144", + "159", + "171", + "187", + "204", + "229", + "256" }) + public Integer size; + + @Param({ "binary", "linear" }) + public String type; + + @Param({ "uniform", "skewed_edge", "skewed_center" }) + public String distribution; + + public long[] queries; + public Supplier supplier; + + @Setup + public void setup() { + Random random = new Random(size); + long[] values = new long[size]; + for (int i = 1; i < values.length; i++) { + values[i] = values[i - 1] + 100; + } + + long range = values[values.length - 1] - values[0] + 100; + long mean, stddev; + queries = new long[1000000]; + + switch (distribution) { + case "uniform": // all values equally likely. + for (int i = 0; i < queries.length; i++) { + queries[i] = values[0] + (nextPositiveLong(random) % range); + } + break; + case "skewed_edge": // distribution centered at p90 with ± 5% stddev. + mean = values[0] + (long) (range * 0.9); + stddev = (long) (range * 0.05); + for (int i = 0; i < queries.length; i++) { + queries[i] = Math.max(values[0], mean + (long) (random.nextGaussian() * stddev)); + } + break; + case "skewed_center": // distribution centered at p50 with ± 5% stddev. 
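+                // Gaussian samples can fall below the smallest key; the Math.max calls below clamp them to values[0].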
+ mean = values[0] + (long) (range * 0.5); + stddev = (long) (range * 0.05); + for (int i = 0; i < queries.length; i++) { + queries[i] = Math.max(values[0], mean + (long) (random.nextGaussian() * stddev)); + } + break; + default: + throw new IllegalArgumentException("invalid distribution: " + distribution); + } + + switch (type) { + case "binary": + supplier = () -> new Rounding.BinarySearchArrayRounding(values, size, null); + break; + case "linear": + supplier = () -> new Rounding.BidirectionalLinearSearchArrayRounding(values, size, null); + break; + default: + throw new IllegalArgumentException("invalid type: " + type); + } + } + + private static long nextPositiveLong(Random random) { + return random.nextLong() & Long.MAX_VALUE; + } + } +} diff --git a/distribution/src/config/opensearch.yml b/distribution/src/config/opensearch.yml index de2d0e023a200..1d2cfe7eccae6 100644 --- a/distribution/src/config/opensearch.yml +++ b/distribution/src/config/opensearch.yml @@ -98,18 +98,10 @@ ${path.logs} # node.attr.remote_store.translog.repository: my-repo-1 # # ---------------------------------- Experimental Features ----------------------------------- -# # Gates the visibility of the experimental segment replication features until they are production ready. # #opensearch.experimental.feature.segment_replication_experimental.enabled: false # -# -# Gates the visibility of the index setting that allows persisting data to remote store along with local disk. -# Once the feature is ready for production release, this feature flag can be removed. -# -#opensearch.experimental.feature.remote_store.enabled: false -# -# # Gates the functionality of a new parameter to the snapshot restore API # that allows for creation of a new index type that searches a snapshot # directly in a remote repository without restoring all index data to disk diff --git a/libs/cli/src/main/java/org/opensearch/cli/ExitCodes.java b/libs/cli/src/main/java/org/opensearch/cli/ExitCodes.java index c705177b0d7b6..90efc89a08caf 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/ExitCodes.java +++ b/libs/cli/src/main/java/org/opensearch/cli/ExitCodes.java @@ -36,20 +36,34 @@ * POSIX exit codes. 
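 * <p>
 * A minimal usage sketch; the {@code terminal} instance and the CLI wiring
 * around it are assumed here, not part of this class:
 * <pre>{@code
 * if (arguments.isEmpty()) {
 *     terminal.errorPrintln("usage: example-tool FILE");
 *     System.exit(ExitCodes.USAGE);
 * }
 * }</pre>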
*/ public class ExitCodes { + /** No error */ public static final int OK = 0; - public static final int USAGE = 64; /* command line usage error */ - public static final int DATA_ERROR = 65; /* data format error */ - public static final int NO_INPUT = 66; /* cannot open input */ - public static final int NO_USER = 67; /* addressee unknown */ - public static final int NO_HOST = 68; /* host name unknown */ - public static final int UNAVAILABLE = 69; /* service unavailable */ - public static final int CODE_ERROR = 70; /* internal software error */ - public static final int CANT_CREATE = 73; /* can't create (user) output file */ - public static final int IO_ERROR = 74; /* input/output error */ - public static final int TEMP_FAILURE = 75; /* temp failure; user is invited to retry */ - public static final int PROTOCOL = 76; /* remote error in protocol */ - public static final int NOPERM = 77; /* permission denied */ - public static final int CONFIG = 78; /* configuration error */ + /** command line usage error */ + public static final int USAGE = 64; + /** data format error */ + public static final int DATA_ERROR = 65; + /** cannot open input */ + public static final int NO_INPUT = 66; + /** addressee unknown */ + public static final int NO_USER = 67; + /** host name unknown */ + public static final int NO_HOST = 68; + /** service unavailable */ + public static final int UNAVAILABLE = 69; + /** internal software error */ + public static final int CODE_ERROR = 70; + /** can't create (user) output file */ + public static final int CANT_CREATE = 73; + /** input/output error */ + public static final int IO_ERROR = 74; + /** temp failure; user is invited to retry */ + public static final int TEMP_FAILURE = 75; + /** remote error in protocol */ + public static final int PROTOCOL = 76; + /** permission denied */ + public static final int NOPERM = 77; + /** configuration error */ + public static final int CONFIG = 78; private ExitCodes() { /* no instance, just constants */ } } diff --git a/libs/cli/src/main/java/org/opensearch/cli/Terminal.java b/libs/cli/src/main/java/org/opensearch/cli/Terminal.java index 657b95fa052ab..be030c18507ad 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/Terminal.java +++ b/libs/cli/src/main/java/org/opensearch/cli/Terminal.java @@ -51,6 +51,8 @@ * which allows {@link #println(Verbosity,String)} calls which act like a logger, * only actually printing if the verbosity level of the terminal is above * the verbosity of the message. + * @see ConsoleTerminal + * @see SystemTerminal */ public abstract class Terminal { @@ -65,35 +67,57 @@ private static PrintWriter newErrorWriter() { return new PrintWriter(System.err); } - /** Defines the available verbosity levels of messages to be printed. */ + /** Defines the available verbosity levels of messages to be printed.*/ public enum Verbosity { - SILENT, /* always printed */ - NORMAL, /* printed when no options are given to cli */ - VERBOSE /* printed only when cli is passed verbose option */ + /** always printed */ + SILENT, + /** printed when no options are given to cli */ + NORMAL, + /** printed only when cli is passed verbose option */ + VERBOSE } /** The current verbosity for the terminal, defaulting to {@link Verbosity#NORMAL}. */ private Verbosity verbosity = Verbosity.NORMAL; - /** The newline used when calling println. */ + /** The newline separator used when calling println. */ private final String lineSeparator; + /** Constructs a new terminal with the given line separator. 
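+     * <p>
+     * Concrete terminals typically pass the platform separator, for example {@code System.lineSeparator()}.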
+ * @param lineSeparator the line separator to use when calling println + * */ protected Terminal(String lineSeparator) { this.lineSeparator = lineSeparator; } - /** Sets the verbosity of the terminal. */ + /** Sets the {@link Terminal#verbosity} of the terminal. (Default is {@link Verbosity#NORMAL}) + * @param verbosity the {@link Verbosity} level that will be used for printing + * */ public void setVerbosity(Verbosity verbosity) { this.verbosity = verbosity; } - /** Reads clear text from the terminal input. See {@link Console#readLine()}. */ + /** Reads clear text from the terminal input. + * @see Console#readLine() + * @param prompt message to display to the user + * @return the text entered by the user + * */ public abstract String readText(String prompt); - /** Reads password text from the terminal input. See {@link Console#readPassword()}}. */ + /** Reads secret text from the terminal input with echoing disabled. + * @see Console#readPassword() + * @param prompt message to display to the user + * @return the secret as a character array + * */ public abstract char[] readSecret(String prompt); - /** Read password text form terminal input up to a maximum length. */ + /** Read secret text from terminal input with echoing disabled, up to a maximum length. + * @see Console#readPassword() + * @param prompt message to display to the user + * @param maxLength the maximum length of the secret + * @return the secret as a character array + * @throws IllegalStateException if the secret exceeds the maximum length + * */ public char[] readSecret(String prompt, int maxLength) { char[] result = readSecret(prompt); if (result.length > maxLength) { @@ -103,30 +127,48 @@ public char[] readSecret(String prompt, int maxLength) { return result; } - /** Returns a Writer which can be used to write to the terminal directly using standard output. */ + /** Returns a Writer which can be used to write to the terminal directly using standard output. + * @return a writer to {@link Terminal#DEFAULT} output + * @see Terminal.ConsoleTerminal + * @see Terminal.SystemTerminal + * */ public abstract PrintWriter getWriter(); - /** Returns a Writer which can be used to write to the terminal directly using standard error. */ + /** Returns a Writer which can be used to write to the terminal directly using standard error. + * @return a writer to stderr + * */ public PrintWriter getErrorWriter() { return ERROR_WRITER; } - /** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level. */ + /** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level, with a {@link Terminal#lineSeparator} + * @param msg the message to print + * */ public final void println(String msg) { println(Verbosity.NORMAL, msg); } - /** Prints a line to the terminal at {@code verbosity} level. */ + /** Prints message to the terminal's standard output at {@link Verbosity} level, with a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print + * @param msg the message to print + * */ public final void println(Verbosity verbosity, String msg) { print(verbosity, msg + lineSeparator); } - /** Prints message to the terminal's standard output at {@code verbosity} level, without a newline. */ + /** Prints message to the terminal's standard output at {@link Verbosity} level, without adding a {@link Terminal#lineSeparator}. 
+ * @param verbosity the {@link Verbosity} level at which to print + * @param msg the message to print + * */ public final void print(Verbosity verbosity, String msg) { print(verbosity, msg, false); } - /** Prints message to the terminal at {@code verbosity} level, without a newline. */ + /** Prints message to either standard or error output at {@link Verbosity} level, without adding a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print. + * @param msg the message to print + * @param isError if true, prints to standard error instead of standard output + * */ private void print(Verbosity verbosity, String msg, boolean isError) { if (isPrintable(verbosity)) { PrintWriter writer = isError ? getErrorWriter() : getWriter(); @@ -135,29 +177,44 @@ private void print(Verbosity verbosity, String msg, boolean isError) { } } - /** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level, without a newline. */ + /** Prints a line to the terminal's standard error at {@link Verbosity} level, without adding a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print. + * @param msg the message to print + * */ public final void errorPrint(Verbosity verbosity, String msg) { print(verbosity, msg, true); } - /** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level. */ + /** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level, with a {@link Terminal#lineSeparator} + * @param msg the message to print + * */ public final void errorPrintln(String msg) { errorPrintln(Verbosity.NORMAL, msg); } - /** Prints a line to the terminal's standard error at {@code verbosity} level. */ + /** Prints a line to the terminal's standard error at {@link Verbosity} level, with a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print. + * @param msg the message to print + * */ public final void errorPrintln(Verbosity verbosity, String msg) { errorPrint(verbosity, msg + lineSeparator); } - /** Checks if is enough {@code verbosity} level to be printed */ + /** Checks if given {@link Verbosity} level is high enough to be printed at the level defined by {@link Terminal#verbosity} + * @param verbosity the {@link Verbosity} level to check + * @return true if the {@link Verbosity} level is high enough to be printed + * @see Terminal#setVerbosity(Verbosity) + * */ public final boolean isPrintable(Verbosity verbosity) { return this.verbosity.ordinal() >= verbosity.ordinal(); } /** - * Prompt for a yes or no answer from the user. This method will loop until 'y' or 'n' + * Prompt for a yes or no answer from the user. This method will loop until 'y', 'n' * (or the default empty value) is entered. + * @param prompt the prompt to display to the user + * @param defaultYes if true, the default answer is 'y', otherwise it is 'n' + * @return true if the user answered 'y', false if the user answered 'n' or the defaultYes value if the user entered nothing */ public final boolean promptYesNo(String prompt, boolean defaultYes) { String answerPrompt = defaultYes ? " [Y/n]" : " [y/N]"; @@ -181,6 +238,11 @@ public final boolean promptYesNo(String prompt, boolean defaultYes) { * character is immediately preceded by a carriage return, we have * a Windows-style newline, so we discard the carriage return as well * as the newline. 
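+     * <p>
+     * For example, reading {@code "y\r\n"} yields just {@code ['y']}; both the
+     * carriage return and the newline are discarded.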
+ * @param reader the reader to read from + * @param maxLength the maximum length of the line to read + * @return the line read from the reader + * @throws RuntimeException if the line read exceeds the maximum length + * @throws RuntimeException if an IOException occurs while reading */ public static char[] readLineToCharArray(Reader reader, int maxLength) { char[] buf = new char[maxLength + 2]; @@ -215,6 +277,7 @@ public static char[] readLineToCharArray(Reader reader, int maxLength) { } } + /** Flushes the terminal's standard output and standard error. */ public void flush() { this.getWriter().flush(); this.getErrorWriter().flush(); diff --git a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java index 36bc5504061f5..d0b94536b0729 100644 --- a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java @@ -61,6 +61,20 @@ public V2 v2() { return v2; } + /** + * Returns {@code true} if the given object is also a tuple and the two tuples + * have equal {@link #v1()} and {@link #v2()} values. + *
<p>
+     * Returns {@code false} otherwise, including for {@code null} values or
+     * objects of different types.
+     * <p>
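+     * For example, {@code new Tuple<>(1, "a")} equals {@code new Tuple<>(1, "a")},
+     * but does not equal {@code new Tuple<>(1, "b")} or {@code null}.
+     * <p>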
+     * Note: {@code Tuple} instances are equal if the underlying values are
+     * equal, even if the types are different.
+     *
+     * @param o the object to compare to
+     * @return {@code true} if the given object is also a tuple and the two tuples
+     *         have equal {@link #v1()} and {@link #v2()} values.
+     */
     @Override
     public boolean equals(Object o) {
         if (this == o) return true;
@@ -74,6 +88,10 @@ public boolean equals(Object o) {
         return true;
     }
 
+    /**
+     * Returns the hash code value for this Tuple.
+     * @return the hash code value for this Tuple.
+     */
     @Override
     public int hashCode() {
         int result = v1 != null ? v1.hashCode() : 0;
@@ -81,6 +99,10 @@ public int hashCode() {
         return result;
     }
 
+    /**
+     * Returns a string representation of a Tuple.
+     * @return {@code "Tuple [v1=value1, v2=value2]"}
+     */
     @Override
     public String toString() {
         return "Tuple [v1=" + v1 + ", v2=" + v2 + "]";
diff --git a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java b/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java
index 37322310c7287..9572b5b9054b2 100644
--- a/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java
+++ b/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java
@@ -19,8 +19,8 @@
  * Crypto provider abstractions for encryption and decryption of data. Allows registering multiple providers
  * for defining different ways of encrypting or decrypting data.
  *
- * T - Encryption Metadata / CryptoContext
- * U - Parsed Encryption Metadata / CryptoContext
+ * @param <T> Encryption Metadata / CryptoContext
+ * @param <U> Parsed Encryption Metadata / CryptoContext
  */
 @ExperimentalApi
 public interface CryptoHandler<T, U> extends Closeable {
@@ -38,6 +38,7 @@ public interface CryptoHandler<T, U> extends Closeable {
      * Note that underlying information in the loaded metadata object is same as present in the object created during
      * encryption but object type may differ.
      *
+     * @param encryptedHeaderContentSupplier supplier for encrypted header content.
      * @return crypto metadata instance used in decryption.
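+     * <p>
+     * Illustrative call pattern; the {@code handler} and {@code headerSupplier}
+     * names are placeholders, not part of this API:
+     * <pre>{@code
+     * U metadata = handler.loadEncryptionMetadata(headerSupplier);
+     * // hand the metadata to the handler's decrypting-stream methods
+     * }</pre>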
     */
    U loadEncryptionMetadata(EncryptedHeaderContentSupplier encryptedHeaderContentSupplier) throws IOException;
diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/DataKeyPair.java b/libs/common/src/main/java/org/opensearch/common/crypto/DataKeyPair.java
index 3d487f9028d71..711c0d314ecef 100644
--- a/libs/common/src/main/java/org/opensearch/common/crypto/DataKeyPair.java
+++ b/libs/common/src/main/java/org/opensearch/common/crypto/DataKeyPair.java
@@ -11,7 +11,10 @@
  * Key pair generated by {@link MasterKeyProvider}
  */
 public class DataKeyPair {
+
+    /** Unencrypted data key used for encryption and decryption */
     private final byte[] rawKey;
+    /** Encrypted version of rawKey */
     private final byte[] encryptedKey;
 
     /**
@@ -25,7 +28,7 @@ public DataKeyPair(byte[] rawKey, byte[] encryptedKey) {
     }
 
     /**
-     * Returns raw key
+     * Returns the unencrypted data key
      * @return raw/decrypted key
      */
     public byte[] getRawKey() {
diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/DecryptedRangedStreamProvider.java b/libs/common/src/main/java/org/opensearch/common/crypto/DecryptedRangedStreamProvider.java
index 06b18027a0726..2cda3c1f8bdb4 100644
--- a/libs/common/src/main/java/org/opensearch/common/crypto/DecryptedRangedStreamProvider.java
+++ b/libs/common/src/main/java/org/opensearch/common/crypto/DecryptedRangedStreamProvider.java
@@ -16,7 +16,9 @@
  */
 public class DecryptedRangedStreamProvider {
 
+    /** Adjusted range of partial encrypted content which needs to be used for decryption. */
     private final long[] adjustedRange;
+    /** Stream provider for decryption and range re-adjustment. */
     private final UnaryOperator<InputStream> decryptedStreamProvider;
 
     /**
diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java b/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java
index b5feeb6d37ec6..8afa48eb92c0f 100644
--- a/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java
+++ b/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java
@@ -22,7 +22,7 @@ public interface MasterKeyProvider extends Closeable {
     DataKeyPair generateDataPair();
 
     /**
-     * Returns decrpted key against the encrypted key.
+     * Returns the decrypted key for the given encrypted key.
      * @param encryptedKey Key to decrypt
      * @return Decrypted version of key.
      */
@@ -35,6 +35,7 @@ public interface MasterKeyProvider extends Closeable {
     String getKeyId();
 
     /**
+     * Returns encryption context associated with this master key.
      * @return encryption context associated with this master key.
     */
    Map<String, String> getEncryptionContext();
diff --git a/libs/compress/src/main/java/org/opensearch/compress/spi/CompressionProvider.java b/libs/compress/src/main/java/org/opensearch/compress/spi/CompressionProvider.java
index 58bf24a210bae..f0c6969377d78 100644
--- a/libs/compress/src/main/java/org/opensearch/compress/spi/CompressionProvider.java
+++ b/libs/compress/src/main/java/org/opensearch/compress/spi/CompressionProvider.java
@@ -23,7 +23,10 @@
  */
 public class CompressionProvider implements CompressorProvider {
 
-    /** Returns the concrete {@link Compressor}s provided by the compress library */
+    /**
+     * Returns the concrete {@link Compressor}s provided by the compress library
+     * @return a list of {@link Compressor}s
+     * */
     @SuppressWarnings({ "unchecked", "rawtypes" })
     @Override
     public List> getCompressors() {
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java
index 009cc673058d3..5b7795a5647f5 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java
@@ -119,9 +119,13 @@ protected void addDefaultAttributes(Span span) {
     }
 
     @Override
-    public Span startSpan(String spanName, Map<String, List<String>> headers, Attributes attributes) {
+    public Span startSpan(SpanCreationContext spanCreationContext, Map<String, List<String>> headers) {
         Optional<Span> propagatedSpan = tracingTelemetry.getContextPropagator().extractFromHeaders(headers);
-        return startSpan(spanName, propagatedSpan.map(SpanContext::new).orElse(null), attributes);
+        return startSpan(
+            spanCreationContext.getSpanName(),
+            propagatedSpan.map(SpanContext::new).orElse(null),
+            spanCreationContext.getAttributes()
+        );
     }
 }
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java
index cb2bf73775564..b0692f1b62a48 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java
@@ -10,7 +10,7 @@
 import org.opensearch.common.annotation.ExperimentalApi;
 import org.opensearch.telemetry.tracing.Span;
-import org.opensearch.telemetry.tracing.attributes.Attributes;
+import org.opensearch.telemetry.tracing.SpanCreationContext;
 
 import java.util.List;
 import java.util.Map;
@@ -28,10 +28,9 @@ public interface HttpTracer {
     /**
      * Start the span with propagating the tracing info from the HttpRequest header.
      *
-     * @param spanName span name.
+     * @param spanCreationContext span creation context carrying the span name and attributes.
      * @param header http request header.
-     * @param attributes span attributes.
      * @return span.
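+     * <p>
+     * A sketch of the expected call pattern; {@code httpRequest.getHeaders()} is
+     * assumed here to expose the raw header map:
+     * <pre>{@code
+     * Span span = httpTracer.startSpan(
+     *     new SpanCreationContext("handle_http_request", Attributes.EMPTY),
+     *     httpRequest.getHeaders()
+     * );
+     * }</pre>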
*/ - Span startSpan(String spanName, Map> header, Attributes attributes); + Span startSpan(SpanCreationContext spanCreationContext, Map> header); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java index 96a969369c3a9..d7206a3c6b094 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java @@ -81,7 +81,7 @@ public void close() { } @Override - public Span startSpan(String spanName, Map> header, Attributes attributes) { + public Span startSpan(SpanCreationContext spanCreationContext, Map> header) { return NoopSpan.INSTANCE; } } diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java index 5205bdfc8a031..48b72e1f673fe 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java @@ -19,8 +19,8 @@ import org.opensearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicReference; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; @@ -252,12 +252,6 @@ public void testEndSpanByClosingSpanScopeMultiple() { public void testSpanAcrossThreads() { TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - AtomicReference currentSpanRefThread1 = new AtomicReference<>(); - AtomicReference currentSpanRefThread2 = new AtomicReference<>(); - AtomicReference currentSpanRefAfterEndThread2 = new AtomicReference<>(); - - AtomicReference spanRef = new AtomicReference<>(); - AtomicReference spanT2Ref = new AtomicReference<>(); ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( threadContext, @@ -265,29 +259,26 @@ public void testSpanAcrossThreads() { ); DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); - executorService.execute(() -> { + CompletableFuture asyncTask = CompletableFuture.runAsync(() -> { // create a span Span span = defaultTracer.startSpan(new SpanCreationContext("span_name_t_1", Attributes.EMPTY)); SpanScope spanScope = defaultTracer.withSpanInScope(span); - spanRef.set(span); - executorService.execute(() -> { + CompletableFuture asyncTask1 = CompletableFuture.runAsync(() -> { Span spanT2 = defaultTracer.startSpan(new SpanCreationContext("span_name_t_2", Attributes.EMPTY)); SpanScope spanScopeT2 = defaultTracer.withSpanInScope(spanT2); - spanT2Ref.set(spanT2); - - currentSpanRefThread2.set(defaultTracer.getCurrentSpan().getSpan()); + assertEquals(spanT2, defaultTracer.getCurrentSpan().getSpan()); - spanT2.endSpan(); spanScopeT2.close(); - currentSpanRefAfterEndThread2.set(getCurrentSpanFromContext(defaultTracer)); - }); + spanT2.endSpan(); + assertEquals(null, defaultTracer.getCurrentSpan()); + }, executorService); + asyncTask1.join(); spanScope.close(); - currentSpanRefThread1.set(getCurrentSpanFromContext(defaultTracer)); - }); - assertEquals(spanT2Ref.get(), currentSpanRefThread2.get()); - assertEquals(spanRef.get(), 
currentSpanRefAfterEndThread2.get()); - assertEquals(null, currentSpanRefThread1.get()); + span.endSpan(); + assertEquals(null, defaultTracer.getCurrentSpan()); + }, executorService); + asyncTask.join(); } public void testSpanCloseOnThread2() { @@ -297,27 +288,27 @@ public void testSpanCloseOnThread2() { threadContext, tracingTelemetry ); - AtomicReference currentSpanRefThread1 = new AtomicReference<>(); - AtomicReference currentSpanRefThread2 = new AtomicReference<>(); DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); final Span span = defaultTracer.startSpan(new SpanCreationContext("span_name_t1", Attributes.EMPTY)); try (SpanScope spanScope = defaultTracer.withSpanInScope(span)) { - executorService.execute(() -> async(new ActionListener() { + CompletableFuture asyncTask = CompletableFuture.runAsync(() -> async(new ActionListener() { @Override public void onResponse(Boolean response) { - span.endSpan(); - currentSpanRefThread2.set(defaultTracer.getCurrentSpan()); + try (SpanScope s = defaultTracer.withSpanInScope(span)) { + assertEquals(span, defaultTracer.getCurrentSpan().getSpan()); + } finally { + span.endSpan(); + } } @Override public void onFailure(Exception e) { } - })); - currentSpanRefThread1.set(defaultTracer.getCurrentSpan()); + }), executorService); + assertEquals(span, defaultTracer.getCurrentSpan().getSpan()); + asyncTask.join(); } - assertEquals(null, currentSpanRefThread2.get()); - assertEquals(span, currentSpanRefThread1.get().getSpan()); assertEquals(null, defaultTracer.getCurrentSpan()); } @@ -337,13 +328,6 @@ private void async(ActionListener actionListener) { public void testSpanAcrossThreadsMultipleSpans() { TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - AtomicReference currentSpanRefThread1 = new AtomicReference<>(); - AtomicReference currentSpanRefThread2 = new AtomicReference<>(); - AtomicReference currentSpanRefAfterEndThread2 = new AtomicReference<>(); - - AtomicReference parentSpanRef = new AtomicReference<>(); - AtomicReference spanRef = new AtomicReference<>(); - AtomicReference spanT2Ref = new AtomicReference<>(); ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( threadContext, @@ -351,43 +335,38 @@ public void testSpanAcrossThreadsMultipleSpans() { ); DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); - executorService.execute(() -> { + CompletableFuture asyncTask = CompletableFuture.runAsync(() -> { // create a parent span Span parentSpan = defaultTracer.startSpan(new SpanCreationContext("p_span_name", Attributes.EMPTY)); SpanScope parentSpanScope = defaultTracer.withSpanInScope(parentSpan); - parentSpanRef.set(parentSpan); // create a span Span span = defaultTracer.startSpan(new SpanCreationContext("span_name_t_1", Attributes.EMPTY)); SpanScope spanScope = defaultTracer.withSpanInScope(span); - spanRef.set(span); - executorService.execute(() -> { + CompletableFuture asyncTask1 = CompletableFuture.runAsync(() -> { Span spanT2 = defaultTracer.startSpan(new SpanCreationContext("span_name_t_2", Attributes.EMPTY)); SpanScope spanScopeT2 = defaultTracer.withSpanInScope(spanT2); - Span spanT21 = defaultTracer.startSpan(new SpanCreationContext("span_name_t_2", Attributes.EMPTY)); - SpanScope spanScopeT21 = defaultTracer.withSpanInScope(spanT2); - spanT2Ref.set(spanT21); - currentSpanRefThread2.set(defaultTracer.getCurrentSpan().getSpan()); - - 
spanT21.endSpan(); + SpanScope spanScopeT21 = defaultTracer.withSpanInScope(spanT21); + assertEquals(spanT21, defaultTracer.getCurrentSpan().getSpan()); spanScopeT21.close(); + spanT21.endSpan(); - spanT2.endSpan(); spanScopeT2.close(); - currentSpanRefAfterEndThread2.set(getCurrentSpanFromContext(defaultTracer)); - }); + spanT2.endSpan(); + + assertEquals(null, defaultTracer.getCurrentSpan()); + }, executorService); + + asyncTask1.join(); + spanScope.close(); + span.endSpan(); parentSpanScope.close(); - currentSpanRefThread1.set(getCurrentSpanFromContext(defaultTracer)); - }); - assertEquals(spanT2Ref.get(), currentSpanRefThread2.get()); - assertEquals(spanRef.get(), currentSpanRefAfterEndThread2.get()); - assertEquals(null, currentSpanRefThread1.get()); - } - - private static Span getCurrentSpanFromContext(DefaultTracer defaultTracer) { - return defaultTracer.getCurrentSpan() != null ? defaultTracer.getCurrentSpan().getSpan() : null; + parentSpan.endSpan(); + assertEquals(null, defaultTracer.getCurrentSpan()); + }, executorService); + asyncTask.join(); } public void testClose() throws IOException { diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index ea517ca53c000..cad7d67f3ef84 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -69,7 +69,6 @@ dependencies { testImplementation project(':modules:transport-netty4') // for parent/child testing testImplementation project(':modules:parent-join') - testImplementation project(':plugins:custom-codecs') } restResources { @@ -96,5 +95,4 @@ forbiddenPatterns { tasks.named("bundlePlugin").configure { dependsOn("copyParentJoinMetadata") dependsOn("copyTransportNetty4Metadata") - dependsOn("copyCustomCodecsMetadata") } diff --git a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java index 60afe44babc26..604c233ca49c4 100644 --- a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java @@ -15,7 +15,6 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.index.codec.customcodecs.CustomCodecPlugin; import org.opensearch.index.engine.Segment; import org.opensearch.index.reindex.BulkByScrollResponse; import org.opensearch.index.reindex.ReindexAction; @@ -47,7 +46,7 @@ public class MultiCodecReindexIT extends ReindexTestCase { @Override protected Collection> nodePlugins() { - return List.of(CustomCodecPlugin.class, ReindexModulePlugin.class); + return List.of(ReindexModulePlugin.class); } public void testReindexingMultipleCodecs() throws InterruptedException, ExecutionException { @@ -57,10 +56,6 @@ public void testReindexingMultipleCodecs() throws InterruptedException, Executio "BEST_COMPRESSION", "zlib", "BEST_COMPRESSION", - "zstd_no_dict", - "ZSTD_NO_DICT", - "zstd", - "ZSTD", "default", "BEST_SPEED", "lz4", @@ -135,7 +130,7 @@ private void assertReindexingWithMultipleCodecs(String destCodec, String destCod } private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { - assertAcked(client().admin().indices().prepareClose(index)); + assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(1)); assertAcked( client().admin() @@ 
-144,7 +139,7 @@ private void useCodec(String index, String codec) throws ExecutionException, Int .get() ); - assertAcked(client().admin().indices().prepareOpen(index)); + assertAcked(client().admin().indices().prepareOpen(index).setWaitForActiveShards(1)); } private void flushAndRefreshIndex(String index) { diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index 775c8d4405cdb..0271472125814 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -52,6 +52,7 @@ import org.opensearch.http.HttpHandlingSettings; import org.opensearch.http.HttpReadTimeoutException; import org.opensearch.http.HttpServerChannel; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.NettyAllocator; import org.opensearch.transport.NettyByteBufSizer; @@ -191,9 +192,10 @@ public Netty4HttpServerTransport( NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, ClusterSettings clusterSettings, - SharedGroupFactory sharedGroupFactory + SharedGroupFactory sharedGroupFactory, + Tracer tracer ) { - super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings); + super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, tracer); Netty4Utils.setAvailableProcessors(OpenSearchExecutors.NODE_PROCESSORS_SETTING.get(settings)); NettyAllocator.logAllocatorDescriptionIfNeeded(); this.sharedGroupFactory = sharedGroupFactory; diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java index fcf128e7c79e5..ca51d70702a82 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java @@ -48,6 +48,7 @@ import org.opensearch.http.netty4.Netty4HttpServerTransport; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.netty4.Netty4Transport; @@ -122,7 +123,8 @@ public Map> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { return Collections.singletonMap( NETTY_HTTP_TRANSPORT_NAME, @@ -134,7 +136,8 @@ public Map> getHttpTransports( xContentRegistry, dispatcher, clusterSettings, - getSharedGroupFactory(settings) + getSharedGroupFactory(settings), + tracer ) ); } diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java index ea36d1ee26852..03990c173d547 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java @@ -47,6 +47,7 @@ import org.opensearch.rest.BytesRestResponse; import 
org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -112,7 +113,8 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, xContentRegistry(), dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - new SharedGroupFactory(Settings.EMPTY) + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE ) ) { httpServerTransport.start(); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java index 4201a4d0a8b4b..af868a3a3cb88 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -45,6 +45,7 @@ import org.opensearch.http.HttpResponse; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.NullDispatcher; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -135,7 +136,8 @@ class CustomNettyHttpServerTransport extends Netty4HttpServerTransport { xContentRegistry(), new NullDispatcher(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ); } diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java index adcada811e537..d892918decfb5 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java @@ -55,6 +55,7 @@ import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; @@ -198,7 +199,8 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, xContentRegistry(), dispatcher, clusterSettings, - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -247,7 +249,8 @@ public void testBindUnavailableAddress() { xContentRegistry(), new NullDispatcher(), clusterSettings, - new SharedGroupFactory(Settings.EMPTY) + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -265,7 +268,8 @@ public void testBindUnavailableAddress() { xContentRegistry(), new NullDispatcher(), clusterSettings, - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start); @@ -317,7 +321,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), 
dispatcher, clusterSettings, - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -379,7 +384,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, clusterSettings, - new SharedGroupFactory(Settings.EMPTY) + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -448,7 +454,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -521,7 +528,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { transport.start(); diff --git a/plugins/custom-codecs/build.gradle b/plugins/custom-codecs/build.gradle deleted file mode 100644 index 253822e88b817..0000000000000 --- a/plugins/custom-codecs/build.gradle +++ /dev/null @@ -1,27 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - * - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -apply plugin: 'opensearch.opensearchplugin' -apply plugin: 'opensearch.internal-cluster-test' - -opensearchplugin { - name 'custom-codecs' - description 'A plugin that implements custom compression codecs.' - classname 'org.opensearch.index.codec.customcodecs.CustomCodecPlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') -} - -dependencies { - api "com.github.luben:zstd-jni:1.5.5-5" -} - -testingConventions.enabled = false; diff --git a/plugins/custom-codecs/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java b/plugins/custom-codecs/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java deleted file mode 100644 index 9bef3e2d3d235..0000000000000 --- a/plugins/custom-codecs/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec; - -import org.apache.logging.log4j.core.util.Throwables; -import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.codec.customcodecs.CustomCodecPlugin; -import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; - -import java.util.Collection; -import java.util.Collections; -import java.util.concurrent.ExecutionException; - -import static org.opensearch.index.codec.customcodecs.CustomCodecService.ZSTD_CODEC; -import static org.opensearch.index.codec.customcodecs.CustomCodecService.ZSTD_NO_DICT_CODEC; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) -public class CodecCompressionLevelIT extends OpenSearchIntegTestCase { - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return Collections.singletonList(CustomCodecPlugin.class); - } - - public void testLuceneCodecsCreateIndexWithCompressionLevel() { - - internalCluster().ensureAtLeastNumDataNodes(1); - final String index = "test-index"; - - // creating index - assertThrows( - IllegalArgumentException.class, - () -> createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) - .put("index.codec.compression_level", randomIntBetween(1, 6)) - .build() - ) - ); - - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) - .build() - ); - ensureGreen(index); - } - - public void testZStandardCodecsCreateIndexWithCompressionLevel() { - - internalCluster().ensureAtLeastNumDataNodes(1); - final String index = "test-index"; - - // creating index - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", randomFrom(ZSTD_CODEC, ZSTD_NO_DICT_CODEC)) - .put("index.codec.compression_level", randomIntBetween(1, 6)) - .build() - ); - - ensureGreen(index); - } - - public void testZStandardToLuceneCodecsWithCompressionLevel() throws ExecutionException, InterruptedException { - - internalCluster().ensureAtLeastNumDataNodes(1); - final String index = "test-index"; - - // creating index - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", randomFrom(ZSTD_CODEC, ZSTD_NO_DICT_CODEC)) - .put("index.codec.compression_level", randomIntBetween(1, 6)) - .build() - ); - ensureGreen(index); - - assertAcked(client().admin().indices().prepareClose(index)); - - Throwable executionException = expectThrows( - ExecutionException.class, - () -> client().admin() - .indices() - .updateSettings( - new UpdateSettingsRequest(index).settings( - Settings.builder().put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) - ) - ) - .get() - ); - - Throwable rootCause = Throwables.getRootCause(executionException); - assertEquals(IllegalArgumentException.class, rootCause.getClass()); - 
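// ---- Editor's sketch (not part of the diff; illustrative only) ----
// The contract this test pins down: "index.codec.compression_level" is accepted only by
// the zstd/zstd_no_dict codecs, so moving a (closed) index back to a stock Lucene codec
// must clear the level in the same settings update. A minimal sketch, reusing the
// client() and index fixtures from the surrounding test; putNull(...) is equivalent to
// the .put(..., (String) null) idiom used a few lines below:
assertAcked(
    client().admin()
        .indices()
        .updateSettings(
            new UpdateSettingsRequest(index).settings(
                Settings.builder()
                    .put("index.codec", CodecService.DEFAULT_CODEC) // back to a stock codec
                    .putNull("index.codec.compression_level") // the level must go away with it
            )
        )
        .get()
);
// --------------------------------------------------------------------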
assertTrue(rootCause.getMessage().startsWith("Compression level cannot be set")); - - assertAcked( - client().admin() - .indices() - .updateSettings( - new UpdateSettingsRequest(index).settings( - Settings.builder() - .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) - .put("index.codec.compression_level", (String) null) - ) - ) - .get() - ); - - assertAcked(client().admin().indices().prepareOpen(index)); - ensureGreen(index); - } - - public void testLuceneToZStandardCodecsWithCompressionLevel() throws ExecutionException, InterruptedException { - - internalCluster().ensureAtLeastNumDataNodes(1); - final String index = "test-index"; - - // creating index - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) - .build() - ); - ensureGreen(index); - - assertAcked(client().admin().indices().prepareClose(index)); - - Throwable executionException = expectThrows( - ExecutionException.class, - () -> client().admin() - .indices() - .updateSettings( - new UpdateSettingsRequest(index).settings( - Settings.builder() - .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) - .put("index.codec.compression_level", randomIntBetween(1, 6)) - ) - ) - .get() - ); - - Throwable rootCause = Throwables.getRootCause(executionException); - assertEquals(IllegalArgumentException.class, rootCause.getClass()); - assertTrue(rootCause.getMessage().startsWith("Compression level cannot be set")); - - assertAcked( - client().admin() - .indices() - .updateSettings( - new UpdateSettingsRequest(index).settings( - Settings.builder() - .put("index.codec", randomFrom(ZSTD_CODEC, ZSTD_NO_DICT_CODEC)) - .put("index.codec.compression_level", randomIntBetween(1, 6)) - ) - ) - .get() - ); - - assertAcked(client().admin().indices().prepareOpen(index)); - ensureGreen(index); - } - -} diff --git a/plugins/custom-codecs/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java b/plugins/custom-codecs/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java deleted file mode 100644 index fda9327c79dc1..0000000000000 --- a/plugins/custom-codecs/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec; - -import org.opensearch.action.admin.indices.flush.FlushResponse; -import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; -import org.opensearch.action.admin.indices.refresh.RefreshResponse; -import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; -import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.codec.customcodecs.CustomCodecPlugin; -import org.opensearch.index.engine.Segment; -import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static java.util.stream.Collectors.toList; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_METADATA; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_READ; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_WRITE; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.is; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) -public class MultiCodecMergeIT extends OpenSearchIntegTestCase { - - @Override - protected Collection> nodePlugins() { - return Collections.singletonList(CustomCodecPlugin.class); - } - - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9872") - public void testForceMergeMultipleCodecs() throws ExecutionException, InterruptedException { - - Map codecMap = Map.of( - "best_compression", - "BEST_COMPRESSION", - "zlib", - "BEST_COMPRESSION", - "zstd_no_dict", - "ZSTD_NO_DICT", - "zstd", - "ZSTD", - "default", - "BEST_SPEED", - "lz4", - "BEST_SPEED" - ); - - for (Map.Entry codec : codecMap.entrySet()) { - forceMergeMultipleCodecs(codec.getKey(), codec.getValue(), codecMap); - } - - } - - private void forceMergeMultipleCodecs(String finalCodec, String finalCodecMode, Map codecMap) throws ExecutionException, - InterruptedException { - - internalCluster().ensureAtLeastNumDataNodes(1); - final String index = "test-index" + finalCodec; - - // creating index - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", "default") - .put("index.merge.policy.max_merged_segment", "1b") - .build() - ); - ensureGreen(index); - // ingesting and asserting segment codec mode for all four codecs - for (Map.Entry codec : codecMap.entrySet()) { - useCodec(index, codec.getKey()); - ingestDocs(index); - } - - assertTrue( - getSegments(index).stream() - .flatMap(s -> s.getAttributes().values().stream()) - .collect(Collectors.toSet()) - .containsAll(codecMap.values()) - ); - - // force merge into final codec - useCodec(index, finalCodec); - flushAndRefreshIndex(index); - final ForceMergeResponse forceMergeResponse = 
client().admin().indices().prepareForceMerge(index).setMaxNumSegments(1).get(); - - assertThat(forceMergeResponse.getFailedShards(), is(0)); - assertThat(forceMergeResponse.getSuccessfulShards(), is(1)); - - flushAndRefreshIndex(index); - - List segments = getSegments(index).stream().filter(Segment::isSearch).collect(Collectors.toList()); - assertEquals(1, segments.size()); - assertTrue(segments.stream().findFirst().get().attributes.containsValue(finalCodecMode)); - } - - private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { - assertAcked(client().admin().indices().prepareClose(index)); - - assertAcked( - client().admin() - .indices() - .updateSettings(new UpdateSettingsRequest(index).settings(Settings.builder().put("index.codec", codec))) - .get() - ); - - assertAcked(client().admin().indices().prepareOpen(index)); - } - - private void ingestDocs(String index) throws InterruptedException { - ingest(index); - flushAndRefreshIndex(index); - } - - private ArrayList getSegments(String index) { - - return new ArrayList<>( - client().admin() - .indices() - .segments(new IndicesSegmentsRequest(index)) - .actionGet() - .getIndices() - .get(index) - .getShards() - .get(0) - .getShards()[0].getSegments() - ); - } - - private void ingest(String index) throws InterruptedException { - - final int nbDocs = randomIntBetween(1, 5); - indexRandom( - randomBoolean(), - false, - randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(index).setId(UUID.randomUUID().toString()).setSource("num", i)) - .collect(toList()) - ); - } - - private void flushAndRefreshIndex(String index) { - - // Request is not blocked - for (String blockSetting : Arrays.asList( - SETTING_BLOCKS_READ, - SETTING_BLOCKS_WRITE, - SETTING_READ_ONLY, - SETTING_BLOCKS_METADATA, - SETTING_READ_ONLY_ALLOW_DELETE - )) { - try { - enableIndexBlock(index, blockSetting); - FlushResponse flushResponse = client().admin().indices().prepareFlush(index).setForce(true).execute().actionGet(); - assertNoFailures(flushResponse); - RefreshResponse response = client().admin().indices().prepareRefresh(index).execute().actionGet(); - assertNoFailures(response); - } finally { - disableIndexBlock(index, blockSetting); - } - } - } - -} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java deleted file mode 100644 index 91a13a1d924a2..0000000000000 --- a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.opensearch.index.IndexSettings; -import org.opensearch.index.codec.CodecServiceFactory; -import org.opensearch.index.engine.EngineConfig; -import org.opensearch.plugins.EnginePlugin; -import org.opensearch.plugins.Plugin; - -import java.util.Optional; - -/** - * A plugin that implements custom codecs. Supports these codecs: - *
* <ul> - *     <li>zstd - *     <li>zstd_no_dict - * </ul>
- * - * @opensearch.internal - */ -public final class CustomCodecPlugin extends Plugin implements EnginePlugin { - - /** - * Creates a new instance - */ - public CustomCodecPlugin() {} - - /** - * @param indexSettings is the default indexSettings - * @return the engine factory - */ - @Override - public Optional getCustomCodecServiceFactory(final IndexSettings indexSettings) { - String codecName = indexSettings.getValue(EngineConfig.INDEX_CODEC_SETTING); - if (codecName.equals(CustomCodecService.ZSTD_NO_DICT_CODEC) || codecName.equals(CustomCodecService.ZSTD_CODEC)) { - return Optional.of(new CustomCodecServiceFactory()); - } - return Optional.empty(); - } -} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java deleted file mode 100644 index de0eb2b3286d3..0000000000000 --- a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.logging.log4j.Logger; -import org.apache.lucene.codecs.Codec; -import org.opensearch.common.collect.MapBuilder; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.codec.CodecService; -import org.opensearch.index.mapper.MapperService; - -import java.util.Arrays; -import java.util.Map; -import java.util.stream.Stream; - -import static org.opensearch.index.engine.EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING; - -/** - * CustomCodecService provides ZSTD and ZSTD_NO_DICT compression codecs. - */ -public class CustomCodecService extends CodecService { - private final Map codecs; - /** - * ZStandard codec - */ - public static final String ZSTD_CODEC = "zstd"; - /** - * ZStandard without dictionary codec - */ - public static final String ZSTD_NO_DICT_CODEC = "zstd_no_dict"; - - /** - * Creates a new CustomCodecService. - * - * @param mapperService The mapper service. - * @param indexSettings The index settings. - * @param logger The logger. 
- */ - public CustomCodecService(MapperService mapperService, IndexSettings indexSettings, Logger logger) { - super(mapperService, indexSettings, logger); - int compressionLevel = indexSettings.getValue(INDEX_CODEC_COMPRESSION_LEVEL_SETTING); - final MapBuilder codecs = MapBuilder.newMapBuilder(); - if (mapperService == null) { - codecs.put(ZSTD_CODEC, new ZstdCodec(compressionLevel)); - codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec(compressionLevel)); - } else { - codecs.put(ZSTD_CODEC, new ZstdCodec(mapperService, logger, compressionLevel)); - codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec(mapperService, logger, compressionLevel)); - } - this.codecs = codecs.immutableMap(); - } - - @Override - public Codec codec(String name) { - Codec codec = codecs.get(name); - if (codec == null) { - return super.codec(name); - } - return codec; - } - - @Override - public String[] availableCodecs() { - return Stream.concat(Arrays.stream(super.availableCodecs()), codecs.keySet().stream()).toArray(String[]::new); - } -} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java deleted file mode 100644 index d634616162684..0000000000000 --- a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.opensearch.index.codec.CodecService; -import org.opensearch.index.codec.CodecServiceConfig; -import org.opensearch.index.codec.CodecServiceFactory; - -/** - * A factory for creating new {@link CodecService} instance - */ -public class CustomCodecServiceFactory implements CodecServiceFactory { - - /** Creates a new instance. */ - public CustomCodecServiceFactory() {} - - @Override - public CodecService createCodecService(CodecServiceConfig config) { - return new CustomCodecService(config.getMapperService(), config.getIndexSettings(), config.getLogger()); - } -} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java deleted file mode 100644 index 89acc980abd2f..0000000000000 --- a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.logging.log4j.Logger; -import org.apache.lucene.codecs.FilterCodec; -import org.apache.lucene.codecs.StoredFieldsFormat; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; -import org.opensearch.index.codec.PerFieldMappingPostingFormatCodec; -import org.opensearch.index.mapper.MapperService; - -import java.util.Collections; -import java.util.Set; - -/** - * - * Extends {@link FilterCodec} to reuse the functionality of Lucene Codec. - * Supports two modes zstd and zstd_no_dict. 
- * - * @opensearch.internal - */ -public abstract class Lucene95CustomCodec extends FilterCodec { - - /** Default compression level used for compression */ - public static final int DEFAULT_COMPRESSION_LEVEL = 3; - - /** Each mode represents a compression algorithm. */ - public enum Mode { - /** - * ZStandard mode with dictionary - */ - ZSTD("ZSTD", Set.of("zstd")), - /** - * ZStandard mode without dictionary - */ - ZSTD_NO_DICT("ZSTDNODICT", Set.of("zstd_no_dict")), - /** - * Deprecated ZStandard mode, added for backward compatibility to support indices created in 2.9.0 where - * both ZSTD and ZSTD_NO_DICT used Lucene95CustomCodec underneath. This should not be used to - * create new indices. - */ - ZSTD_DEPRECATED("Lucene95CustomCodec", Collections.emptySet()); - - private final String codec; - private final Set aliases; - - Mode(String codec, Set aliases) { - this.codec = codec; - this.aliases = aliases; - } - - /** - * Returns the Codec that is registered with Lucene - */ - public String getCodec() { - return codec; - } - - /** - * Returns the aliases of the Codec - */ - public Set getAliases() { - return aliases; - } - } - - private final StoredFieldsFormat storedFieldsFormat; - - /** - * Creates a new compression codec with the default compression level. - * - * @param mode The compression codec (ZSTD or ZSTDNODICT). - */ - public Lucene95CustomCodec(Mode mode) { - this(mode, DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new compression codec with the given compression level. We use - * lowercase letters when registering the codec so that we remain consistent with - * the other compression codecs: default, lucene_default, and best_compression. - * - * @param mode The compression codec (ZSTD or ZSTDNODICT). - * @param compressionLevel The compression level. - */ - public Lucene95CustomCodec(Mode mode, int compressionLevel) { - super(mode.getCodec(), new Lucene95Codec()); - this.storedFieldsFormat = new Lucene95CustomStoredFieldsFormat(mode, compressionLevel); - } - - /** - * Creates a new compression codec with the given compression level. We use - * lowercase letters when registering the codec so that we remain consistent with - * the other compression codecs: default, lucene_default, and best_compression. - * - * @param mode The compression codec (ZSTD or ZSTDNODICT). - * @param compressionLevel The compression level. - * @param mapperService The mapper service. - * @param logger The logger. 
- */ - public Lucene95CustomCodec(Mode mode, int compressionLevel, MapperService mapperService, Logger logger) { - super(mode.getCodec(), new PerFieldMappingPostingFormatCodec(Lucene95Codec.Mode.BEST_SPEED, mapperService, logger)); - this.storedFieldsFormat = new Lucene95CustomStoredFieldsFormat(mode, compressionLevel); - } - - @Override - public StoredFieldsFormat storedFieldsFormat() { - return storedFieldsFormat; - } - - @Override - public String toString() { - return getClass().getSimpleName(); - } -} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java deleted file mode 100644 index 79d97035089ab..0000000000000 --- a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.lucene.codecs.StoredFieldsFormat; -import org.apache.lucene.codecs.StoredFieldsReader; -import org.apache.lucene.codecs.StoredFieldsWriter; -import org.apache.lucene.codecs.compressing.CompressionMode; -import org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsFormat; -import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.SegmentInfo; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; - -import java.io.IOException; -import java.util.Objects; - -/** Stored field format used by pluggable codec */ -public class Lucene95CustomStoredFieldsFormat extends StoredFieldsFormat { - - /** A key that we use to map to a mode */ - public static final String MODE_KEY = Lucene95CustomStoredFieldsFormat.class.getSimpleName() + ".mode"; - - private static final int ZSTD_BLOCK_LENGTH = 10 * 48 * 1024; - private static final int ZSTD_MAX_DOCS_PER_BLOCK = 4096; - private static final int ZSTD_BLOCK_SHIFT = 10; - - private final CompressionMode zstdCompressionMode; - private final CompressionMode zstdNoDictCompressionMode; - - private final Lucene95CustomCodec.Mode mode; - private final int compressionLevel; - - /** default constructor */ - public Lucene95CustomStoredFieldsFormat() { - this(Lucene95CustomCodec.Mode.ZSTD, Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new instance. - * - * @param mode The mode represents ZSTD or ZSTDNODICT - */ - public Lucene95CustomStoredFieldsFormat(Lucene95CustomCodec.Mode mode) { - this(mode, Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new instance with the specified mode and compression level. - * - * @param mode The mode represents ZSTD or ZSTDNODICT - * @param compressionLevel The compression level for the mode. - */ - public Lucene95CustomStoredFieldsFormat(Lucene95CustomCodec.Mode mode, int compressionLevel) { - this.mode = Objects.requireNonNull(mode); - this.compressionLevel = compressionLevel; - zstdCompressionMode = new ZstdCompressionMode(compressionLevel); - zstdNoDictCompressionMode = new ZstdNoDictCompressionMode(compressionLevel); - } - - /** - * Returns a {@link StoredFieldsReader} to load stored fields. - * @param directory The index directory. 
- * @param si The SegmentInfo that stores segment information. - * @param fn The fieldInfos. - * @param context The IOContext that holds additional details on the merge/search context. - */ - @Override - public StoredFieldsReader fieldsReader(Directory directory, SegmentInfo si, FieldInfos fn, IOContext context) throws IOException { - String value = si.getAttribute(MODE_KEY); - if (value == null) { - throw new IllegalStateException("missing value for " + MODE_KEY + " for segment: " + si.name); - } - Lucene95CustomCodec.Mode mode = Lucene95CustomCodec.Mode.valueOf(value); - return impl(mode).fieldsReader(directory, si, fn, context); - } - - /** - * Returns a {@link StoredFieldsReader} to write stored fields. - * @param directory The index directory. - * @param si The SegmentInfo that stores segment information. - * @param context The IOContext that holds additional details on the merge/search context. - */ - @Override - public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) throws IOException { - String previous = si.putAttribute(MODE_KEY, mode.name()); - if (previous != null && previous.equals(mode.name()) == false) { - throw new IllegalStateException( - "found existing value for " + MODE_KEY + " for segment: " + si.name + " old = " + previous + ", new = " + mode.name() - ); - } - return impl(mode).fieldsWriter(directory, si, context); - } - - StoredFieldsFormat impl(Lucene95CustomCodec.Mode mode) { - switch (mode) { - case ZSTD: - case ZSTD_DEPRECATED: - return new Lucene90CompressingStoredFieldsFormat( - "CustomStoredFieldsZstd", - zstdCompressionMode, - ZSTD_BLOCK_LENGTH, - ZSTD_MAX_DOCS_PER_BLOCK, - ZSTD_BLOCK_SHIFT - ); - case ZSTD_NO_DICT: - return new Lucene90CompressingStoredFieldsFormat( - "CustomStoredFieldsZstdNoDict", - zstdNoDictCompressionMode, - ZSTD_BLOCK_LENGTH, - ZSTD_MAX_DOCS_PER_BLOCK, - ZSTD_BLOCK_SHIFT - ); - default: - throw new AssertionError(); - } - } - - Lucene95CustomCodec.Mode getMode() { - return mode; - } - - /** - * Returns the compression level. - */ - public int getCompressionLevel() { - return compressionLevel; - } -} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java deleted file mode 100644 index a3e3a34a5d258..0000000000000 --- a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.logging.log4j.Logger; -import org.opensearch.common.settings.Setting; -import org.opensearch.index.codec.CodecAliases; -import org.opensearch.index.codec.CodecSettings; -import org.opensearch.index.engine.EngineConfig; -import org.opensearch.index.mapper.MapperService; - -import java.util.Set; - -/** - * ZstdCodec provides ZSTD compressor using the zstd-jni library. - */ -public class ZstdCodec extends Lucene95CustomCodec implements CodecSettings, CodecAliases { - - /** - * Creates a new ZstdCodec instance with the default compression level. - */ - public ZstdCodec() { - this(DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new ZstdCodec instance. - * - * @param compressionLevel The compression level. 
- */ - public ZstdCodec(int compressionLevel) { - super(Mode.ZSTD, compressionLevel); - } - - /** - * Creates a new ZstdCodec instance. - * - * @param mapperService The mapper service. - * @param logger The logger. - * @param compressionLevel The compression level. - */ - public ZstdCodec(MapperService mapperService, Logger logger, int compressionLevel) { - super(Mode.ZSTD, compressionLevel, mapperService, logger); - } - - /** The name for this codec. */ - @Override - public String toString() { - return getClass().getSimpleName(); - } - - @Override - public boolean supports(Setting setting) { - return setting.equals(EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING); - } - - @Override - public Set aliases() { - return Mode.ZSTD.getAliases(); - } -} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java deleted file mode 100644 index 05ff725933e1a..0000000000000 --- a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java +++ /dev/null @@ -1,210 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import com.github.luben.zstd.Zstd; -import com.github.luben.zstd.ZstdCompressCtx; -import com.github.luben.zstd.ZstdDecompressCtx; -import com.github.luben.zstd.ZstdDictCompress; -import com.github.luben.zstd.ZstdDictDecompress; - -import org.apache.lucene.codecs.compressing.CompressionMode; -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; -import org.apache.lucene.store.ByteBuffersDataInput; -import org.apache.lucene.store.DataInput; -import org.apache.lucene.store.DataOutput; -import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.BytesRef; - -import java.io.IOException; - -/** Zstandard Compression Mode */ -public class ZstdCompressionMode extends CompressionMode { - - private static final int NUM_SUB_BLOCKS = 10; - private static final int DICT_SIZE_FACTOR = 6; - private static final int DEFAULT_COMPRESSION_LEVEL = 6; - - private final int compressionLevel; - - /** default constructor */ - protected ZstdCompressionMode() { - this.compressionLevel = DEFAULT_COMPRESSION_LEVEL; - } - - /** - * Creates a new instance. - * - * @param compressionLevel The compression level to use. - */ - protected ZstdCompressionMode(int compressionLevel) { - this.compressionLevel = compressionLevel; - } - - /** Creates a new compressor instance.*/ - @Override - public Compressor newCompressor() { - return new ZstdCompressor(compressionLevel); - } - - /** Creates a new decompressor instance. 
*/ - @Override - public Decompressor newDecompressor() { - return new ZstdDecompressor(); - } - - /** zstandard compressor */ - private static final class ZstdCompressor extends Compressor { - - private final int compressionLevel; - private byte[] compressedBuffer; - - /** compressor with a given compression level */ - public ZstdCompressor(int compressionLevel) { - this.compressionLevel = compressionLevel; - compressedBuffer = BytesRef.EMPTY_BYTES; - } - - /* reusable compress function */ - private void doCompress(byte[] bytes, int offset, int length, ZstdCompressCtx cctx, DataOutput out) throws IOException { - if (length == 0) { - out.writeVInt(0); - return; - } - final int maxCompressedLength = (int) Zstd.compressBound(length); - compressedBuffer = ArrayUtil.growNoCopy(compressedBuffer, maxCompressedLength); - - int compressedSize = cctx.compressByteArray(compressedBuffer, 0, compressedBuffer.length, bytes, offset, length); - - out.writeVInt(compressedSize); - out.writeBytes(compressedBuffer, compressedSize); - } - - private void compress(byte[] bytes, int offset, int length, DataOutput out) throws IOException { - assert offset >= 0 : "offset value must be greater than 0"; - - final int dictLength = length / (NUM_SUB_BLOCKS * DICT_SIZE_FACTOR); - final int blockLength = (length - dictLength + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS; - out.writeVInt(dictLength); - out.writeVInt(blockLength); - - final int end = offset + length; - assert end >= 0 : "buffer read size must be greater than 0"; - - try (ZstdCompressCtx cctx = new ZstdCompressCtx()) { - cctx.setLevel(compressionLevel); - - // dictionary compression first - doCompress(bytes, offset, dictLength, cctx, out); - try (ZstdDictCompress dictCompress = new ZstdDictCompress(bytes, offset, dictLength, compressionLevel)) { - cctx.loadDict(dictCompress); - - for (int start = offset + dictLength; start < end; start += blockLength) { - int l = Math.min(blockLength, end - start); - doCompress(bytes, start, l, cctx, out); - } - } - } - } - - @Override - public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException { - final int length = (int) buffersInput.size(); - byte[] bytes = new byte[length]; - buffersInput.readBytes(bytes, 0, length); - compress(bytes, 0, length, out); - } - - @Override - public void close() throws IOException {} - } - - /** zstandard decompressor */ - private static final class ZstdDecompressor extends Decompressor { - - private byte[] compressedBuffer; - - /** default decompressor */ - public ZstdDecompressor() { - compressedBuffer = BytesRef.EMPTY_BYTES; - } - - /* reusable decompress function */ - private void doDecompress(DataInput in, ZstdDecompressCtx dctx, BytesRef bytes, int decompressedLen) throws IOException { - final int compressedLength = in.readVInt(); - if (compressedLength == 0) { - return; - } - - compressedBuffer = ArrayUtil.growNoCopy(compressedBuffer, compressedLength); - in.readBytes(compressedBuffer, 0, compressedLength); - - bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + decompressedLen); - int uncompressed = dctx.decompressByteArray(bytes.bytes, bytes.length, decompressedLen, compressedBuffer, 0, compressedLength); - - if (decompressedLen != uncompressed) { - throw new IllegalStateException(decompressedLen + " " + uncompressed); - } - bytes.length += uncompressed; - } - - @Override - public void decompress(DataInput in, int originalLength, int offset, int length, BytesRef bytes) throws IOException { - assert offset + length <= originalLength : "buffer read size must 
be within limit"; - - if (length == 0) { - bytes.length = 0; - return; - } - final int dictLength = in.readVInt(); - final int blockLength = in.readVInt(); - bytes.bytes = ArrayUtil.growNoCopy(bytes.bytes, dictLength); - bytes.offset = bytes.length = 0; - - try (ZstdDecompressCtx dctx = new ZstdDecompressCtx()) { - - // decompress dictionary first - doDecompress(in, dctx, bytes, dictLength); - try (ZstdDictDecompress dictDecompress = new ZstdDictDecompress(bytes.bytes, 0, dictLength)) { - dctx.loadDict(dictDecompress); - - int offsetInBlock = dictLength; - int offsetInBytesRef = offset; - - // Skip unneeded blocks - while (offsetInBlock + blockLength < offset) { - final int compressedLength = in.readVInt(); - in.skipBytes(compressedLength); - offsetInBlock += blockLength; - offsetInBytesRef -= blockLength; - } - - // Read blocks that intersect with the interval we need - while (offsetInBlock < offset + length) { - bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + blockLength); - int l = Math.min(blockLength, originalLength - offsetInBlock); - doDecompress(in, dctx, bytes, l); - offsetInBlock += blockLength; - } - - bytes.offset = offsetInBytesRef; - bytes.length = length; - - assert bytes.isValid() : "decompression output is corrupted"; - } - } - } - - @Override - public Decompressor clone() { - return new ZstdDecompressor(); - } - } -} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdDeprecatedCodec.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdDeprecatedCodec.java deleted file mode 100644 index 02fa386db97b3..0000000000000 --- a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdDeprecatedCodec.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.logging.log4j.Logger; -import org.opensearch.common.settings.Setting; -import org.opensearch.index.codec.CodecSettings; -import org.opensearch.index.engine.EngineConfig; -import org.opensearch.index.mapper.MapperService; - -/** - * ZstdDeprecatedCodec provides ZSTD compressor using the zstd-jni library. - * Added to support backward compatibility for indices created with Lucene95CustomCodec as codec name. - */ -@Deprecated(since = "2.10") -public class ZstdDeprecatedCodec extends Lucene95CustomCodec implements CodecSettings { - - /** - * Creates a new ZstdDefaultCodec instance with the default compression level. - */ - public ZstdDeprecatedCodec() { - this(DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new ZstdDefaultCodec instance. - * - * @param compressionLevel The compression level. - */ - public ZstdDeprecatedCodec(int compressionLevel) { - super(Mode.ZSTD_DEPRECATED, compressionLevel); - } - - /** - * Creates a new ZstdDefaultCodec instance. - * - * @param mapperService The mapper service. - * @param logger The logger. - * @param compressionLevel The compression level. - */ - public ZstdDeprecatedCodec(MapperService mapperService, Logger logger, int compressionLevel) { - super(Mode.ZSTD_DEPRECATED, compressionLevel, mapperService, logger); - } - - /** The name for this codec. 
*/ - @Override - public String toString() { - return getClass().getSimpleName(); - } - - @Override - public boolean supports(Setting setting) { - return setting.equals(EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING); - } -} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java deleted file mode 100644 index ea7351f755361..0000000000000 --- a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.logging.log4j.Logger; -import org.opensearch.common.settings.Setting; -import org.opensearch.index.codec.CodecAliases; -import org.opensearch.index.codec.CodecSettings; -import org.opensearch.index.engine.EngineConfig; -import org.opensearch.index.mapper.MapperService; - -import java.util.Set; - -/** - * ZstdNoDictCodec provides ZSTD compressor without a dictionary support. - */ -public class ZstdNoDictCodec extends Lucene95CustomCodec implements CodecSettings, CodecAliases { - - /** - * Creates a new ZstdNoDictCodec instance with the default compression level. - */ - public ZstdNoDictCodec() { - this(DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new ZstdNoDictCodec instance. - * - * @param compressionLevel The compression level. - */ - public ZstdNoDictCodec(int compressionLevel) { - super(Mode.ZSTD_NO_DICT, compressionLevel); - } - - /** - * Creates a new ZstdNoDictCodec instance. - * - * @param mapperService The mapper service. - * @param logger The logger. - * @param compressionLevel The compression level. - */ - public ZstdNoDictCodec(MapperService mapperService, Logger logger, int compressionLevel) { - super(Mode.ZSTD_NO_DICT, compressionLevel, mapperService, logger); - } - - /** The name for this codec. */ - @Override - public String toString() { - return getClass().getSimpleName(); - } - - @Override - public boolean supports(Setting setting) { - return setting.equals(EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING); - } - - @Override - public Set aliases() { - return Mode.ZSTD_NO_DICT.getAliases(); - } -} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java deleted file mode 100644 index af4e92b78ed0f..0000000000000 --- a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import com.github.luben.zstd.Zstd; - -import org.apache.lucene.codecs.compressing.CompressionMode; -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; -import org.apache.lucene.store.ByteBuffersDataInput; -import org.apache.lucene.store.DataInput; -import org.apache.lucene.store.DataOutput; -import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.BytesRef; - -import java.io.IOException; - -/** ZSTD Compression Mode (without dictionary support). */ -public class ZstdNoDictCompressionMode extends CompressionMode { - - private static final int NUM_SUB_BLOCKS = 10; - private static final int DEFAULT_COMPRESSION_LEVEL = 6; - - private final int compressionLevel; - - /** default constructor */ - protected ZstdNoDictCompressionMode() { - this.compressionLevel = DEFAULT_COMPRESSION_LEVEL; - } - - /** - * Creates a new instance with the given compression level. - * - * @param compressionLevel The compression level. - */ - protected ZstdNoDictCompressionMode(int compressionLevel) { - this.compressionLevel = compressionLevel; - } - - /** Creates a new compressor instance.*/ - @Override - public Compressor newCompressor() { - return new ZstdCompressor(compressionLevel); - } - - /** Creates a new decompressor instance. */ - @Override - public Decompressor newDecompressor() { - return new ZstdDecompressor(); - } - - /** zstandard compressor */ - private static final class ZstdCompressor extends Compressor { - - private final int compressionLevel; - private byte[] compressedBuffer; - - /** compressor with a given compression level */ - public ZstdCompressor(int compressionLevel) { - this.compressionLevel = compressionLevel; - compressedBuffer = BytesRef.EMPTY_BYTES; - } - - private void compress(byte[] bytes, int offset, int length, DataOutput out) throws IOException { - assert offset >= 0 : "offset value must be greater than 0"; - - int blockLength = (length + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS; - out.writeVInt(blockLength); - - final int end = offset + length; - assert end >= 0 : "buffer read size must be greater than 0"; - - for (int start = offset; start < end; start += blockLength) { - int l = Math.min(blockLength, end - start); - - if (l == 0) { - out.writeVInt(0); - return; - } - - final int maxCompressedLength = (int) Zstd.compressBound(l); - compressedBuffer = ArrayUtil.growNoCopy(compressedBuffer, maxCompressedLength); - - int compressedSize = (int) Zstd.compressByteArray( - compressedBuffer, - 0, - compressedBuffer.length, - bytes, - start, - l, - compressionLevel - ); - - out.writeVInt(compressedSize); - out.writeBytes(compressedBuffer, compressedSize); - } - } - - @Override - public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException { - final int length = (int) buffersInput.size(); - byte[] bytes = new byte[length]; - buffersInput.readBytes(bytes, 0, length); - compress(bytes, 0, length, out); - } - - @Override - public void close() throws IOException {} - } - - /** zstandard decompressor */ - private static final class ZstdDecompressor extends Decompressor { - - private byte[] compressed; - - /** default decompressor */ - public ZstdDecompressor() { - compressed = BytesRef.EMPTY_BYTES; - } - - @Override - public void decompress(DataInput in, int originalLength, int offset, int length, BytesRef bytes) throws IOException { - assert offset + length <= originalLength : "buffer read size must be within limit"; - - if 
(length == 0) { - bytes.length = 0; - return; - } - - final int blockLength = in.readVInt(); - bytes.offset = bytes.length = 0; - int offsetInBlock = 0; - int offsetInBytesRef = offset; - - // Skip unneeded blocks - while (offsetInBlock + blockLength < offset) { - final int compressedLength = in.readVInt(); - in.skipBytes(compressedLength); - offsetInBlock += blockLength; - offsetInBytesRef -= blockLength; - } - - // Read blocks that intersect with the interval we need - while (offsetInBlock < offset + length) { - bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + blockLength); - final int compressedLength = in.readVInt(); - if (compressedLength == 0) { - return; - } - compressed = ArrayUtil.growNoCopy(compressed, compressedLength); - in.readBytes(compressed, 0, compressedLength); - - int l = Math.min(blockLength, originalLength - offsetInBlock); - bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + l); - - byte[] output = new byte[l]; - - final int uncompressed = (int) Zstd.decompressByteArray(output, 0, l, compressed, 0, compressedLength); - System.arraycopy(output, 0, bytes.bytes, bytes.length, uncompressed); - - bytes.length += uncompressed; - offsetInBlock += blockLength; - } - - bytes.offset = offsetInBytesRef; - bytes.length = length; - - assert bytes.isValid() : "decompression output is corrupted."; - } - - @Override - public Decompressor clone() { - return new ZstdDecompressor(); - } - } -} diff --git a/plugins/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/plugins/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec deleted file mode 100644 index ba5054055d00a..0000000000000 --- a/plugins/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ /dev/null @@ -1,3 +0,0 @@ -org.opensearch.index.codec.customcodecs.ZstdCodec -org.opensearch.index.codec.customcodecs.ZstdNoDictCodec -org.opensearch.index.codec.customcodecs.ZstdDeprecatedCodec diff --git a/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java b/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java deleted file mode 100644 index cc794eb2c48f1..0000000000000 --- a/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; -import org.apache.lucene.store.ByteArrayDataInput; -import org.apache.lucene.store.ByteBuffersDataInput; -import org.apache.lucene.store.ByteBuffersDataOutput; -import org.apache.lucene.tests.util.LineFileDocs; -import org.apache.lucene.tests.util.TestUtil; -import org.apache.lucene.util.BytesRef; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.util.List; -import java.util.Random; - -/** - * Test cases for compressors (based on {@link org.opensearch.common.compress.DeflateCompressTests}). 
- */ -public abstract class AbstractCompressorTests extends OpenSearchTestCase { - - abstract Compressor compressor(); - - abstract Decompressor decompressor(); - - public void testEmpty() throws IOException { - final byte[] bytes = "".getBytes(StandardCharsets.UTF_8); - doTest(bytes); - } - - public void testShortLiterals() throws IOException { - final byte[] bytes = "1234567345673456745608910123".getBytes(StandardCharsets.UTF_8); - doTest(bytes); - } - - public void testRandom() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - final byte[] bytes = new byte[TestUtil.nextInt(r, 1, 100000)]; - r.nextBytes(bytes); - doTest(bytes); - } - } - - public void testLineDocs() throws IOException { - Random r = random(); - LineFileDocs lineFileDocs = new LineFileDocs(r); - for (int i = 0; i < 10; i++) { - int numDocs = TestUtil.nextInt(r, 1, 200); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - for (int j = 0; j < numDocs; j++) { - String s = lineFileDocs.nextDoc().get("body"); - bos.write(s.getBytes(StandardCharsets.UTF_8)); - } - doTest(bos.toByteArray()); - } - lineFileDocs.close(); - } - - public void testRepetitionsL() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - int numLongs = TestUtil.nextInt(r, 1, 10000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - long theValue = r.nextLong(); - for (int j = 0; j < numLongs; j++) { - if (r.nextInt(10) == 0) { - theValue = r.nextLong(); - } - bos.write((byte) (theValue >>> 56)); - bos.write((byte) (theValue >>> 48)); - bos.write((byte) (theValue >>> 40)); - bos.write((byte) (theValue >>> 32)); - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } - - public void testRepetitionsI() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - int numInts = TestUtil.nextInt(r, 1, 20000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - int theValue = r.nextInt(); - for (int j = 0; j < numInts; j++) { - if (r.nextInt(10) == 0) { - theValue = r.nextInt(); - } - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } - - public void testRepetitionsS() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - int numShorts = TestUtil.nextInt(r, 1, 40000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - short theValue = (short) r.nextInt(65535); - for (int j = 0; j < numShorts; j++) { - if (r.nextInt(10) == 0) { - theValue = (short) r.nextInt(65535); - } - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } - - public void testMixed() throws IOException { - Random r = random(); - LineFileDocs lineFileDocs = new LineFileDocs(r); - for (int i = 0; i < 2; ++i) { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - int prevInt = r.nextInt(); - long prevLong = r.nextLong(); - while (bos.size() < 400000) { - switch (r.nextInt(4)) { - case 0: - addInt(r, prevInt, bos); - break; - case 1: - addLong(r, prevLong, bos); - break; - case 2: - addString(lineFileDocs, bos); - break; - case 3: - addBytes(r, bos); - break; - default: - throw new IllegalStateException("Random is broken"); - } - } - doTest(bos.toByteArray()); - } - } - - private void addLong(Random r, long prev, ByteArrayOutputStream bos) { - 
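- // Usually (9 times in 10) draw a fresh random long, otherwise keep prev, then write it as 8 big-endian bytes.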
long theValue = prev; - if (r.nextInt(10) != 0) { - theValue = r.nextLong(); - } - bos.write((byte) (theValue >>> 56)); - bos.write((byte) (theValue >>> 48)); - bos.write((byte) (theValue >>> 40)); - bos.write((byte) (theValue >>> 32)); - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - - private void addInt(Random r, int prev, ByteArrayOutputStream bos) { - int theValue = prev; - if (r.nextInt(10) != 0) { - theValue = r.nextInt(); - } - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - - private void addString(LineFileDocs lineFileDocs, ByteArrayOutputStream bos) throws IOException { - String s = lineFileDocs.nextDoc().get("body"); - bos.write(s.getBytes(StandardCharsets.UTF_8)); - } - - private void addBytes(Random r, ByteArrayOutputStream bos) throws IOException { - byte bytes[] = new byte[TestUtil.nextInt(r, 1, 10000)]; - r.nextBytes(bytes); - bos.write(bytes); - } - - private void doTest(byte[] bytes) throws IOException { - final int length = bytes.length; - - ByteBuffersDataInput in = new ByteBuffersDataInput(List.of(ByteBuffer.wrap(bytes))); - ByteBuffersDataOutput out = new ByteBuffersDataOutput(); - - // let's compress - Compressor compressor = compressor(); - compressor.compress(in, out); - byte[] compressed = out.toArrayCopy(); - - // let's decompress - BytesRef outbytes = new BytesRef(); - Decompressor decompressor = decompressor(); - decompressor.decompress(new ByteArrayDataInput(compressed), length, 0, length, outbytes); - - // get the uncompressed array out of outbytes - byte[] restored = new byte[outbytes.length]; - System.arraycopy(outbytes.bytes, 0, restored, 0, outbytes.length); - - assertArrayEquals(bytes, restored); - } - -} diff --git a/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/CustomCodecTests.java b/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/CustomCodecTests.java deleted file mode 100644 index 5365b9e222d9a..0000000000000 --- a/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/CustomCodecTests.java +++ /dev/null @@ -1,250 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.logging.log4j.LogManager; -import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; -import org.apache.lucene.document.Document; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.SegmentReader; -import org.apache.lucene.store.Directory; -import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; -import org.opensearch.common.settings.IndexScopedSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.env.Environment; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.analysis.IndexAnalyzers; -import org.opensearch.index.codec.CodecService; -import org.opensearch.index.codec.CodecServiceConfig; -import org.opensearch.index.codec.CodecServiceFactory; -import org.opensearch.index.codec.CodecSettings; -import org.opensearch.index.engine.EngineConfig; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.similarity.SimilarityService; -import org.opensearch.indices.mapper.MapperRegistry; -import org.opensearch.plugins.MapperPlugin; -import org.opensearch.test.IndexSettingsModule; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.util.Collections; -import java.util.Optional; - -import static org.opensearch.index.codec.customcodecs.CustomCodecService.ZSTD_CODEC; -import static org.opensearch.index.codec.customcodecs.CustomCodecService.ZSTD_NO_DICT_CODEC; -import static org.opensearch.index.engine.EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING; - -@SuppressCodecs("*") // we test against default codec so never get a random one here! 
-public class CustomCodecTests extends OpenSearchTestCase { - - private CustomCodecPlugin plugin; - - @Before - public void setup() { - plugin = new CustomCodecPlugin(); - } - - public void testZstd() throws Exception { - Codec codec = createCodecService(false).codec("zstd"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDict() throws Exception { - Codec codec = createCodecService(false).codec("zstd_no_dict"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdDeprecatedCodec() { - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> createCodecService(false).codec("ZSTD_DEPRECATED") - ); - assertTrue(e.getMessage().startsWith("failed to find codec")); - } - - public void testZstdWithCompressionLevel() throws Exception { - int randomCompressionLevel = randomIntBetween(1, 6); - Codec codec = createCodecService(randomCompressionLevel, "zstd").codec("zstd"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(randomCompressionLevel, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDictWithCompressionLevel() throws Exception { - int randomCompressionLevel = randomIntBetween(1, 6); - Codec codec = createCodecService(randomCompressionLevel, "zstd_no_dict").codec("zstd_no_dict"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(randomCompressionLevel, storedFieldsFormat.getCompressionLevel()); - } - - public void testBestCompressionWithCompressionLevel() { - final Settings zstdSettings = Settings.builder() - .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) - .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(ZSTD_CODEC, ZSTD_NO_DICT_CODEC)) - .build(); - - // able to validate zstd - final IndexScopedSettings zstdIndexScopedSettings = new IndexScopedSettings( - zstdSettings, - IndexScopedSettings.BUILT_IN_INDEX_SETTINGS - ); - zstdIndexScopedSettings.validate(zstdSettings, true); - } - - public void testLuceneCodecsWithCompressionLevel() { - final Settings customCodecSettings = Settings.builder() - .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) - .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom("zstd", "zstd_no_dict")) - .build(); - - final IndexScopedSettings customCodecIndexScopedSettings = new IndexScopedSettings( - customCodecSettings, - IndexScopedSettings.BUILT_IN_INDEX_SETTINGS - ); - customCodecIndexScopedSettings.validate(customCodecSettings, true); - } - - public void testZstandardCompressionLevelSupport() throws Exception { - CodecService codecService = createCodecService(false); - CodecSettings zstdCodec = (CodecSettings) codecService.codec("zstd"); - CodecSettings 
zstdNoDictCodec = (CodecSettings) codecService.codec("zstd_no_dict"); - assertTrue(zstdCodec.supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)); - assertTrue(zstdNoDictCodec.supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)); - } - - public void testDefaultMapperServiceNull() throws Exception { - Codec codec = createCodecService(true).codec("default"); - assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); - } - - public void testBestCompressionMapperServiceNull() throws Exception { - Codec codec = createCodecService(true).codec("best_compression"); - assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_COMPRESSION, codec); - } - - public void testZstdMapperServiceNull() throws Exception { - Codec codec = createCodecService(true).codec("zstd"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDictMapperServiceNull() throws Exception { - Codec codec = createCodecService(true).codec("zstd_no_dict"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - - // write some docs with it, inspect .si to see this was the used compression - private void assertStoredFieldsCompressionEquals(Lucene95Codec.Mode expected, Codec actual) throws Exception { - SegmentReader sr = getSegmentReader(actual); - String v = sr.getSegmentInfo().info.getAttribute(Lucene90StoredFieldsFormat.MODE_KEY); - assertNotNull(v); - assertEquals(expected, Lucene95Codec.Mode.valueOf(v)); - } - - private void assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode expected, Codec actual) throws Exception { - SegmentReader sr = getSegmentReader(actual); - String v = sr.getSegmentInfo().info.getAttribute(Lucene95CustomStoredFieldsFormat.MODE_KEY); - assertNotNull(v); - assertEquals(expected, Lucene95CustomCodec.Mode.valueOf(v)); - } - - private CodecService createCodecService(boolean isMapperServiceNull) throws IOException { - Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); - if (isMapperServiceNull) { - return new CustomCodecService(null, IndexSettingsModule.newIndexSettings("_na", nodeSettings), LogManager.getLogger("test")); - } - return buildCodecService(nodeSettings); - } - - private CodecService createCodecService(int randomCompressionLevel, String codec) throws IOException { - Settings nodeSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put("index.codec", codec) - .put("index.codec.compression_level", randomCompressionLevel) - .build(); - return buildCodecService(nodeSettings); - } - - private CodecService buildCodecService(Settings nodeSettings) throws IOException { - - IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("_na", nodeSettings); - SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); - IndexAnalyzers indexAnalyzers = createTestAnalysis(indexSettings, nodeSettings).indexAnalyzers; - MapperRegistry mapperRegistry = new MapperRegistry(Collections.emptyMap(), 
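- // arguments: mapper parsers, metadata mapper parsers, and the no-op field filter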
Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER); - MapperService service = new MapperService( - indexSettings, - indexAnalyzers, - xContentRegistry(), - similarityService, - mapperRegistry, - () -> null, - () -> false, - null - ); - - Optional customCodecServiceFactory = plugin.getCustomCodecServiceFactory(indexSettings); - if (customCodecServiceFactory.isPresent()) { - return customCodecServiceFactory.get().createCodecService(new CodecServiceConfig(indexSettings, service, logger)); - } - return new CustomCodecService(service, indexSettings, LogManager.getLogger("test")); - } - - private SegmentReader getSegmentReader(Codec codec) throws IOException { - Directory dir = newDirectory(); - IndexWriterConfig iwc = newIndexWriterConfig(null); - iwc.setCodec(codec); - IndexWriter iw = new IndexWriter(dir, iwc); - iw.addDocument(new Document()); - iw.commit(); - iw.close(); - DirectoryReader ir = DirectoryReader.open(dir); - SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader(); - ir.close(); - dir.close(); - return sr; - } - -} diff --git a/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java b/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java deleted file mode 100644 index e87fb56770e4c..0000000000000 --- a/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.opensearch.test.OpenSearchTestCase; - -public class Lucene95CustomStoredFieldsFormatTests extends OpenSearchTestCase { - - public void testDefaultLucene95CustomCodecMode() { - Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat(); - assertEquals(Lucene95CustomCodec.Mode.ZSTD, lucene95CustomStoredFieldsFormat.getMode()); - } - - public void testZstdNoDictLucene95CustomCodecMode() { - Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat( - Lucene95CustomCodec.Mode.ZSTD_NO_DICT - ); - assertEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, lucene95CustomStoredFieldsFormat.getMode()); - } - - public void testZstdModeWithCompressionLevel() { - int randomCompressionLevel = randomIntBetween(1, 6); - Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat( - Lucene95CustomCodec.Mode.ZSTD, - randomCompressionLevel - ); - assertEquals(Lucene95CustomCodec.Mode.ZSTD, lucene95CustomStoredFieldsFormat.getMode()); - assertEquals(randomCompressionLevel, lucene95CustomStoredFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDictLucene95CustomCodecModeWithCompressionLevel() { - int randomCompressionLevel = randomIntBetween(1, 6); - Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat( - Lucene95CustomCodec.Mode.ZSTD_NO_DICT, - randomCompressionLevel - ); - assertEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, lucene95CustomStoredFieldsFormat.getMode()); - assertEquals(randomCompressionLevel, lucene95CustomStoredFieldsFormat.getCompressionLevel()); - } - -} diff --git 
a/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java b/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java deleted file mode 100644 index 78cf62c08f889..0000000000000 --- a/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ -package org.opensearch.index.codec.customcodecs; - -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; - -/** - * Test ZSTD compression (with dictionary enabled) - */ -public class ZstdCompressorTests extends AbstractCompressorTests { - - private final Compressor compressor = new ZstdCompressionMode().newCompressor(); - private final Decompressor decompressor = new ZstdCompressionMode().newDecompressor(); - - @Override - Compressor compressor() { - return compressor; - } - - @Override - Decompressor decompressor() { - return decompressor; - } -} diff --git a/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java b/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java deleted file mode 100644 index 2eda81a6af2ab..0000000000000 --- a/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ -package org.opensearch.index.codec.customcodecs; - -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; - -/** - * Test ZSTD compression (with no dictionary). 
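- * Binds the shared AbstractCompressorTests round-trip suite to the no-dictionary compressor and decompressor.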
- */ -public class ZstdNoDictCompressorTests extends AbstractCompressorTests { - - private final Compressor compressor = new ZstdNoDictCompressionMode().newCompressor(); - private final Decompressor decompressor = new ZstdNoDictCompressionMode().newDecompressor(); - - @Override - Compressor compressor() { - return compressor; - } - - @Override - Decompressor decompressor() { - return decompressor; - } -} diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java index 23070c389a7b1..6bed6564cfd36 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java @@ -48,6 +48,7 @@ import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; @@ -99,7 +100,14 @@ public TransportAddress[] addressesFromString(String address) { return new TransportAddress[] { poorMansDNS.getOrDefault(address, buildNewFakeTransportAddress()) }; } }; - return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); + return new MockTransportService( + Settings.EMPTY, + transport, + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + null, + NoopTracer.INSTANCE + ); } protected List buildDynamicHosts(Settings nodeSettings, int nodes) { diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java index c792fe6d96728..3311ddc8842f2 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java @@ -47,6 +47,7 @@ import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.discovery.SeedHostsProvider; import org.opensearch.discovery.SeedHostsResolver; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; @@ -80,7 +81,8 @@ protected MockTransportService createTransportService() { ), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - null + null, + NoopTracer.INSTANCE ); } diff --git a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java index c63085deb466f..b4af9773f33de 100644 --- a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java @@ -38,6 +38,7 @@ import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import 
org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -109,7 +110,7 @@ public void setProjectName() { @Before public void createTransportService() { - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); } @After diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java index b7b8ef0226d4e..b93172537d419 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java @@ -51,6 +51,7 @@ public class CorrelationCodecTests extends OpenSearchTestCase { * test correlation vector index * @throws Exception Exception */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8329") public void testCorrelationVectorIndex() throws Exception { Function perFieldCorrelationVectorsProvider = mapperService -> new PerFieldCorrelationVectorsFormat(Optional.of(mapperService)); diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/InMemorySingletonSpanExporter.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/InMemorySingletonSpanExporter.java index 5fb6acc8346db..6dd451ea37465 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/InMemorySingletonSpanExporter.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/InMemorySingletonSpanExporter.java @@ -11,7 +11,9 @@ import org.opensearch.test.telemetry.tracing.MockSpanData; import java.util.Collection; +import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import io.opentelemetry.sdk.common.CompletableResultCode; @@ -21,7 +23,7 @@ public class InMemorySingletonSpanExporter implements SpanExporter { - private static final InMemorySingletonSpanExporter INSTANCE = new InMemorySingletonSpanExporter(InMemorySpanExporter.create()); + public static final InMemorySingletonSpanExporter INSTANCE = new InMemorySingletonSpanExporter(InMemorySpanExporter.create()); private static InMemorySpanExporter delegate; @@ -62,10 +64,30 @@ private List convertSpanDataListToMockSpanDataList(List spanData.getStartEpochNanos(), spanData.getEndEpochNanos(), spanData.hasEnded(), - spanData.getName() + spanData.getName(), + getAttributes(spanData) ) ) .collect(Collectors.toList()); return mockSpanDataList; } + + private Map getAttributes(SpanData spanData) { + if (spanData.getAttributes() != null) { + return spanData.getAttributes() + .asMap() + .entrySet() + .stream() + .collect(Collectors.toMap(e -> e.getKey().getKey(), e -> e.getValue())); + } else { + return Collections.emptyMap(); + } + } + + /** + * Clears the state. 
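+ * Integration tests call this before issuing requests so spans recorded by earlier tests do not leak into later assertions.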
+ */ + public void reset() { + delegate.reset(); + } } diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java index 476a5a9cabdc7..949a58f6cab41 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java @@ -63,6 +63,8 @@ public void testSanityCheckWhenTracingDisabled() throws Exception { ensureGreen(); refresh(); + InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.INSTANCE; + exporter.reset(); // Make the search call; client.prepareSearch().setQuery(queryStringQuery("fox")).get(); @@ -70,7 +72,6 @@ public void testSanityCheckWhenTracingDisabled() throws Exception { // Sleep for about 3s to wait for traces are published (the delay is 1s) Thread.sleep(3000); - InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.create(); assertTrue(exporter.getFinishedSpanItems().isEmpty()); } diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java index 9f99099e85c9f..2d0111e64faad 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java @@ -14,6 +14,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.telemetry.tracing.TelemetryValidators; import org.opensearch.test.telemetry.tracing.validators.AllSpansAreEndedProperly; @@ -38,6 +39,7 @@ protected Settings nodeSettings(int nodeOrdinal) { "org.opensearch.telemetry.tracing.InMemorySingletonSpanExporter" ) .put(OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .put(TelemetrySettings.TRACER_SAMPLER_PROBABILITY.getKey(), 1.0d) .build(); } @@ -52,13 +54,9 @@ protected boolean addMockTelemetryPlugin() { } public void testSanityChecksWhenTracingEnabled() throws Exception { - Client client = client(); + Client client = internalCluster().clusterManagerClient(); // ENABLE TRACING - client.admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true)) - .get(); + updateTelemetrySetting(client, true); // Create Index and ingest data String indexName = "test-index-11"; @@ -70,9 +68,12 @@ public void testSanityChecksWhenTracingEnabled() throws Exception { ensureGreen(); refresh(); - // Make the search calls; - client.prepareSearch().setQuery(queryStringQuery("fox")).get(); - client.prepareSearch().setQuery(queryStringQuery("jumps")).get(); + // Make the search calls; adding the searchType and PreFilterShardSize to make the query path predictable across all the runs. 
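+ // With the query path fixed, the two searches below should yield exactly two query-phase trace IDs, which the NumberOfTraceIDsEqualToRequests validator asserts further down.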
+ client.prepareSearch().setSearchType("query_then_fetch").setPreFilterShardSize(3).setQuery(queryStringQuery("fox")).get(); + client.prepareSearch().setSearchType("query_then_fetch").setPreFilterShardSize(3).setQuery(queryStringQuery("jumps")).get(); + + ensureGreen(); + refresh(); // Sleep for about 3s to wait for traces are published, delay is (the delay is 1s). Thread.sleep(3000); @@ -81,15 +82,23 @@ public void testSanityChecksWhenTracingEnabled() throws Exception { Arrays.asList( new AllSpansAreEndedProperly(), new AllSpansHaveUniqueId(), - new NumberOfTraceIDsEqualToRequests(), + new NumberOfTraceIDsEqualToRequests(Attributes.create().addAttribute("action", "indices:data/read/search[phase/query]")), new TotalRootSpansEqualToRequests() ) ); - InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.create(); + InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.INSTANCE; if (!exporter.getFinishedSpanItems().isEmpty()) { validators.validate(exporter.getFinishedSpanItems(), 2); } } + private static void updateTelemetrySetting(Client client, boolean value) { + client.admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), value)) + .get(); + } + } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java index 6165df6a591d6..ecf9ad9f17f87 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java @@ -56,6 +56,7 @@ import org.opensearch.nio.NioSocketChannel; import org.opensearch.nio.ServerChannelContext; import org.opensearch.nio.SocketChannelContext; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.nio.NioGroupFactory; import org.opensearch.transport.nio.PageAllocator; @@ -106,9 +107,10 @@ public NioHttpServerTransport( NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, NioGroupFactory nioGroupFactory, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { - super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings); + super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, tracer); this.pageAllocator = new PageAllocator(pageCacheRecycler); this.nioGroupFactory = nioGroupFactory; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java index a3475c2ea2969..ec266d76eff3d 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java @@ -50,6 +50,7 @@ import org.opensearch.http.nio.NioHttpServerTransport; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -117,7 +118,8 @@ public Map> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings + 
ClusterSettings clusterSettings, + Tracer tracer ) { return Collections.singletonMap( NIO_HTTP_TRANSPORT_NAME, @@ -130,7 +132,8 @@ public Map> getHttpTransports( xContentRegistry, dispatcher, getNioGroupFactory(settings), - clusterSettings + clusterSettings, + tracer ) ); } diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java index 89e6e9ce88408..09594673de5b2 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java @@ -56,6 +56,7 @@ import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; @@ -186,7 +187,8 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -236,7 +238,8 @@ public void testBindUnavailableAddress() { xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -255,7 +258,8 @@ public void testBindUnavailableAddress() { xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); @@ -298,7 +302,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -372,7 +377,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -438,7 +444,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -500,7 +507,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new 
NioGroupFactory(settings, logger),
- new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
+ new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
+ NoopTracer.INSTANCE
) ) {
transport.start();
diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java
index 5d2f835aff4b0..ea20d8000f640 100644
--- a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java
+++ b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java
@@ -67,6 +67,7 @@ import org.opensearch.common.xcontent.json.JsonXContent;
import org.opensearch.search.aggregations.InternalAggregations;
import org.opensearch.search.internal.InternalSearchResponse;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
import org.opensearch.test.rest.OpenSearchRestTestCase;
import org.opensearch.test.transport.MockTransportService;
import org.opensearch.threadpool.TestThreadPool;
@@ -118,7 +119,7 @@ private static MockTransportService startTransport(
boolean success = false;
final Settings s = Settings.builder().put("node.name", id).build();
ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(s);
- MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, null);
+ MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, NoopTracer.INSTANCE);
try {
newService.registerRequestHandler(ClusterSearchShardsAction.NAME, ThreadPool.Names.SAME, ClusterSearchShardsRequest::new, (request, channel, task) -> {
diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java
index 75083a929b491..686fc78dcec8a 100644
--- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java
+++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java
@@ -114,7 +114,6 @@ private void printClusterRouting() throws IOException, ParseException {
* This test verifies that segment replication does not break when primary shards are on lower OS version. It does this
* by verifying replica shards contains same number of documents as primary's.
*/
- @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9685")
public void testIndexingWithPrimaryOnBwcNodes() throws Exception {
if (UPGRADE_FROM_VERSION.before(Version.V_2_4_0)) {
logger.info("--> Skip test for version {} where segment replication feature is not available", UPGRADE_FROM_VERSION);
diff --git a/release-notes/opensearch.release-notes-2.10.0.md b/release-notes/opensearch.release-notes-2.10.0.md
new file mode 100644
index 0000000000000..9d5f75d61ee2a
--- /dev/null
+++ b/release-notes/opensearch.release-notes-2.10.0.md
@@ -0,0 +1,136 @@
+## 2023-09-08 Version 2.10.0 Release Notes
+
+## [2.10]
+
+### Added
+- Add server version as REST response header [#6583](https://github.com/opensearch-project/OpenSearch/issues/6583)
+- Start replication checkpointTimers on primary before segments upload to remote store. ([#8221](https://github.com/opensearch-project/OpenSearch/pull/8221))
+- Introduce new static cluster setting to control slice computation for concurrent segment search.
([#8847](https://github.com/opensearch-project/OpenSearch/pull/8884))
+- Add configuration for file cache size to max remote data ratio to prevent oversubscription of file cache ([#8606](https://github.com/opensearch-project/OpenSearch/pull/8606))
+- Disallow compression level to be set for default and best_compression index codecs ([#8737](https://github.com/opensearch-project/OpenSearch/pull/8737))
+- [distribution/archives] [Linux] [x64] Provide the variant of the distributions bundled with JRE ([#8195](https://github.com/opensearch-project/OpenSearch/pull/8195))
+- Prioritize replica shard movement during shard relocation ([#8875](https://github.com/opensearch-project/OpenSearch/pull/8875))
+- Introducing Default and Best Compression codecs as their algorithm name ([#9123](https://github.com/opensearch-project/OpenSearch/pull/9123))
+- Make SearchTemplateRequest implement IndicesRequest.Replaceable ([#9122](https://github.com/opensearch-project/OpenSearch/pull/9122))
+- [BWC and API enforcement] Define the initial set of annotations, their meaning and relations between them ([#9223](https://github.com/opensearch-project/OpenSearch/pull/9223))
+- [Remote Store] Add Segment download stats to remotestore stats API ([#8718](https://github.com/opensearch-project/OpenSearch/pull/8718))
+- [Remote Store] Add remote segment transfer stats on NodesStats API ([#9168](https://github.com/opensearch-project/OpenSearch/pull/9168) [#9393](https://github.com/opensearch-project/OpenSearch/pull/9393) [#9454](https://github.com/opensearch-project/OpenSearch/pull/9454))
+- [Segment Replication] Support realtime reads for GET requests ([#9212](https://github.com/opensearch-project/OpenSearch/pull/9212))
+- Allow test clusters to run with TLS ([#8900](https://github.com/opensearch-project/OpenSearch/pull/8900))
+- Add jdk.incubator.vector module support for JDK 20+ ([#8601](https://github.com/opensearch-project/OpenSearch/pull/8601))
+- [Feature] Expose term frequency in Painless script score context ([#9081](https://github.com/opensearch-project/OpenSearch/pull/9081))
+- Add support for reading partial files to HDFS repository ([#9513](https://github.com/opensearch-project/OpenSearch/issues/9513))
+- [Remote Store] Rate limiter integration for remote store uploads and downloads ([#9448](https://github.com/opensearch-project/OpenSearch/pull/9448/))
+- [BWC and API enforcement] Decorate the existing APIs with proper annotations (part 1) ([#9520](https://github.com/opensearch-project/OpenSearch/pull/9520))
+- Add support for extensions to search responses using SearchExtBuilder ([#9379](https://github.com/opensearch-project/OpenSearch/pull/9379))
+- [Remote State] Create service to publish cluster state to remote store ([#9160](https://github.com/opensearch-project/OpenSearch/pull/9160))
+- Core crypto library to perform encryption and decryption of source content ([#8466](https://github.com/opensearch-project/OpenSearch/pull/8466))
+- Expose DelimitedTermFrequencyTokenFilter to allow providing term frequencies along with terms ([#9479](https://github.com/opensearch-project/OpenSearch/pull/9479))
+- APIs for performing async blob reads and async downloads from the repository using multiple streams ([#9592](https://github.com/opensearch-project/OpenSearch/issues/9592))
+- Add concurrent segment search related metrics to node and index stats ([#9622](https://github.com/opensearch-project/OpenSearch/issues/9622))
+- Add average concurrency metric for concurrent segment search
([#9670](https://github.com/opensearch-project/OpenSearch/issues/9670)) +- Introduce cluster default remote translog buffer interval setting ([#9584](https://github.com/opensearch-project/OpenSearch/pull/9584)) +- Added encryption-sdk lib to provide encryption and decryption capabilities ([#8466](https://github.com/opensearch-project/OpenSearch/pull/8466) [#9289](https://github.com/opensearch-project/OpenSearch/pull/9289)) +- [Segment Replication] Adding segment replication statistics rolled up at index, node and cluster level ([#9709](https://github.com/opensearch-project/OpenSearch/pull/9709)) +- Added crypto-kms plugin to provide AWS KMS based key providers for encryption/decryption. ([#8465](https://github.com/opensearch-project/OpenSearch/pull/8465)) +- [Remote state] Integrate remote cluster state in publish/commit flow ([#9665](https://github.com/opensearch-project/OpenSearch/pull/9665)) +- [Remote Store] Changes to introduce repository registration during bootstrap via node attributes. ([#9105](https://github.com/opensearch-project/OpenSearch/pull/9105)) +- [Remote state] Auto restore index metadata from last known cluster state ([#9831](https://github.com/opensearch-project/OpenSearch/pull/9831)) + +### Dependencies +- Bump `org.apache.logging.log4j:log4j-core` from 2.17.1 to 2.20.0 ([#8307](https://github.com/opensearch-project/OpenSearch/pull/8307)) +- Bump `io.grpc:grpc-context` from 1.46.0 to 1.57.1 ([#8726](https://github.com/opensearch-project/OpenSearch/pull/8726), [#9145](https://github.com/opensearch-project/OpenSearch/pull/9145)) +- Bump `com.netflix.nebula:gradle-info-plugin` from 12.1.5 to 12.1.6 ([#8724](https://github.com/opensearch-project/OpenSearch/pull/8724)) +- Bump `commons-codec:commons-codec` from 1.15 to 1.16.0 ([#8725](https://github.com/opensearch-project/OpenSearch/pull/8725)) +- Bump `org.apache.zookeeper:zookeeper` from 3.8.1 to 3.9.0 ([#8844](https://github.com/opensearch-project/OpenSearch/pull/8844), [#9146](https://github.com/opensearch-project/OpenSearch/pull/9146)) +- Bump `org.gradle.test-retry` from 1.5.3 to 1.5.4 ([#8842](https://github.com/opensearch-project/OpenSearch/pull/8842)) +- Bump `com.netflix.nebula.ospackage-base` from 11.3.0 to 11.4.0 ([#8838](https://github.com/opensearch-project/OpenSearch/pull/8838)) +- Bump `com.google.http-client:google-http-client-gson` from 1.43.2 to 1.43.3 ([#8840](https://github.com/opensearch-project/OpenSearch/pull/8840)) +- OpenJDK Update (July 2023 Patch releases) ([#8869](https://github.com/opensearch-project/OpenSearch/pull/8869)) +- Bump `hadoop` libraries from 3.3.4 to 3.3.6 ([#6995](https://github.com/opensearch-project/OpenSearch/pull/6995)) +- Bump `com.gradle.enterprise` from 3.13.3 to 3.14.1 ([#8996](https://github.com/opensearch-project/OpenSearch/pull/8996)) +- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 ([#8995](https://github.com/opensearch-project/OpenSearch/pull/8995)) +- Bump `com.google.cloud:google-cloud-core-http` from 2.21.0 to 2.21.1 ([#8999](https://github.com/opensearch-project/OpenSearch/pull/8999)) +- Bump `com.maxmind.geoip2:geoip2` from 4.0.1 to 4.1.0 ([#8998](https://github.com/opensearch-project/OpenSearch/pull/8998)) +- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 in /plugins/repository-hdfs ([#8997](https://github.com/opensearch-project/OpenSearch/pull/8997)) +- Bump `netty` from 4.1.94.Final to 4.1.96.Final ([#9030](https://github.com/opensearch-project/OpenSearch/pull/9030)) +- Bump `com.google.jimfs:jimfs` from 1.2 to 1.3.0 
([#9080](https://github.com/opensearch-project/OpenSearch/pull/9080))
+- Bump `io.projectreactor.netty:reactor-netty-http` from 1.1.8 to 1.1.9 ([#9147](https://github.com/opensearch-project/OpenSearch/pull/9147))
+- Bump `org.apache.maven:maven-model` from 3.9.3 to 3.9.4 ([#9148](https://github.com/opensearch-project/OpenSearch/pull/9148))
+- Bump `com.azure:azure-storage-blob` from 12.22.3 to 12.23.0 ([#9231](https://github.com/opensearch-project/OpenSearch/pull/9231))
+- Bump `com.diffplug.spotless` from 6.19.0 to 6.20.0 ([#9227](https://github.com/opensearch-project/OpenSearch/pull/9227))
+- Bump `org.xerial.snappy:snappy-java` from 1.1.8.2 to 1.1.10.3 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252))
+- Bump `com.squareup.okhttp3:okhttp` from 4.9.3 to 4.11.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252))
+- Bump `com.squareup.okio:okio` from 2.8.0 to 3.5.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252))
+- Bump `com.google.code.gson:gson` from 2.9.0 to 2.10.1 ([#9230](https://github.com/opensearch-project/OpenSearch/pull/9230))
+- Bump `lycheeverse/lychee-action` from 1.2.0 to 1.8.0 ([#9228](https://github.com/opensearch-project/OpenSearch/pull/9228))
+- Bump `snakeyaml` from 2.0 to 2.1 ([#9269](https://github.com/opensearch-project/OpenSearch/pull/9269))
+- Bump `aws-actions/configure-aws-credentials` from 1 to 2 ([#9302](https://github.com/opensearch-project/OpenSearch/pull/9302))
+- Bump `com.github.luben:zstd-jni` from 1.5.5-3 to 1.5.5-5 ([#9431](https://github.com/opensearch-project/OpenSearch/pull/9431))
+- Bump `netty` from 4.1.96.Final to 4.1.97.Final ([#9553](https://github.com/opensearch-project/OpenSearch/pull/9553))
+- Bump `io.grpc:grpc-api` from 1.57.1 to 1.57.2 ([#9578](https://github.com/opensearch-project/OpenSearch/pull/9578))
+- Add Encryption SDK dependencies ([#8466](https://github.com/opensearch-project/OpenSearch/pull/8466))
+
+### Changed
+- Default to mmapfs within hybridfs ([#8508](https://github.com/opensearch-project/OpenSearch/pull/8508))
+- Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303))
+- Make Span exporter configurable ([#8620](https://github.com/opensearch-project/OpenSearch/issues/8620))
+- [Refactor] StreamIO from common to core.common namespace in core lib ([#8157](https://github.com/opensearch-project/OpenSearch/pull/8157))
+- [Refactor] Remaining HPPC to java.util collections ([#8730](https://github.com/opensearch-project/OpenSearch/pull/8730))
+- Remote Segment Store Repository setting moved from `index.remote_store.repository` to `index.remote_store.segment.repository` and `cluster.remote_store.repository` to `cluster.remote_store.segment.repository` respectively for Index and Cluster level settings ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719))
+- Change InternalSignificantTerms to sum shard-level superset counts only in final reduce ([#8735](https://github.com/opensearch-project/OpenSearch/pull/8735))
+- Exclude 'benchmarks' from codecov report ([#8805](https://github.com/opensearch-project/OpenSearch/pull/8805))
+- Create separate SourceLookup instance per segment slice in SignificantTextAggregatorFactory ([#8807](https://github.com/opensearch-project/OpenSearch/pull/8807))
+- Replace the deprecated
IndexReader APIs with new storedFields() & termVectors() ([#7792](https://github.com/opensearch-project/OpenSearch/pull/7792))
+- [Remote Store] Add support to restore only unassigned shards of an index ([#8792](https://github.com/opensearch-project/OpenSearch/pull/8792))
+- Add safeguard limits for file cache during node level allocation ([#8208](https://github.com/opensearch-project/OpenSearch/pull/8208))
+- Performance improvements for BytesRefHash ([#8788](https://github.com/opensearch-project/OpenSearch/pull/8788))
+- Add support for aggregation profiler with concurrent aggregation ([#8801](https://github.com/opensearch-project/OpenSearch/pull/8801))
+- [Remove] Deprecated Fractional ByteSizeValue support ([#9005](https://github.com/opensearch-project/OpenSearch/pull/9005))
+- [Remote Store] Restrict user override for remote store index level settings ([#8812](https://github.com/opensearch-project/OpenSearch/pull/8812))
+- [Refactor] MediaTypeParser to MediaTypeParserRegistry ([#8636](https://github.com/opensearch-project/OpenSearch/pull/8636))
+- Make MultiBucketConsumerService thread safe to use across slices during search ([#9047](https://github.com/opensearch-project/OpenSearch/pull/9047))
+- Removed blocking wait in TransportGetSnapshotsAction which was exhausting generic threadpool ([#8377](https://github.com/opensearch-project/OpenSearch/pull/8377))
+- Adds support for tracing runnable scenarios ([#8831](https://github.com/opensearch-project/OpenSearch/pull/8831))
+- Change shard_size and shard_min_doc_count evaluation to happen in shard level reduce phase ([#9085](https://github.com/opensearch-project/OpenSearch/pull/9085))
+- Add attributes to startSpan methods ([#9199](https://github.com/opensearch-project/OpenSearch/pull/9199))
+- [Refactor] Task foundation classes to core library - pt 1 ([#9082](https://github.com/opensearch-project/OpenSearch/pull/9082))
+- Add support for wrapping CollectorManager with profiling during concurrent execution ([#9129](https://github.com/opensearch-project/OpenSearch/pull/9129))
+- Add base class for parameterizing the search based tests ([#9083](https://github.com/opensearch-project/OpenSearch/pull/9083))
+- Rethrow OpenSearch exception for non-concurrent path while using concurrent search ([#9177](https://github.com/opensearch-project/OpenSearch/pull/9177))
+- Improve performance of encoding composite keys in multi-term aggregations ([#9412](https://github.com/opensearch-project/OpenSearch/pull/9412))
+- Refactor Compressors from CompressorFactory to CompressorRegistry for extensibility ([#9262](https://github.com/opensearch-project/OpenSearch/pull/9262))
+- Fix sort related ITs for concurrent search ([#9466](https://github.com/opensearch-project/OpenSearch/pull/9466))
+- [Remote Store] Implicitly use replication type SEGMENT for remote store clusters ([#9264](https://github.com/opensearch-project/OpenSearch/pull/9264))
+- Add support to use trace propagated from client ([#9506](https://github.com/opensearch-project/OpenSearch/pull/9506))
+- Separate request-based and settings-based concurrent segment search controls and introduce AggregatorFactory method to determine concurrent search support
([#9469](https://github.com/opensearch-project/OpenSearch/pull/9469))
+- Redefine telemetry context restoration and propagation ([#9617](https://github.com/opensearch-project/OpenSearch/pull/9617))
+- Use non-concurrent path for sort request on timeseries index and field ([#9562](https://github.com/opensearch-project/OpenSearch/pull/9562))
+- Added sampler based on `Blanket Probabilistic Sampling rate` and `Override for on demand` ([#9621](https://github.com/opensearch-project/OpenSearch/issues/9621))
+- Decouple replication lag from logic to fail stale replicas ([#9507](https://github.com/opensearch-project/OpenSearch/pull/9507))
+- Improve performance of rounding dates in date_histogram aggregation ([#9727](https://github.com/opensearch-project/OpenSearch/pull/9727))
+- [Remote Store] Add support for Remote Translog Store stats in `_remotestore/stats/` API ([#9263](https://github.com/opensearch-project/OpenSearch/pull/9263))
+- Removing the vec file extension from INDEX_STORE_HYBRID_NIO_EXTENSIONS, to ensure no performance degradation for vector search via the Lucene engine ([#9528](https://github.com/opensearch-project/OpenSearch/pull/9528))
+- Clean up unreferenced files on segment merge failure ([#9503](https://github.com/opensearch-project/OpenSearch/pull/9503))
+- Move zstd compression codec to external custom-codecs repository ([#9422](https://github.com/opensearch-project/OpenSearch/issues/9422))
+- [Remote Store] Add support for Remote Translog Store upload stats in `_nodes/stats/` API ([#8908](https://github.com/opensearch-project/OpenSearch/pull/8908))
+- [Remote Store] Removing feature flag to mark feature GA ([#9761](https://github.com/opensearch-project/OpenSearch/pull/9761))
+
+### Removed
+- Remove provision to create Remote Indices without Remote Translog Store ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719))
+
+### Fixed
+- Fix flaky ResourceAwareTasksTests.testBasicTaskResourceTracking test ([#8993](https://github.com/opensearch-project/OpenSearch/pull/8993))
+- Fix null_pointer_exception when creating or updating ingest pipeline ([#9259](https://github.com/opensearch-project/OpenSearch/pull/9259))
+- Fix memory leak when using Zstd Dictionary ([#9403](https://github.com/opensearch-project/OpenSearch/pull/9403))
+- Fix condition to remove index create block ([#9437](https://github.com/opensearch-project/OpenSearch/pull/9437))
+- Add support to clear archived index setting ([#9019](https://github.com/opensearch-project/OpenSearch/pull/9019))
+- Fix range reads in repository-s3 ([#9512](https://github.com/opensearch-project/OpenSearch/issues/9512))
+- [Segment Replication] Fixed bug where replica shard temporarily serves stale data during an engine reset ([#9495](https://github.com/opensearch-project/OpenSearch/pull/9495))
+- Disable shard/segment level search_after short cutting if track_total_hits != false ([#9683](https://github.com/opensearch-project/OpenSearch/pull/9683))
+- [Segment Replication] Fixed bug where bytes behind metric is not accurate ([#9686](https://github.com/opensearch-project/OpenSearch/pull/9686))
diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java
b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java new file mode 100644 index 0000000000000..6fcc89cfe9e9a --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.Settings; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Map; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteClusterStateServiceIT extends RemoteStoreBaseIntegTestCase { + + private static String INDEX_NAME = "test-index"; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); + } + + private void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes); internalCluster().startDataOnlyNodes(numDataOnlyNodes); for (String index : indices.split(",")) { createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount)); ensureYellowAndNoInitializingShards(index); ensureGreen(index); } } + + private Map<String, Long> initialTestSetup(int shardCount, int replicaCount, int dataNodeCount, int clusterManagerNodeCount) { prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); Map<String, Long> indexStats = indexData(1, false, INDEX_NAME); assertEquals(shardCount * (replicaCount + 1), getNumShards(INDEX_NAME).totalNumShards); ensureGreen(INDEX_NAME); return indexStats; } + + public void testFullClusterRestoreStaleDelete() throws Exception { int shardCount = randomIntBetween(1, 2); int replicaCount = 1; int dataNodeCount = shardCount * (replicaCount + 1); int clusterManagerNodeCount = 1; + + initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + setReplicaCount(0); + setReplicaCount(2); + setReplicaCount(0); + setReplicaCount(1); + setReplicaCount(0); + setReplicaCount(1); + setReplicaCount(0); + setReplicaCount(2); + setReplicaCount(0); + + RemoteClusterStateService remoteClusterStateService = internalCluster().getClusterManagerNodeInstance( RemoteClusterStateService.class ); + + RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class); + + BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(REPOSITORY_NAME); BlobPath baseMetadataPath = repository.basePath() .add( Base64.getUrlEncoder() .withoutPadding() +
.encodeToString(getClusterState().getClusterName().value().getBytes(StandardCharsets.UTF_8)) + ) + .add("cluster-state") + .add(getClusterState().metadata().clusterUUID()); + + assertEquals(10, repository.blobStore().blobContainer(baseMetadataPath.add("manifest")).listBlobsByPrefix("manifest").size()); + + Map<String, IndexMetadata> indexMetadataMap = remoteClusterStateService.getLatestIndexMetadata( + cluster().getClusterName(), + getClusterState().metadata().clusterUUID() + ); + assertEquals(0, indexMetadataMap.values().stream().findFirst().get().getNumberOfReplicas()); + assertEquals(shardCount, indexMetadataMap.values().stream().findFirst().get().getNumberOfShards()); + } + + private void setReplicaCount(int replicaCount) { + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, replicaCount)) + .get(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java index 2fbad9099c056..033ea75b68958 --- a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java @@ -15,7 +15,6 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.shard.IndexShard; @@ -31,7 +30,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Objects; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -264,9 +262,9 @@ public void testFailStaleReplica() throws Exception { } public void testWithDocumentReplicationEnabledIndex() throws Exception { - assumeTrue( - "Can't create DocRep index with remote store enabled. Skipping.", - Objects.equals(featureFlagSettings().get(FeatureFlags.REMOTE_STORE, "false"), "false") + assumeFalse( + "Skipping the test as it's not compatible with segment replication with remote store.
Cannot create DocRep indices with Remote store enabled", + segmentReplicationWithRemoteEnabled() ); Settings settings = Settings.builder() .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(500)) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java index a16167c8ad0c4..8e68a8bde39d5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java @@ -18,7 +18,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; @@ -204,8 +203,7 @@ protected IndexShard getIndexShard(String node, String indexName) { } protected boolean segmentReplicationWithRemoteEnabled() { - return IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings()).booleanValue() - && "true".equalsIgnoreCase(featureFlagSettings().get(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL)); + return IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings()).booleanValue(); } protected Releasable blockReplication(List nodes, CountDownLatch latch) { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 5855ed7470559..33bc5a8f3afe6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -15,6 +15,7 @@ import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; +import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.SegmentInfos; @@ -38,6 +39,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.termvectors.TermVectorsRequestBuilder; +import org.opensearch.action.termvectors.TermVectorsResponse; import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterState; @@ -57,6 +60,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationPressureService; @@ -81,6 +85,7 @@ import org.opensearch.transport.TransportService; import org.junit.Before; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -1622,4 +1627,154 @@ public void testRealtimeMultiGetRequestsUnsuccessful() { assertTrue(mgetResponse.getResponses()[1].isFailed()); } + + /** + * Tests whether 
segment replication supports realtime termvector requests, reading and parsing the source from the translog to serve strong reads. + */ + public void testRealtimeTermVectorRequestsSuccessful() throws IOException { + final String primary = internalCluster().startDataOnlyNode(); + XContentBuilder mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("field") + .field("type", "text") + .field("term_vector", "with_positions_offsets_payloads") + .field("analyzer", "tv_test") + .endObject() + .endObject() + .endObject(); + // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime termvectors + assertAcked( + prepareCreate(INDEX_NAME).setMapping(mapping) + .addAlias(new Alias("alias")) + .setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.analysis.analyzer.tv_test.tokenizer", "standard") + .put("index.refresh_interval", -1) + .putList("index.analysis.analyzer.tv_test.filter", "lowercase") + ) + ); + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + final String id = routingKeyForShard(INDEX_NAME, 0); + + TermVectorsResponse response = client(replica).prepareTermVectors(indexOrAlias(), "1").get(); + assertFalse(response.isExists()); + + // index doc 1 + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(1)) + .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog").endObject()) + .execute() + .actionGet(); + + // non realtime termvectors 1 + response = client().prepareTermVectors(indexOrAlias(), Integer.toString(1)).setRealtime(false).get(); + assertFalse(response.isExists()); + + // realtime termvectors 1 + TermVectorsRequestBuilder resp = client().prepareTermVectors(indexOrAlias(), Integer.toString(1)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setRealtime(true) + .setSelectedFields(); + response = resp.execute().actionGet(); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + assertThat("doc id: " + 1 + " doesn't exist but should", response.isExists(), equalTo(true)); + Fields fields = response.getFields(); + assertThat(fields.size(), equalTo(1)); + + // index doc 2 with routing + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(2)) + .setRouting(id) + .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog").endObject()) + .execute() + .actionGet(); + + // realtime termvectors 2 with routing + resp = client().prepareTermVectors(indexOrAlias(), Integer.toString(2)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setRouting(id) + .setSelectedFields(); + response = resp.execute().actionGet(); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + assertThat("doc id: " + 2 + " doesn't exist but should", response.isExists(), equalTo(true)); + fields = response.getFields(); + assertThat(fields.size(), equalTo(1)); + + } + + public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { + final String primary = internalCluster().startDataOnlyNode(); + XContentBuilder mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("field") + .field("type", "text") + .field("term_vector", "with_positions_offsets_payloads") + .field("analyzer", "tv_test") + .endObject() + .endObject() + .endObject(); + // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime termvectors + assertAcked( +
prepareCreate(INDEX_NAME).setMapping(mapping) + .addAlias(new Alias("alias")) + .setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.analysis.analyzer.tv_test.tokenizer", "standard") + .put("index.refresh_interval", -1) + .putList("index.analysis.analyzer.tv_test.filter", "lowercase") + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + ) + ); + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + final String id = routingKeyForShard(INDEX_NAME, 0); + final String routingOtherShard = routingKeyForShard(INDEX_NAME, 1); + + // index doc 1 + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(1)) + .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog").endObject()) + .setRouting(id) + .execute() + .actionGet(); + + // non realtime termvectors 1 + TermVectorsResponse response = client().prepareTermVectors(indexOrAlias(), Integer.toString(1)).setRealtime(false).get(); + assertFalse(response.isExists()); + + // realtime termvectors (preference = _replica) + TermVectorsRequestBuilder resp = client(replica).prepareTermVectors(indexOrAlias(), Integer.toString(1)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setPreference(Preference.REPLICA.type()) + .setRealtime(true) + .setSelectedFields(); + response = resp.execute().actionGet(); + + assertFalse(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + + // realtime termvectors (with routing set) + resp = client(replica).prepareTermVectors(indexOrAlias(), Integer.toString(1)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setRouting(routingOtherShard) + .setSelectedFields(); + response = resp.execute().actionGet(); + + assertFalse(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java index a5017144ce890..bc55f6cc2cbcb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java @@ -12,15 +12,12 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.util.FileSystemUtils; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; -import org.opensearch.test.FeatureFlagSetter; -import org.junit.Before; import java.io.IOException; import java.nio.file.Path; @@ -44,17 +41,6 @@ public abstract class AbstractRemoteStoreMockRepositoryIntegTestCase extends Abs protected static final String TRANSLOG_REPOSITORY_NAME = "my-translog-repo-1"; protected static final String INDEX_NAME = "remote-store-test-idx-1"; - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - - @Before - public void setup() { - 
FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - FeatureFlagSetter.set(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL); - } - @Override public Settings indexSettings() { return remoteStoreIndexSettings(0); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java index 66259869a5d28..d8b7718a55377 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java @@ -10,7 +10,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.indices.recovery.IndexPrimaryRelocationIT; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; @@ -50,15 +49,6 @@ public Settings indexSettings() { .build(); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); - } - public void testPrimaryRelocationWhileIndexing() throws Exception { internalCluster().startClusterManagerOnlyNode(); super.testPrimaryRelocationWhileIndexing(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java index d11c09928a08f..4eb1cc7703735 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java @@ -10,7 +10,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.indices.recovery.IndexRecoveryIT; @@ -46,15 +45,6 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); - } - @Override public Settings indexSettings() { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 16264253b4000..4ebccb9b9e551 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -20,7 +20,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.IndexSettings; import org.opensearch.indices.replication.common.ReplicationType; @@ -65,7 +64,6 @@ public void teardown() { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() 
.put(super.nodeSettings(nodeOrdinal)) - .put(FeatureFlags.REMOTE_STORE, "true") .put(remoteStoreClusterSettings(BASE_REMOTE_REPO, remoteRepoPath)) .build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 1660201dcb307..9a684ce0a1482 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -22,7 +22,6 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; @@ -48,7 +47,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; -import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -136,15 +134,6 @@ protected Settings nodeSettings(int nodeOrdinal) { } } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); - } - public Settings indexSettings() { return defaultIndexSettings(); } @@ -186,13 +175,7 @@ public static Settings remoteStoreClusterSettings( Path translogRepoPath ) { Settings.Builder settingsBuilder = Settings.builder(); - - if (randomBoolean()) { - settingsBuilder.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT); - } - settingsBuilder.put(buildRemoteStoreNodeAttributes(segmentRepoName, segmentRepoPath, translogRepoName, translogRepoPath, false)); - return settingsBuilder.build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java index 1b817408596ab..45c3ef7f5bae5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java @@ -9,7 +9,6 @@ package org.opensearch.remotestore; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.indices.replication.SegmentReplicationIT; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; @@ -22,9 +21,6 @@ /** * This class runs Segment Replication Integ test suite with remote store enabled. - * Setup is similar to SegmentReplicationRemoteStoreIT but this also enables the segment replication using remote store which - * is behind SEGMENT_REPLICATION_EXPERIMENTAL flag. After this is moved out of experimental, we can combine and keep only one - * test suite for Segment and Remote store integration tests. 
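+ * Remote store is now generally available, so this suite no longer requires any feature flag setup.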
*/ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationUsingRemoteStoreIT extends SegmentReplicationIT { @@ -47,15 +43,6 @@ protected boolean segmentReplicationWithRemoteEnabled() { return true; } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); - } - @Before public void setup() { internalCluster().startClusterManagerOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java index fa0944e5bfee0..0da4d81a8871e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java @@ -9,7 +9,6 @@ package org.opensearch.remotestore; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.SegmentReplicationPressureIT; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; @@ -22,8 +21,6 @@ /** * This class executes the SegmentReplicationPressureIT suite with remote store integration enabled. - * Setup is similar to SegmentReplicationPressureIT but this also enables the segment replication using remote store which - * is behind SEGMENT_REPLICATION_EXPERIMENTAL flag. */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationWithRemoteStorePressureIT extends SegmentReplicationPressureIT { @@ -36,15 +33,6 @@ protected boolean segmentReplicationWithRemoteEnabled() { return true; } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); - } - @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java index 26bfe59618275..e6325987d330f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java @@ -32,8 +32,12 @@ package org.opensearch.search.aggregations; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.terms.Terms; @@ -43,7 +47,12 @@ import org.opensearch.search.aggregations.metrics.Percentiles; import org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; 
+ +import java.util.Arrays; +import java.util.Collection; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid; @@ -56,7 +65,24 @@ import static org.hamcrest.Matchers.closeTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MissingValueIT extends OpenSearchIntegTestCase { +public class MissingValueIT extends ParameterizedOpenSearchIntegTestCase { + + public MissingValueIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected int maximumNumberOfShards() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java index 24f2b1bbdabfc..5f794d2abf878 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java @@ -32,8 +32,6 @@ package org.opensearch.search.profile.query; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - import org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.MultiSearchResponse; @@ -42,23 +40,20 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Arrays; -import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -66,32 +61,8 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; - -public class QueryProfilerIT extends ParameterizedOpenSearchIntegTestCase { - private final boolean concurrentSearchEnabled; - private static final String MAX_PREFIX = "max_"; - private static final String MIN_PREFIX = "min_"; - private static final String 
AVG_PREFIX = "avg_"; - private static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; - - public QueryProfilerIT(Settings settings, boolean concurrentSearchEnabled) { - super(settings); - this.concurrentSearchEnabled = concurrentSearchEnabled; - } - - @ParametersFactory - public static Collection parameters() { - return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build(), false }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), true } - ); - } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); - } +public class QueryProfilerIT extends OpenSearchIntegTestCase { /** * This test simply checks to make sure nothing crashes. Test indexes 100-150 documents, @@ -258,7 +229,6 @@ public void testSimpleMatch() throws Exception { assertEquals(result.getLuceneDescription(), "field1:one"); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); - assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -301,7 +271,6 @@ public void testBool() throws Exception { assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); assertEquals(result.getProfiledChildren().size(), 2); - assertQueryProfileResult(result); // Check the children List children = result.getProfiledChildren(); @@ -313,14 +282,12 @@ public void testBool() throws Exception { assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); assertEquals(childProfile.getProfiledChildren().size(), 0); - assertQueryProfileResult(childProfile); childProfile = children.get(1); assertEquals(childProfile.getQueryName(), "TermQuery"); assertEquals(childProfile.getLuceneDescription(), "field1:two"); assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); - assertQueryProfileResult(childProfile); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -363,7 +330,6 @@ public void testEmptyBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); - assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -409,7 +375,6 @@ public void testCollapsingBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); - assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -450,7 +415,6 @@ public void testBoosting() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); - assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -491,7 +455,6 @@ public void testDisMaxRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); - assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -531,7 +494,6 @@ public void testRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); 
assertNotNull(result.getTimeBreakdown()); - assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -585,7 +547,6 @@ public void testPhrase() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); - assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -618,35 +579,4 @@ public void testNoProfile() throws Exception { assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); } - private void assertQueryProfileResult(ProfileResult result) { - Map breakdown = result.getTimeBreakdown(); - Long maxSliceTime = result.getMaxSliceTime(); - Long minSliceTime = result.getMinSliceTime(); - Long avgSliceTime = result.getAvgSliceTime(); - if (concurrentSearchEnabled) { - assertNotNull(maxSliceTime); - assertNotNull(minSliceTime); - assertNotNull(avgSliceTime); - assertThat(breakdown.size(), equalTo(66)); - for (QueryTimingType queryTimingType : QueryTimingType.values()) { - if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { - String maxTimingType = MAX_PREFIX + queryTimingType; - String minTimingType = MIN_PREFIX + queryTimingType; - String avgTimingType = AVG_PREFIX + queryTimingType; - assertNotNull(breakdown.get(maxTimingType)); - assertNotNull(breakdown.get(minTimingType)); - assertNotNull(breakdown.get(avgTimingType)); - assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); - assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); - assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); - } - } - } else { - assertThat(maxSliceTime, is(nullValue())); - assertThat(minSliceTime, is(nullValue())); - assertThat(avgSliceTime, is(nullValue())); - assertThat(breakdown.size(), equalTo(27)); - } - } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 9ec8cee2685fe..066d82483ae91 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -44,7 +44,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -57,7 +56,6 @@ import org.opensearch.repositories.RepositoryShardId; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.mockstore.MockRepository; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; @@ -159,7 +157,6 @@ public void testCloneSnapshotIndex() throws Exception { public void testCloneShallowSnapshotIndex() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); @@ -204,7 +201,6 @@ public void 
testCloneShallowSnapshotIndex() throws Exception { public void testShallowCloneNameAvailability() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; final Path remoteStorePath = randomRepoPath().toAbsolutePath(); internalCluster().startClusterManagerOnlyNode( @@ -245,7 +241,6 @@ public void testShallowCloneNameAvailability() throws Exception { public void testCloneAfterRepoShallowSettingEnabled() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); @@ -280,7 +275,6 @@ public void testCloneAfterRepoShallowSettingEnabled() throws Exception { public void testCloneAfterRepoShallowSettingDisabled() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index 448b860683668..e79bf1c16b586 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -16,9 +16,7 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; @@ -41,7 +39,6 @@ public class DeleteSnapshotIT extends AbstractSnapshotIntegTestCase { public void testDeleteSnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); @@ -69,7 +66,6 @@ public void testDeleteSnapshot() throws Exception { public void testDeleteShallowCopySnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); @@ -98,8 +94,6 @@ public void testDeleteShallowCopySnapshot() throws Exception { // Deleting multiple shallow copy snapshots as part of single delete call with repo having only shallow copy snapshots. 
public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); @@ -142,8 +136,6 @@ public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8610") public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final String dataNode = internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); @@ -228,8 +220,6 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8610") public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); @@ -288,8 +278,6 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { public void testRemoteStoreCleanupForDeletedIndex() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java index 979df95547d06..8e2580aba1745 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java @@ -39,7 +39,6 @@ import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.junit.Before; @@ -67,8 +66,6 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") 
.put(remoteStoreClusterSettings(remoteStoreRepoName, absolutePath)) .build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index 7327f1127eab2..7117818451e14 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -45,7 +45,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.rest.RestStatus; @@ -82,11 +81,6 @@ import static org.hamcrest.Matchers.nullValue; public class RestoreSnapshotIT extends AbstractSnapshotIntegTestCase { - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - public void testParallelRestoreOperations() { String indexName1 = "testindex1"; String indexName2 = "testindex2"; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index 30394611ac48f..c574233d25051 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -47,7 +47,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.Strings; import org.opensearch.core.common.unit.ByteSizeUnit; @@ -77,7 +76,6 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order - .put(FeatureFlags.REMOTE_STORE, "true") .build(); } diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index e9fe10b15e817..46775466aa615 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -972,12 +972,8 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestCatAction(catActions)); registerHandler.accept(new RestDecommissionAction()); registerHandler.accept(new RestGetDecommissionStateAction()); - - // Remote Store APIs - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - registerHandler.accept(new RestRemoteStoreStatsAction()); - registerHandler.accept(new RestRestoreRemoteStoreAction()); - } + registerHandler.accept(new RestRemoteStoreStatsAction()); + registerHandler.accept(new RestRestoreRemoteStoreAction()); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 6f58a532b48a1..256850da0d503 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -39,7 +39,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -152,7 +151,7 @@ public RestoreSnapshotRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_7_0)) { storageType = in.readEnum(StorageType.class); } - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && in.getVersion().onOrAfter(Version.V_2_9_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { sourceRemoteStoreRepository = in.readOptionalString(); } } @@ -176,7 +175,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_7_0)) { out.writeEnum(storageType); } - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && out.getVersion().onOrAfter(Version.V_2_9_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { out.writeOptionalString(sourceRemoteStoreRepository); } } @@ -616,11 +615,6 @@ public RestoreSnapshotRequest source(Map source) { } } else if (name.equals("source_remote_store_repository")) { - if (!FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - throw new IllegalArgumentException( - "Unsupported parameter " + name + ". Please enable remote store feature flag for this experimental feature" - ); - } if (entry.getValue() instanceof String) { setSourceRemoteStoreRepository((String) entry.getValue()); } else { @@ -671,7 +665,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (storageType != null) { storageType.toXContent(builder); } - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && sourceRemoteStoreRepository != null) { + if (sourceRemoteStoreRepository != null) { builder.field("source_remote_store_repository", sourceRemoteStoreRepository); } builder.endObject(); @@ -701,48 +695,29 @@ public boolean equals(Object o) { && Objects.equals(indexSettings, that.indexSettings) && Arrays.equals(ignoreIndexSettings, that.ignoreIndexSettings) && Objects.equals(snapshotUuid, that.snapshotUuid) - && Objects.equals(storageType, that.storageType); - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - equals = Objects.equals(sourceRemoteStoreRepository, that.sourceRemoteStoreRepository); - } + && Objects.equals(storageType, that.storageType) + && Objects.equals(sourceRemoteStoreRepository, that.sourceRemoteStoreRepository); return equals; } @Override public int hashCode() { int result; - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - result = Objects.hash( - snapshot, - repository, - indicesOptions, - renamePattern, - renameReplacement, - waitForCompletion, - includeGlobalState, - partial, - includeAliases, - indexSettings, - snapshotUuid, - storageType, - sourceRemoteStoreRepository - ); - } else { - result = Objects.hash( - snapshot, - repository, - indicesOptions, - renamePattern, - renameReplacement, - waitForCompletion, - includeGlobalState, - partial, - includeAliases, - indexSettings, - snapshotUuid, - storageType - ); - } + result = Objects.hash( + snapshot, + repository, + indicesOptions, + renamePattern, + renameReplacement, + waitForCompletion, + 
includeGlobalState, + partial, + includeAliases, + indexSettings, + snapshotUuid, + storageType, + sourceRemoteStoreRepository + ); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(ignoreIndexSettings); return result; diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index efc21b2c03808..fddda0ef1f9a7 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -72,7 +72,6 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.action.ActionListener; @@ -539,7 +538,7 @@ protected Releasable checkPrimaryLimits(BulkShardRequest request, boolean rerout } // TODO - While removing remote store flag, this can be encapsulated to single class with common interface for backpressure // service - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && remoteStorePressureService.isSegmentsUploadBackpressureEnabled()) { + if (remoteStorePressureService.isSegmentsUploadBackpressureEnabled()) { remoteStorePressureService.validateSegmentsUploadLag(request.shardId()); } } diff --git a/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java index a76506b39f811..b7e8a29bd4027 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.cluster.routing.Preference; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -87,15 +88,24 @@ public TransportTermVectorsAction( @Override protected ShardIterator shards(ClusterState state, InternalRequest request) { + + String preference = request.request().preference; + // For a real time request on a seg rep index, use primary shard as the preferred query shard. 
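+ // With segment replication, a replica serves reads from its last-copied segments, so a realtime
+ // term vector read routed to a replica may miss documents that are still only in the primary's
+ // translog (see testRealtimeTermVectorRequestsUnSuccessful). Defaulting to the primary preserves
+ // read-your-writes behavior; an explicit user-supplied preference is left untouched.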
+ if (request.request().realtime() + && preference == null + && state.getMetadata().isSegmentReplicationEnabled(request.concreteIndex())) { + preference = Preference.PRIMARY.type(); + } + if (request.request().doc() != null && request.request().routing() == null) { // artificial document without routing specified, ignore its "id" and use either random shard or according to preference GroupShardsIterator<ShardIterator> groupShardsIter = clusterService.operationRouting() - .searchShards(state, new String[] { request.concreteIndex() }, null, request.request().preference()); + .searchShards(state, new String[] { request.concreteIndex() }, null, preference); return groupShardsIter.iterator().next(); } return clusterService.operationRouting() - .getShards(state, request.concreteIndex(), request.request().id(), request.request().routing(), request.request().preference()); + .getShards(state, request.concreteIndex(), request.request().id(), request.request().routing(), preference); } @Override diff --git a/server/src/main/java/org/opensearch/common/Rounding.java b/server/src/main/java/org/opensearch/common/Rounding.java index 65ffdafc423fd..667eb4529fe38 --- a/server/src/main/java/org/opensearch/common/Rounding.java +++ b/server/src/main/java/org/opensearch/common/Rounding.java @@ -37,6 +37,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.LocalTimeOffset.Gap; import org.opensearch.common.LocalTimeOffset.Overlap; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.time.DateUtils; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; @@ -412,6 +413,21 @@ public Rounding build() { } private abstract class PreparedRounding implements Prepared { + /** + * The maximum limit up to which array-based prepared rounding is used. + * 128 is a power of two that isn't huge. We might be able to do + * better if the limit was based on the actual type of prepared + * rounding but this'll do for now. + */ + private static final int DEFAULT_ARRAY_ROUNDING_MAX_THRESHOLD = 128; + + /** + * The maximum limit up to which linear search is used, otherwise binary search is used. + * This is because linear search is much faster on small arrays. + * Benchmark results: PR #9727 + */ + private static final int LINEAR_SEARCH_ARRAY_ROUNDING_MAX_THRESHOLD = 64; + /** * Attempt to build a {@link Prepared} implementation that relies on pre-calculated * "round down" points. If there would be more than {@code max} points then return @@ -435,7 +451,9 @@ protected Prepared maybeUseArray(long minUtcMillis, long maxUtcMillis, int max) values = ArrayUtil.grow(values, i + 1); values[i++] = rounded; } - return new ArrayRounding(values, i, this); + return i <= LINEAR_SEARCH_ARRAY_ROUNDING_MAX_THRESHOLD + ? new BidirectionalLinearSearchArrayRounding(values, i, this) + : new BinarySearchArrayRounding(values, i, this); } } @@ -521,12 +539,11 @@ private LocalDateTime truncateLocalDateTime(LocalDateTime localDateTime) { @Override public Prepared prepare(long minUtcMillis, long maxUtcMillis) { - /* - * 128 is a power of two that isn't huge. We might be able to do - * better if the limit was based on the actual type of prepared - * rounding but this'll do for now.
- */ - return prepareOffsetOrJavaTimeRounding(minUtcMillis, maxUtcMillis).maybeUseArray(minUtcMillis, maxUtcMillis, 128); + return prepareOffsetOrJavaTimeRounding(minUtcMillis, maxUtcMillis).maybeUseArray( + minUtcMillis, + maxUtcMillis, + PreparedRounding.DEFAULT_ARRAY_ROUNDING_MAX_THRESHOLD + ); } private TimeUnitPreparedRounding prepareOffsetOrJavaTimeRounding(long minUtcMillis, long maxUtcMillis) { @@ -1330,14 +1347,19 @@ public static Rounding read(StreamInput in) throws IOException { /** * Implementation of {@link Prepared} using pre-calculated "round down" points. * + *
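+ * The round-down points are pre-computed by maybeUseArray for the prepared time range, so repeated + * round() calls within that range become plain array lookups instead of time-zone aware computation.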

+ * It uses binary search to find the greatest round-down point less than or equal to the given timestamp. + * * @opensearch.internal */ - private static class ArrayRounding implements Prepared { + @InternalApi + static class BinarySearchArrayRounding implements Prepared { private final long[] values; private final int max; private final Prepared delegate; - private ArrayRounding(long[] values, int max, Prepared delegate) { + BinarySearchArrayRounding(long[] values, int max, Prepared delegate) { + assert max > 0 : "at least one round-down point must be present"; this.values = values; this.max = max; this.delegate = delegate; @@ -1365,4 +1387,64 @@ public double roundingSize(long utcMillis, DateTimeUnit timeUnit) { return delegate.roundingSize(utcMillis, timeUnit); } } + + /** + * Implementation of {@link Prepared} using pre-calculated "round down" points. + * + *

+ * It uses linear search to find the greatest round-down point less than or equal to the given timestamp. + * For small inputs (≤ 64 elements), this can be much faster than binary search as it avoids the penalty of + * branch mispredictions and pipeline stalls, and accesses memory sequentially. + * + *
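+ * For example, with round-down points [10, 20, 30, 40, 50, 60], the constructor stores ascending = [10, 20, 30] + * and descending = [60, 50, 40]; round(55) returns 50 on the second iteration, while round(35) scans all three + * pairs and falls through to the last ascending value, 30.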

+ * It uses "meet in the middle" linear search to avoid the worst case scenario when the desired element is present + * at either side of the array. This is helpful for time-series data where velocity increases over time, so more + * documents are likely to find a greater timestamp which is likely to be present on the right end of the array. + * + * @opensearch.internal + */ + @InternalApi + static class BidirectionalLinearSearchArrayRounding implements Prepared { + private final long[] ascending; + private final long[] descending; + private final Prepared delegate; + + BidirectionalLinearSearchArrayRounding(long[] values, int max, Prepared delegate) { + assert max > 0 : "at least one round-down point must be present"; + this.delegate = delegate; + int len = (max + 1) >>> 1; // rounded-up to handle odd number of values + ascending = new long[len]; + descending = new long[len]; + + for (int i = 0; i < len; i++) { + ascending[i] = values[i]; + descending[i] = values[max - i - 1]; + } + } + + @Override + public long round(long utcMillis) { + int i = 0; + for (; i < ascending.length; i++) { + if (descending[i] <= utcMillis) { + return descending[i]; + } + if (ascending[i] > utcMillis) { + assert i > 0 : "utcMillis must be after " + ascending[0]; + return ascending[i - 1]; + } + } + return ascending[i - 1]; + } + + @Override + public long nextRoundingValue(long utcMillis) { + return delegate.nextRoundingValue(utcMillis); + } + + @Override + public double roundingSize(long utcMillis, DateTimeUnit timeUnit) { + return delegate.roundingSize(utcMillis, timeUnit); + } + } } diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index 3539ea7f3f526..8870e26c373e9 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -57,6 +57,7 @@ import org.opensearch.plugins.NetworkPlugin; import org.opensearch.tasks.RawTaskStatus; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; @@ -147,7 +148,8 @@ public NetworkModule( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { this.settings = settings; for (NetworkPlugin plugin : plugins) { @@ -160,7 +162,8 @@ public NetworkModule( xContentRegistry, networkService, dispatcher, - clusterSettings + clusterSettings, + tracer ); for (Map.Entry> entry : httpTransportFactory.entrySet()) { registerHttpTransport(entry.getKey(), entry.getValue()); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index fca160fd2b994..a95403cb6915b 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -655,6 +655,7 @@ public void apply(Settings value, Settings current, Settings previous) { // Settings related to admission control PerformanceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING, PerformanceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING, + PerformanceTrackerSettings.GLOBAL_IO_WINDOW_DURATION_SETTING, // Settings 
related to Searchable Snapshots Node.NODE_SEARCH_CACHE_SIZE_SETTING, @@ -675,7 +676,8 @@ public void apply(Settings value, Settings current, Settings previous) { // Remote cluster state settings RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, - RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING + RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, + IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING ) ) ); @@ -688,8 +690,6 @@ public void apply(Settings value, Settings current, Settings previous) { * setting should be moved to {@link #BUILT_IN_CLUSTER_SETTINGS}. */ public static final Map, List> FEATURE_FLAGGED_CLUSTER_SETTINGS = Map.of( - List.of(FeatureFlags.REMOTE_STORE), - List.of(IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING), List.of(FeatureFlags.CONCURRENT_SEGMENT_SEARCH), List.of( SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index e109d2a871cef..90abc0a0765c1 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -36,7 +36,6 @@ protected FeatureFlagSettings( new HashSet<>( Arrays.asList( FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING, - FeatureFlags.REMOTE_STORE_SETTING, FeatureFlags.EXTENSIONS_SETTING, FeatureFlags.IDENTITY_SETTING, FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 30a3550d62c8a..5b2afc44600bd 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -213,6 +213,11 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for remote translog IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + // Settings for remote store enablement + IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, + IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, + IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING, + // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { Map groups = s.getAsGroups(); @@ -236,12 +241,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { * setting should be moved to {@link #BUILT_IN_INDEX_SETTINGS}. 
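* Settings in this map are registered only on nodes where the corresponding feature flag is enabled.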
*/ public static final Map> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( - FeatureFlags.REMOTE_STORE, - List.of( - IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, - IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, - IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING - ), FeatureFlags.CONCURRENT_SEGMENT_SEARCH, List.of(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING) ); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index e2663b56c5cca..b89d2d0549823 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -20,19 +20,12 @@ * @opensearch.internal */ public class FeatureFlags { - /** * Gates the visibility of the segment replication experimental features that allows users to test unreleased beta features. */ public static final String SEGMENT_REPLICATION_EXPERIMENTAL = "opensearch.experimental.feature.segment_replication_experimental.enabled"; - /** - * Gates the visibility of the index setting that allows persisting data to remote store along with local disk. - * Once the feature is ready for production release, this feature flag can be removed. - */ - public static final String REMOTE_STORE = "opensearch.experimental.feature.remote_store.enabled"; - /** * Gates the ability for Searchable Snapshots to read snapshots that are older than the * guaranteed backward compatibility for OpenSearch (one prior major version) on a best effort basis. @@ -96,8 +89,6 @@ public static boolean isEnabled(String featureFlagName) { Property.NodeScope ); - public static final Setting REMOTE_STORE_SETTING = Setting.boolSetting(REMOTE_STORE, false, Property.NodeScope); - public static final Setting EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); public static final Setting IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index ac6297598ecf5..e42ac8daa3b1c 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -63,6 +63,8 @@ import org.opensearch.env.NodeMetadata; import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.recovery.RemoteStoreRestoreService; +import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; import org.opensearch.plugins.MetadataUpgrader; import org.opensearch.repositories.RepositoryMissingException; @@ -126,7 +128,8 @@ public void start( MetadataUpgrader metadataUpgrader, PersistedClusterStateService persistedClusterStateService, RemoteClusterStateService remoteClusterStateService, - PersistedStateRegistry persistedStateRegistry + PersistedStateRegistry persistedStateRegistry, + RemoteStoreRestoreService remoteStoreRestoreService ) { assert this.persistedStateRegistry == null : "Persisted state registry should only be set once"; this.persistedStateRegistry = persistedStateRegistry; @@ -154,7 +157,7 @@ public void start( PersistedState remotePersistedState = null; boolean success = false; try { - final ClusterState clusterState = prepareInitialClusterState( + ClusterState clusterState = prepareInitialClusterState( 
transportService, clusterService, ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) @@ -164,10 +167,28 @@ public void start( ); if (DiscoveryNode.isClusterManagerNode(settings)) { - persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState); if (isRemoteStoreClusterStateEnabled(settings)) { + // If the cluster UUID loaded from local is unknown (_na_) then fetch the best state from remote + // If there is no valid state on remote, continue with initial empty state + // If there is a valid state, then restore index metadata using this state + if (ClusterState.UNKNOWN_UUID.equals(clusterState.metadata().clusterUUID())) { + String lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote( + clusterState.getClusterName().value() + ); + if (!ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID)) { + // Load state from remote + final RemoteRestoreResult remoteRestoreResult = remoteStoreRestoreService.restore( + clusterState, + lastKnownClusterUUID, + false, + new String[] {} + ); + clusterState = remoteRestoreResult.getClusterState(); + } + } remotePersistedState = new RemotePersistedState(remoteClusterStateService); } + persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState); } else { persistedState = new AsyncLucenePersistedState( settings, @@ -651,12 +672,6 @@ public void setCurrentTerm(long currentTerm) { @Override public void setLastAcceptedState(ClusterState clusterState) { try { - if (lastAcceptedState == null || lastAcceptedState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - // On the initial bootstrap, repository will not be available. So we do not persist the cluster state and bail out. - logger.info("Cluster is not yet ready to publish state to remote store"); - lastAcceptedState = clusterState; - return; - } final ClusterMetadataManifest manifest; if (shouldWriteFullClusterState(clusterState)) { manifest = remoteClusterStateService.writeFullMetadata(clusterState); @@ -706,13 +721,8 @@ private boolean shouldWriteFullClusterState(ClusterState clusterState) { @Override public void markLastAcceptedStateAsCommitted() { try { - if (lastAcceptedState == null - || lastAcceptedManifest == null - || lastAcceptedState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - // On the initial bootstrap, repository will not be available. So we do not persist the cluster state and bail out. 
- logger.trace("Cluster is not yet ready to publish state to remote store"); - return; - } + assert lastAcceptedState != null : "Last accepted state is not present"; + assert lastAcceptedManifest != null : "Last accepted manifest is not present"; final ClusterMetadataManifest committedManifest = remoteClusterStateService.markLastStateAsCommitted( lastAcceptedState, lastAcceptedManifest diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java index cac77f9996438..040c0663efbd9 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -41,6 +41,7 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { private static final ParseField NODE_ID_FIELD = new ParseField("node_id"); private static final ParseField COMMITTED_FIELD = new ParseField("committed"); private static final ParseField INDICES_FIELD = new ParseField("indices"); + private static final ParseField PREVIOUS_CLUSTER_UUID = new ParseField("previous_cluster_uuid"); private static long term(Object[] fields) { return (long) fields[0]; @@ -74,6 +75,10 @@ private static List indices(Object[] fields) { return (List) fields[7]; } + private static String previousClusterUUID(Object[] fields) { + return (String) fields[8]; + } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "cluster_metadata_manifest", fields -> new ClusterMetadataManifest( @@ -84,7 +89,8 @@ private static List indices(Object[] fields) { opensearchVersion(fields), nodeId(fields), committed(fields), - indices(fields) + indices(fields), + previousClusterUUID(fields) ) ); @@ -101,6 +107,7 @@ private static List indices(Object[] fields) { (p, c) -> UploadedIndexMetadata.fromXContent(p), INDICES_FIELD ); + PARSER.declareString(ConstructingObjectParser.constructorArg(), PREVIOUS_CLUSTER_UUID); } private final List indices; @@ -111,6 +118,7 @@ private static List indices(Object[] fields) { private final Version opensearchVersion; private final String nodeId; private final boolean committed; + private final String previousClusterUUID; public List getIndices() { return indices; @@ -144,6 +152,10 @@ public boolean isCommitted() { return committed; } + public String getPreviousClusterUUID() { + return previousClusterUUID; + } + public ClusterMetadataManifest( long clusterTerm, long version, @@ -152,7 +164,8 @@ public ClusterMetadataManifest( Version opensearchVersion, String nodeId, boolean committed, - List indices + List indices, + String previousClusterUUID ) { this.clusterTerm = clusterTerm; this.stateVersion = version; @@ -162,6 +175,7 @@ public ClusterMetadataManifest( this.nodeId = nodeId; this.committed = committed; this.indices = Collections.unmodifiableList(indices); + this.previousClusterUUID = previousClusterUUID; } public ClusterMetadataManifest(StreamInput in) throws IOException { @@ -173,6 +187,7 @@ public ClusterMetadataManifest(StreamInput in) throws IOException { this.nodeId = in.readString(); this.committed = in.readBoolean(); this.indices = Collections.unmodifiableList(in.readList(UploadedIndexMetadata::new)); + this.previousClusterUUID = in.readString(); } public static Builder builder() { @@ -199,6 +214,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } builder.endArray(); + 
builder.field(PREVIOUS_CLUSTER_UUID.getPreferredName(), getPreviousClusterUUID()); return builder; } @@ -212,6 +228,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeId); out.writeBoolean(committed); out.writeCollection(indices); + out.writeString(previousClusterUUID); } @Override @@ -230,12 +247,23 @@ public boolean equals(Object o) { && Objects.equals(stateUUID, that.stateUUID) && Objects.equals(opensearchVersion, that.opensearchVersion) && Objects.equals(nodeId, that.nodeId) - && Objects.equals(committed, that.committed); + && Objects.equals(committed, that.committed) + && Objects.equals(previousClusterUUID, that.previousClusterUUID); } @Override public int hashCode() { - return Objects.hash(indices, clusterTerm, stateVersion, clusterUUID, stateUUID, opensearchVersion, nodeId, committed); + return Objects.hash( + indices, + clusterTerm, + stateVersion, + clusterUUID, + stateUUID, + opensearchVersion, + nodeId, + committed, + previousClusterUUID + ); } @Override @@ -261,6 +289,7 @@ public static class Builder { private String stateUUID; private Version opensearchVersion; private String nodeId; + private String previousClusterUUID; private boolean committed; public Builder indices(List indices) { @@ -307,6 +336,11 @@ public List getIndices() { return indices; } + public Builder previousClusterUUID(String previousClusterUUID) { + this.previousClusterUUID = previousClusterUUID; + return this; + } + public Builder() { indices = new ArrayList<>(); } @@ -320,6 +354,7 @@ public Builder(ClusterMetadataManifest manifest) { this.nodeId = manifest.nodeId; this.committed = manifest.committed; this.indices = new ArrayList<>(manifest.indices); + this.previousClusterUUID = manifest.previousClusterUUID; } public ClusterMetadataManifest build() { @@ -331,7 +366,8 @@ public ClusterMetadataManifest build() { opensearchVersion, nodeId, committed, - indices + indices, + previousClusterUUID ); } @@ -387,10 +423,15 @@ public UploadedIndexMetadata(StreamInput in) throws IOException { this.uploadedFilename = in.readString(); } - public String getUploadedFilename() { + public String getUploadedFilePath() { return uploadedFilename; } + public String getUploadedFilename() { + String[] splitPath = uploadedFilename.split("/"); + return splitPath[splitPath.length - 1]; + } + public String getIndexName() { return indexName; } @@ -404,7 +445,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder.startObject() .field(INDEX_NAME_FIELD.getPreferredName(), getIndexName()) .field(INDEX_UUID_FIELD.getPreferredName(), getIndexUUID()) - .field(UPLOADED_FILENAME_FIELD.getPreferredName(), getUploadedFilename()) + .field(UPLOADED_FILENAME_FIELD.getPreferredName(), getUploadedFilePath()) .endObject(); } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index d2f6fd8ebe228..cf750bb11f3f8 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -18,6 +18,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import 
org.opensearch.common.settings.Setting.Property; @@ -28,12 +29,14 @@ import org.opensearch.core.index.Index; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; +import org.opensearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; @@ -42,12 +45,16 @@ import java.util.Base64; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -67,6 +74,12 @@ public class RemoteClusterStateService implements Closeable { public static final String METADATA_MANIFEST_NAME_FORMAT = "%s"; + public static final int RETAINED_MANIFESTS = 10; + + public static final String DELIMITER = "__"; + + private static final Logger logger = LogManager.getLogger(RemoteClusterStateService.class); + public static final int INDEX_METADATA_UPLOAD_WAIT_MILLIS = 20000; public static final ChecksumBlobStoreFormat INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( @@ -90,33 +103,48 @@ public class RemoteClusterStateService implements Closeable { Property.Final ); - private static final Logger logger = LogManager.getLogger(RemoteClusterStateService.class); - - public static final String DELIMITER = "__"; + private static final String CLUSTER_STATE_PATH_TOKEN = "cluster-state"; + private static final String INDEX_PATH_TOKEN = "index"; + private static final String MANIFEST_PATH_TOKEN = "manifest"; + private static final String MANIFEST_FILE_PREFIX = "manifest"; + private static final String INDEX_METADATA_FILE_PREFIX = "metadata"; private final String nodeId; private final Supplier repositoriesService; private final Settings settings; private final LongSupplier relativeTimeNanosSupplier; + private final ThreadPool threadpool; private BlobStoreRepository blobStoreRepository; + private BlobStoreTransferService blobStoreTransferService; private volatile TimeValue slowWriteLoggingThreshold; + private final AtomicBoolean deleteStaleMetadataRunning = new AtomicBoolean(false); + public RemoteClusterStateService( String nodeId, Supplier repositoriesService, Settings settings, ClusterSettings clusterSettings, - LongSupplier relativeTimeNanosSupplier + LongSupplier relativeTimeNanosSupplier, + ThreadPool threadPool ) { assert isRemoteStoreClusterStateEnabled(settings) : "Remote cluster state is not enabled"; this.nodeId = nodeId; this.repositoriesService = repositoriesService; this.settings = settings; this.relativeTimeNanosSupplier = relativeTimeNanosSupplier; + this.threadpool = threadPool; this.slowWriteLoggingThreshold = clusterSettings.get(SLOW_WRITE_LOGGING_THRESHOLD); clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold); } + private 
BlobStoreTransferService getBlobStoreTransferService() { + if (blobStoreTransferService == null) { + blobStoreTransferService = new BlobStoreTransferService(blobStoreRepository.blobStore(), threadpool); + } + return blobStoreTransferService; + } + /** * This method uploads entire cluster state metadata to the configured blob store. For now only index metadata upload is supported. This method should be * invoked by the elected cluster manager when the remote cluster state is enabled. @@ -130,14 +158,20 @@ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState) thro logger.error("Local node is not elected cluster manager. Exiting"); return null; } - ensureRepositorySet(); + + // should fetch the previous cluster UUID before writing full cluster state. + // Whenever a new election happens, a new leader will be elected and it might have stale previous UUID + final String previousClusterUUID = fetchPreviousClusterUUID( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); // any validations before/after upload ? final List allUploadedIndexMetadata = writeIndexMetadataParallel( clusterState, new ArrayList<>(clusterState.metadata().indices().values()) ); - final ClusterMetadataManifest manifest = uploadManifest(clusterState, allUploadedIndexMetadata, false); + final ClusterMetadataManifest manifest = uploadManifest(clusterState, allUploadedIndexMetadata, previousClusterUUID, false); final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { logger.warn( @@ -159,8 +193,8 @@ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState) thro /** * This method uploads the diff between the previous cluster state and the current cluster state. The previous manifest file is needed to create the new - * manifest. The new manifest file is created by using the unchanged metadata from the previous manifest and the new metadata changes from the current cluster - * state. + * manifest. The new manifest file is created by using the unchanged metadata from the previous manifest and the new metadata changes from the current + * cluster state. * * @return The uploaded ClusterMetadataManifest file */ @@ -214,7 +248,14 @@ public ClusterMetadataManifest writeIncrementalMetadata( for (String removedIndexName : previousStateIndexMetadataVersionByName.keySet()) { allUploadedIndexMetadata.remove(removedIndexName); } - final ClusterMetadataManifest manifest = uploadManifest(clusterState, new ArrayList<>(allUploadedIndexMetadata.values()), false); + final ClusterMetadataManifest manifest = uploadManifest( + clusterState, + new ArrayList<>(allUploadedIndexMetadata.values()), + previousManifest.getPreviousClusterUUID(), + false + ); + deleteStaleClusterMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), RETAINED_MANIFESTS); + final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { logger.warn( @@ -238,10 +279,10 @@ public ClusterMetadataManifest writeIncrementalMetadata( /** * Uploads provided IndexMetadata's to remote store in parallel. The call is blocking so the method waits for upload to finish and then return. 
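+ * Uploads are fanned out through writeIndexMetadataAsync and awaited on a CountDownLatch bounded by + * INDEX_METADATA_UPLOAD_WAIT_MILLIS.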
+ * * @param clusterState current ClusterState * @param toUpload list of IndexMetadata to upload * @return {@code List} list of IndexMetadata uploaded to remote - * @throws IOException */ private List writeIndexMetadataParallel(ClusterState clusterState, List toUpload) throws IOException { @@ -312,10 +353,10 @@ private List writeIndexMetadataParallel(ClusterState clus /** * Allows async Upload of IndexMetadata to remote + * * @param clusterState current ClusterState * @param indexMetadata {@link IndexMetadata} to upload * @param latchedActionListener listener to respond back on after upload finishes - * @throws IOException */ private void writeIndexMetadataAsync( ClusterState clusterState, @@ -327,13 +368,13 @@ private void writeIndexMetadataAsync( clusterState.metadata().clusterUUID(), indexMetadata.getIndexUUID() ); - + final String indexMetadataFilename = indexMetadataFileName(indexMetadata); ActionListener completionListener = ActionListener.wrap( resp -> latchedActionListener.onResponse( new UploadedIndexMetadata( indexMetadata.getIndex().getName(), indexMetadata.getIndexUUID(), - indexMetadataContainer.path().buildAsString() + indexMetadataFileName(indexMetadata) + indexMetadataContainer.path().buildAsString() + indexMetadataFilename ) ), ex -> latchedActionListener.onFailure(new IndexMetadataTransferException(indexMetadata.getIndex().toString(), ex)) @@ -342,7 +383,7 @@ private void writeIndexMetadataAsync( INDEX_METADATA_FORMAT.writeAsync( indexMetadata, indexMetadataContainer, - indexMetadataFileName(indexMetadata), + indexMetadataFilename, blobStoreRepository.getCompressor(), completionListener ); @@ -357,12 +398,7 @@ public ClusterMetadataManifest markLastStateAsCommitted(ClusterState clusterStat } assert clusterState != null : "Last accepted cluster state is not set"; assert previousManifest != null : "Last cluster metadata manifest is not set"; - return uploadManifest(clusterState, previousManifest.getIndices(), true); - } - - public ClusterState getLatestClusterState(String clusterUUID) { - // todo - return null; + return uploadManifest(clusterState, previousManifest.getIndices(), previousManifest.getPreviousClusterUUID(), true); } @Override @@ -372,12 +408,8 @@ public void close() throws IOException { } } - // Visible for testing - void ensureRepositorySet() { - if (blobStoreRepository != null) { - return; - } - assert isRemoteStoreClusterStateEnabled(settings) : "Remote cluster state is not enabled"; + public void start() { + assert isRemoteStoreClusterStateEnabled(settings) == true : "Remote cluster state is not enabled"; final String remoteStoreRepo = settings.get( Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY ); @@ -390,6 +422,7 @@ void ensureRepositorySet() { private ClusterMetadataManifest uploadManifest( ClusterState clusterState, List uploadedIndexMetadata, + String previousClusterUUID, boolean committed ) throws IOException { synchronized (this) { @@ -402,7 +435,8 @@ private ClusterMetadataManifest uploadManifest( Version.CURRENT, nodeId, committed, - uploadedIndexMetadata + uploadedIndexMetadata, + previousClusterUUID ); writeMetadataManifest(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), manifest, manifestFileName); return manifest; @@ -415,24 +449,37 @@ private void writeMetadataManifest(String clusterName, String clusterUUID, Clust CLUSTER_METADATA_MANIFEST_FORMAT.write(uploadManifest, metadataManifestContainer, fileName, blobStoreRepository.getCompressor()); } + 
private String fetchPreviousClusterUUID(String clusterName, String clusterUUID) { + final Optional<ClusterMetadataManifest> latestManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + if (!latestManifest.isPresent()) { + final String previousClusterUUID = getLastKnownUUIDFromRemote(clusterName); + assert !clusterUUID.equals(previousClusterUUID) : "Last known cluster UUID must be different from the current cluster UUID"; + return previousClusterUUID; + } + return latestManifest.get().getPreviousClusterUUID(); + } + private BlobContainer indexMetadataContainer(String clusterName, String clusterUUID, String indexUUID) { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/ftqsCnn9TgOX return blobStoreRepository.blobStore() - .blobContainer( - blobStoreRepository.basePath() - .add(encodeString(clusterName)) - .add("cluster-state") - .add(clusterUUID) - .add("index") - .add(indexUUID) - ); + .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(INDEX_PATH_TOKEN).add(indexUUID)); } private BlobContainer manifestContainer(String clusterName, String clusterUUID) { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest + return blobStoreRepository.blobStore().blobContainer(getManifestFolderPath(clusterName, clusterUUID)); + } + + private BlobPath getCusterMetadataBasePath(String clusterName, String clusterUUID) { + return blobStoreRepository.basePath().add(encodeString(clusterName)).add(CLUSTER_STATE_PATH_TOKEN).add(clusterUUID); + } + + private BlobContainer clusterUUIDContainer(String clusterName) { return blobStoreRepository.blobStore() .blobContainer( - blobStoreRepository.basePath().add(encodeString(clusterName)).add("cluster-state").add(clusterUUID).add("manifest") + blobStoreRepository.basePath() + .add(Base64.getUrlEncoder().withoutPadding().encodeToString(clusterName.getBytes(StandardCharsets.UTF_8))) + .add(CLUSTER_STATE_PATH_TOKEN) ); } @@ -442,32 +489,44 @@ private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) { private static String getManifestFileName(long term, long version) { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest_2147483642_2147483637_456536447 + return String.join(DELIMITER, getManifestFileNamePrefix(term, version), RemoteStoreUtils.invertLong(System.currentTimeMillis())); + } + + private static String getManifestFileNamePrefix(long term, long version) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest_2147483642_2147483637 + return String.join(DELIMITER, MANIFEST_PATH_TOKEN, RemoteStoreUtils.invertLong(term), RemoteStoreUtils.invertLong(version)); + } + + private static String indexMetadataFileName(IndexMetadata indexMetadata) { return String.join( DELIMITER, - "manifest", - RemoteStoreUtils.invertLong(term), - RemoteStoreUtils.invertLong(version), - RemoteStoreUtils.invertLong(System.currentTimeMillis()) + INDEX_METADATA_FILE_PREFIX, + String.valueOf(indexMetadata.getVersion()), + String.valueOf(System.currentTimeMillis()) ); } - private static String indexMetadataFileName(IndexMetadata indexMetadata) { - return String.join(DELIMITER, "metadata", String.valueOf(indexMetadata.getVersion()), String.valueOf(System.currentTimeMillis())); + private BlobPath getManifestFolderPath(String clusterName, String clusterUUID) { + return getCusterMetadataBasePath(clusterName, clusterUUID).add(MANIFEST_PATH_TOKEN); } /** * Fetch latest index metadata from remote cluster state + * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster * @return {@code Map<String, IndexMetadata>} latest IndexUUID to IndexMetadata map */ public Map<String, IndexMetadata> getLatestIndexMetadata(String clusterName, String clusterUUID) throws IOException { - ensureRepositorySet(); + start(); Map<String, IndexMetadata> remoteIndexMetadata = new HashMap<>(); - ClusterMetadataManifest clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); assert Objects.equals(clusterUUID, clusterMetadataManifest.getClusterUUID()) + Optional<ClusterMetadataManifest> clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + if (!clusterMetadataManifest.isPresent()) { + throw new IllegalStateException("Latest index metadata is not present for the provided clusterUUID"); + } + assert Objects.equals(clusterUUID, clusterMetadataManifest.get().getClusterUUID()) : "Corrupt ClusterMetadataManifest found. Cluster UUID mismatch."; - for (UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) { + for (UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.get().getIndices()) { IndexMetadata indexMetadata = getIndexMetadata(clusterName, clusterUUID, uploadedIndexMetadata); remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata); } @@ -476,6 +535,7 @@ public Map<String, IndexMetadata> getLatestIndexMetadata(String clusterName, Str /** * Fetch index metadata from remote cluster state + * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster * @param uploadedIndexMetadata {@link UploadedIndexMetadata} contains details about remote location of index metadata @@ -499,22 +559,115 @@ private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, U /** * Fetch latest ClusterMetadataManifest from remote state store + * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster * @return ClusterMetadataManifest */ - public ClusterMetadataManifest getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { - String latestManifestFileName = getLatestManifestFileName(clusterName, clusterUUID); - return fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, latestManifestFileName); + public Optional<ClusterMetadataManifest> getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { + Optional<String> latestManifestFileName = getLatestManifestFileName(clusterName, clusterUUID); + if (latestManifestFileName.isPresent()) { + return Optional.of(fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, latestManifestFileName.get())); + } + return Optional.empty(); + } + + /** + * Fetch the previous cluster UUIDs from remote state store and return the most recent valid cluster UUID + * + * @param clusterName The cluster name for which previous cluster UUID is to be fetched + * @return Last valid cluster UUID + */ + public String getLastKnownUUIDFromRemote(String clusterName) { + try { + Set<String> clusterUUIDs = getAllClusterUUIDs(clusterName); + Map<String, ClusterMetadataManifest> latestManifests = getLatestManifestForAllClusterUUIDs(clusterName, clusterUUIDs); + List<String> validChain = createClusterChain(latestManifests); + if (validChain.isEmpty()) { + return ClusterState.UNKNOWN_UUID; + } + return validChain.get(0); + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while fetching previous UUIDs from remote store for cluster name: %s", clusterName), + e + ); + } + } + + private Set<String> getAllClusterUUIDs(String clusterName) throws IOException { + Map<String, BlobContainer> clusterUUIDMetadata = clusterUUIDContainer(clusterName).children(); + if (clusterUUIDMetadata == null) { + return
Collections.emptySet(); + } + return Collections.unmodifiableSet(clusterUUIDMetadata.keySet()); + } + + private Map<String, ClusterMetadataManifest> getLatestManifestForAllClusterUUIDs(String clusterName, Set<String> clusterUUIDs) { + Map<String, ClusterMetadataManifest> manifestsByClusterUUID = new HashMap<>(); + for (String clusterUUID : clusterUUIDs) { + try { + Optional<ClusterMetadataManifest> manifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + manifest.ifPresent(clusterMetadataManifest -> manifestsByClusterUUID.put(clusterUUID, clusterMetadataManifest)); + } catch (Exception e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Exception in fetching manifest for clusterUUID: %s", clusterUUID), + e + ); + } + } + return manifestsByClusterUUID; + } + + /** + * This method creates a valid cluster UUID chain. + * + * @param manifestsByClusterUUID Map of latest ClusterMetadataManifest for every cluster UUID + * @return List of cluster UUIDs. The first element is the most recent cluster UUID in the chain + */ + private List<String> createClusterChain(final Map<String, ClusterMetadataManifest> manifestsByClusterUUID) { + final Map<String, String> clusterUUIDGraph = manifestsByClusterUUID.values() + .stream() + .collect(Collectors.toMap(ClusterMetadataManifest::getClusterUUID, ClusterMetadataManifest::getPreviousClusterUUID)); + final List<String> validClusterUUIDs = manifestsByClusterUUID.values() + .stream() + .filter(m -> !isInvalidClusterUUID(m) && !clusterUUIDGraph.containsValue(m.getClusterUUID())) + .map(ClusterMetadataManifest::getClusterUUID) + .collect(Collectors.toList()); + if (validClusterUUIDs.isEmpty()) { + logger.info("There is no valid previous cluster UUID"); + return Collections.emptyList(); + } + if (validClusterUUIDs.size() > 1) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "The system has ended up with multiple valid cluster states in the remote store. " + + "Please check their latest manifest to decide which one you want to keep. Valid Cluster UUIDs: - %s", + validClusterUUIDs + ) + ); + } + final List<String> validChain = new ArrayList<>(); + String currentUUID = validClusterUUIDs.get(0); + while (!ClusterState.UNKNOWN_UUID.equals(currentUUID)) { + validChain.add(currentUUID); + // Getting the previous cluster UUID of a cluster UUID from the clusterUUID Graph + currentUUID = clusterUUIDGraph.get(currentUUID); + } + return validChain; + } + + private boolean isInvalidClusterUUID(ClusterMetadataManifest manifest) { + return !manifest.isCommitted() && manifest.getIndices().isEmpty(); } /** * Fetch latest ClusterMetadataManifest file from remote state store + * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster * @return latest ClusterMetadataManifest filename */ - private String getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { + private Optional<String> getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { try { /** * {@link BlobContainer#listBlobsByPrefixInSortedOrder} will get the latest manifest file * when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top.
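* Manifest file names are built from {@link RemoteStoreUtils#invertLong} encodings of the term, version and upload timestamp, so the numerically latest manifest is also the lexicographically smallest name.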
*/ List<BlobMetadata> manifestFilesMetadata = manifestContainer(clusterName, clusterUUID).listBlobsByPrefixInSortedOrder( - "manifest" + DELIMITER, + MANIFEST_FILE_PREFIX + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC ); if (manifestFilesMetadata != null && !manifestFilesMetadata.isEmpty()) { - return manifestFilesMetadata.get(0).name(); + return Optional.of(manifestFilesMetadata.get(0).name()); } } catch (IOException e) { throw new IllegalStateException("Error while fetching latest manifest file for remote cluster state", e); } - - throw new IllegalStateException(String.format(Locale.ROOT, "Remote Cluster State not found - %s", clusterUUID)); + logger.info("No manifest file present in remote store for cluster name: {}, cluster UUID: {}", clusterName, clusterUUID); + return Optional.empty(); } /** * Fetch ClusterMetadataManifest from remote state store + * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster * @return ClusterMetadataManifest @@ -572,4 +726,141 @@ public IndexMetadataTransferException(String errorDesc, Throwable cause) { super(errorDesc, cause); } } + + /** + * Purges all remote cluster state for the provided cluster UUIDs + * @param clusterName name of the cluster + * @param clusterUUIDs cluster UUIDs for which the remote state needs to be purged + */ + public void deleteStaleClusterMetadata(String clusterName, List<String> clusterUUIDs) { + clusterUUIDs.forEach(clusterUUID -> { + getBlobStoreTransferService().deleteAsync( + ThreadPool.Names.REMOTE_PURGE, + getCusterMetadataBasePath(clusterName, clusterUUID), + new ActionListener<>() { + @Override + public void onResponse(Void unused) { + logger.info("Deleted all remote cluster metadata for cluster UUID - {}", clusterUUID); + } + + @Override + public void onFailure(Exception e) { + logger.error( + new ParameterizedMessage( + "Exception occurred while deleting all remote cluster metadata for cluster UUID {}", + clusterUUID + ), + e + ); + } + } + ); + }); + } + + /** + * Deletes all manifests older than the latest {@code manifestsToRetain} manifests. Also cleans up unreferenced IndexMetadata associated with older manifests + * @param clusterName name of the cluster + * @param clusterUUID uuid of cluster state to refer to in remote + * @param manifestsToRetain number of latest manifest files to retain in remote + */ + private void deleteStaleClusterMetadata(String clusterName, String clusterUUID, int manifestsToRetain) { + if (deleteStaleMetadataRunning.compareAndSet(false, true) == false) { + logger.info("Delete stale cluster metadata task is already in progress."); + return; + } + try { + getBlobStoreTransferService().listAllInSortedOrderAsync( + ThreadPool.Names.REMOTE_PURGE, + getManifestFolderPath(clusterName, clusterUUID), + "manifest", + Integer.MAX_VALUE, + new ActionListener<>() { + @Override + public void onResponse(List<BlobMetadata> blobMetadata) { + if (blobMetadata.size() > manifestsToRetain) { + deleteClusterMetadata( + clusterName, + clusterUUID, + blobMetadata.subList(0, manifestsToRetain), + blobMetadata.subList(manifestsToRetain, blobMetadata.size()) + ); + } + deleteStaleMetadataRunning.set(false); + } + + @Override + public void onFailure(Exception e) { + logger.error( + new ParameterizedMessage( + "Exception occurred while deleting Remote Cluster Metadata for cluster UUID {}", + clusterUUID + ), + e + ); + deleteStaleMetadataRunning.set(false); + } + } + ); + } catch (Exception e) { + deleteStaleMetadataRunning.set(false); + throw e; + } + } + + private void deleteClusterMetadata( + String clusterName, + String clusterUUID, + List<BlobMetadata> activeManifestBlobMetadata, + List<BlobMetadata> staleManifestBlobMetadata + ) { + try { + Set<String> filesToKeep = new HashSet<>(); + Set<String> staleManifestPaths = new HashSet<>(); + Set<String> staleIndexMetadataPaths = new HashSet<>(); + activeManifestBlobMetadata.forEach(blobMetadata -> { + ClusterMetadataManifest clusterMetadataManifest = fetchRemoteClusterMetadataManifest( + clusterName, + clusterUUID, + blobMetadata.name() + ); + clusterMetadataManifest.getIndices() + .forEach(uploadedIndexMetadata -> filesToKeep.add(uploadedIndexMetadata.getUploadedFilename())); + }); + staleManifestBlobMetadata.forEach(blobMetadata -> { + ClusterMetadataManifest clusterMetadataManifest = fetchRemoteClusterMetadataManifest( + clusterName, + clusterUUID, + blobMetadata.name() + ); + staleManifestPaths.add(new BlobPath().add(MANIFEST_PATH_TOKEN).buildAsString() + blobMetadata.name()); + clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> { + if (filesToKeep.contains(uploadedIndexMetadata.getUploadedFilename()) == false) { + staleIndexMetadataPaths.add( + new BlobPath().add(INDEX_PATH_TOKEN).add(uploadedIndexMetadata.getIndexUUID()).buildAsString() + + uploadedIndexMetadata.getUploadedFilename() + + ".dat" + ); + } + }); + }); + + if (staleManifestPaths.isEmpty()) { + logger.info("No stale Remote Cluster Metadata files found"); + return; + } + + deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleIndexMetadataPaths)); + deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleManifestPaths)); + } catch (IllegalStateException e) { + logger.error("Error while fetching Remote Cluster Metadata manifests", e); + } catch (IOException e) { + logger.error("Error while deleting stale Remote Cluster Metadata files", e); + } catch (Exception e) { + logger.error("Unexpected error while deleting stale Remote Cluster Metadata files", e); + } + } + + private void deleteStalePaths(String clusterName, String clusterUUID, List<String> stalePaths) throws IOException { + logger.debug(String.format(Locale.ROOT, "Deleting stale files from remote - %s", stalePaths));
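+ // The stale paths are relative to the cluster metadata base path and are resolved against it by deleteBlobs.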
+ getBlobStoreTransferService().deleteBlobs(getCusterMetadataBasePath(clusterName, clusterUUID), stalePaths); + } } diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index ce02dfb21c587..ed44102d0abe4 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java @@ -54,6 +54,12 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.channels.TraceableHttpChannel; +import org.opensearch.telemetry.tracing.channels.TraceableRestChannel; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.BindTransportException; @@ -105,7 +111,8 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private final Set httpChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); private final Set httpServerChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); - private final HttpTracer tracer; + private final HttpTracer httpTracer; + private final Tracer tracer; protected AbstractHttpServerTransport( Settings settings, @@ -114,7 +121,8 @@ protected AbstractHttpServerTransport( ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer telemetryTracer ) { this.settings = settings; this.networkService = networkService; @@ -138,7 +146,8 @@ protected AbstractHttpServerTransport( this.port = SETTING_HTTP_PORT.get(settings); this.maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings); - this.tracer = new HttpTracer(settings, clusterSettings); + this.httpTracer = new HttpTracer(settings, clusterSettings); + this.tracer = telemetryTracer; } @Override @@ -352,19 +361,31 @@ protected void serverAcceptedChannel(HttpChannel httpChannel) { * @param httpChannel that received the http request */ public void incomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel) { - handleIncomingRequest(httpRequest, httpChannel, httpRequest.getInboundException()); + final Span span = tracer.startSpan(SpanBuilder.from(httpRequest), httpRequest.getHeaders()); + try (final SpanScope httpRequestSpanScope = tracer.withSpanInScope(span)) { + HttpChannel traceableHttpChannel = TraceableHttpChannel.create(httpChannel, span, tracer); + handleIncomingRequest(httpRequest, traceableHttpChannel, httpRequest.getInboundException()); + } } // Visible for testing void dispatchRequest(final RestRequest restRequest, final RestChannel channel, final Throwable badRequestCause) { + RestChannel traceableRestChannel = channel; final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - if (badRequestCause != null) { - dispatcher.dispatchBadRequest(channel, threadContext, badRequestCause); - } else { - dispatcher.dispatchRequest(restRequest, channel, threadContext); + final Span span = tracer.startSpan(SpanBuilder.from(restRequest)); + try (final SpanScope spanScope = tracer.withSpanInScope(span)) { + if (channel != null) { + 
traceableRestChannel = TraceableRestChannel.create(channel, span, tracer); + } + if (badRequestCause != null) { + dispatcher.dispatchBadRequest(traceableRestChannel, threadContext, badRequestCause); + } else { + dispatcher.dispatchRequest(restRequest, traceableRestChannel, threadContext); + } } } + } private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel, final Exception exception) { @@ -401,7 +422,7 @@ private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChan restRequest = innerRestRequest; } - final HttpTracer trace = tracer.maybeTraceRequest(restRequest, exception); + final HttpTracer trace = httpTracer.maybeTraceRequest(restRequest, exception); /* * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index ff2a67e99a94d..03c71351294d5 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -1063,11 +1063,11 @@ public boolean isSegRepEnabled() { } public boolean isSegRepLocalEnabled() { - return isSegRepEnabled() && !isSegRepWithRemoteEnabled(); + return isSegRepEnabled() && !isRemoteStoreEnabled(); } public boolean isSegRepWithRemoteEnabled() { - return isSegRepEnabled() && isRemoteStoreEnabled() && FeatureFlags.isEnabled(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL); + return isSegRepEnabled() && isRemoteStoreEnabled(); } /** diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index 5cdff14cae360..d05242a3aeaf7 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -36,7 +36,6 @@ import org.opensearch.snapshots.RestoreInfo; import org.opensearch.snapshots.RestoreService; -import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -147,7 +146,7 @@ public RemoteRestoreResult restore( .forEach(indexMetadata -> { indexMetadataMap.put(indexMetadata.getIndex().getName(), new Tuple<>(true, indexMetadata)); }); - } catch (IOException e) { + } catch (Exception e) { throw new IllegalStateException("Unable to restore remote index metadata", e); } } else { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 3cd2fb231e284..8ed75330f938e 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3729,8 +3729,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro internalRefreshListener.add( new RemoteStoreRefreshListener( this, - // Add the checkpoint publisher if the Segment Replciation via remote store is enabled. - indexSettings.isSegRepWithRemoteEnabled() ? 
this.checkpointPublisher : SegmentReplicationCheckpointPublisher.EMPTY, + this.checkpointPublisher, remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId()) ) ); diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index c6ae8b988aed0..5c2137ec742a4 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -38,7 +38,6 @@ import org.opensearch.action.admin.indices.rollover.MaxSizeCondition; import org.opensearch.action.resync.TransportResyncReplicationAction; import org.opensearch.common.inject.AbstractModule; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.NamedWriteableRegistry.Entry; @@ -288,9 +287,7 @@ protected void configure() { bind(RetentionLeaseSyncer.class).asEagerSingleton(); bind(SegmentReplicationCheckpointPublisher.class).asEagerSingleton(); bind(SegmentReplicationPressureService.class).asEagerSingleton(); - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - bind(RemoteStorePressureService.class).asEagerSingleton(); - } + bind(RemoteStorePressureService.class).asEagerSingleton(); } /** diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 2e2a0762ea489..2ed3cb8d9e8ea 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -69,7 +69,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.OpenSearchExecutors; @@ -457,12 +456,9 @@ protected void closeInternal() { this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, this::onRefreshIntervalUpdate); - - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(clusterService.getSettings()); - clusterService.getClusterSettings() - .addSettingsUpdateConsumer(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval); - } + this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(clusterService.getSettings()); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval); } /** diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 0904dc5fa18a4..dc538a03de595 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -55,7 +55,6 @@ import org.opensearch.common.lifecycle.AbstractLifecycleComponent; 
import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.action.ActionListener; @@ -223,10 +222,7 @@ public IndicesClusterStateService( ); indexEventListeners.add(segmentReplicationTargetService); indexEventListeners.add(segmentReplicationSourceService); - // if remote store feature is not enabled, do not wire the remote upload pressure service as an IndexEventListener. - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - indexEventListeners.add(remoteStoreStatsTrackerFactory); - } + indexEventListeners.add(remoteStoreStatsTrackerFactory); this.segmentReplicationTargetService = segmentReplicationTargetService; this.builtInIndexListener = Collections.unmodifiableList(indexEventListeners); this.indicesService = indicesService; diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java index 114702ff0d351..a4b6843d177f5 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java @@ -235,6 +235,16 @@ public static class DeviceStats implements Writeable, ToXContentFragment { final long previousWritesCompleted; final long currentSectorsWritten; final long previousSectorsWritten; + final long currentIOTime; + final long previousIOTime; + final double currentReadTime; + final double previousReadTime; + final double currentWriteTime; + final double previousWriteTime; + final double currentReadLatency; + final double previousReadLatency; + final double currentWriteLatency; + final double previousWriteLatency; public DeviceStats( final int majorDeviceNumber, @@ -244,6 +254,11 @@ public DeviceStats( final long currentSectorsRead, final long currentWritesCompleted, final long currentSectorsWritten, + final long currentIOTime, + final double currentReadTime, + final double currentWriteTime, + final double currentReadLatency, + final double currentWriteLatency, final DeviceStats previousDeviceStats ) { this( @@ -257,7 +272,17 @@ public DeviceStats( currentSectorsRead, previousDeviceStats != null ? previousDeviceStats.currentSectorsRead : -1, currentWritesCompleted, - previousDeviceStats != null ? previousDeviceStats.currentWritesCompleted : -1 + previousDeviceStats != null ? previousDeviceStats.currentWritesCompleted : -1, + currentIOTime, + previousDeviceStats != null ? previousDeviceStats.currentIOTime : -1, + currentReadTime, + previousDeviceStats != null ? previousDeviceStats.currentReadTime : -1.0, + currentWriteTime, + previousDeviceStats != null ? previousDeviceStats.currentWriteTime : -1.0, + currentReadLatency, + previousDeviceStats != null ? previousDeviceStats.currentReadLatency : -1.0, + currentWriteLatency, + previousDeviceStats != null ?
previousDeviceStats.currentWriteLatency : -1.0 ); } @@ -272,7 +297,17 @@ private DeviceStats( final long currentSectorsRead, final long previousSectorsRead, final long currentWritesCompleted, - final long previousWritesCompleted + final long previousWritesCompleted, + final long currentIOTime, + final long previousIOTime, + final double currentReadTime, + final double previousReadTime, + final double currentWriteTime, + final double previousWriteTime, + final double currentReadLatency, + final double previousReadLatency, + final double currentWriteLatency, + final double previousWriteLatency ) { this.majorDeviceNumber = majorDeviceNumber; this.minorDeviceNumber = minorDeviceNumber; @@ -285,6 +320,16 @@ private DeviceStats( this.previousSectorsRead = previousSectorsRead; this.currentSectorsWritten = currentSectorsWritten; this.previousSectorsWritten = previousSectorsWritten; + this.currentIOTime = currentIOTime; + this.previousIOTime = previousIOTime; + this.currentReadTime = currentReadTime; + this.previousReadTime = previousReadTime; + this.currentWriteTime = currentWriteTime; + this.previousWriteTime = previousWriteTime; + this.currentReadLatency = currentReadLatency; + this.previousReadLatency = previousReadLatency; + this.currentWriteLatency = currentWriteLatency; + this.previousWriteLatency = previousWriteLatency; } public DeviceStats(StreamInput in) throws IOException { @@ -299,6 +344,16 @@ public DeviceStats(StreamInput in) throws IOException { previousSectorsRead = in.readLong(); currentSectorsWritten = in.readLong(); previousSectorsWritten = in.readLong(); + currentIOTime = in.readLong(); + previousIOTime = in.readLong(); + currentReadTime = in.readDouble(); + previousReadTime = in.readDouble(); + currentWriteTime = in.readDouble(); + previousWriteTime = in.readDouble(); + currentReadLatency = in.readDouble(); + previousReadLatency = in.readDouble(); + currentWriteLatency = in.readDouble(); + previousWriteLatency = in.readDouble(); } @Override @@ -314,6 +369,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(previousSectorsRead); out.writeLong(currentSectorsWritten); out.writeLong(previousSectorsWritten); + out.writeLong(currentIOTime); + out.writeLong(previousIOTime); + out.writeDouble(currentReadTime); + out.writeDouble(previousReadTime); + out.writeDouble(currentWriteTime); + out.writeDouble(previousWriteTime); + out.writeDouble(currentReadLatency); + out.writeDouble(previousReadLatency); + out.writeDouble(currentWriteLatency); + out.writeDouble(previousWriteLatency); } public long operations() { @@ -334,6 +398,14 @@ public long writeOperations() { return (currentWritesCompleted - previousWritesCompleted); } + public long currentReadOperations() { + return currentReadsCompleted; + } + + public long currentWriteOperations() { + return currentWritesCompleted; + } + public long readKilobytes() { if (previousSectorsRead == -1) return -1; @@ -346,6 +418,66 @@ public long writeKilobytes() { return (currentSectorsWritten - previousSectorsWritten) / 2; } + public long ioTimeInMillis() { + if (previousIOTime == -1) return -1; + + return (currentIOTime - previousIOTime); + } + + public double getWriteLatency() { + if (previousWriteLatency == -1.0) return -1.0; + return currentWriteLatency - previousWriteLatency; + } + + public double getNewWriteLatency() { + double writeLatency = getWriteTime() / writeOperations(); + return writeLatency; + } + + public double getNewReadLatency() {
+ double readLatency = getReadTime() / readOperations(); + return readLatency; + } + + public double getReadLatency() { + if (previousReadLatency == -1.0) return -1.0; + return currentReadLatency - previousReadLatency; + } + + public double getReadTime() { + if (previousReadTime == -1.0) return -1.0; + return currentReadTime - previousReadTime; + } + + public double getWriteTime() { + if (previousWriteTime == -1.0) return -1.0; + return currentWriteTime - previousWriteTime; + } + + public long getCurrentIOTime() { + return this.currentIOTime; + } + + public double getCurrentReadTime() { + return this.currentReadTime; + } + + public double getCurrentWriteTime() { + return this.currentWriteTime; + } + + public double getCurrentReadLatency() { + return this.currentReadLatency; + } + + public double getCurrentWriteLatency() { + return this.currentWriteLatency; + } + + public String getDeviceName() { + return this.deviceName; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("device_name", deviceName); @@ -354,6 +488,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(IoStats.WRITE_OPERATIONS, writeOperations()); builder.field(IoStats.READ_KILOBYTES, readKilobytes()); builder.field(IoStats.WRITE_KILOBYTES, writeKilobytes()); + builder.field(IoStats.IO_TIME_MS, ioTimeInMillis()); return builder; } @@ -371,6 +506,7 @@ public static class IoStats implements Writeable, ToXContentFragment { private static final String WRITE_OPERATIONS = "write_operations"; private static final String READ_KILOBYTES = "read_kilobytes"; private static final String WRITE_KILOBYTES = "write_kilobytes"; + private static final String IO_TIME_MS = "io_time_in_millis"; final DeviceStats[] devicesStats; final long totalOperations; @@ -378,6 +514,7 @@ public static class IoStats implements Writeable, ToXContentFragment { final long totalWriteOperations; final long totalReadKilobytes; final long totalWriteKilobytes; + final long totalIOTimeInMillis; public IoStats(final DeviceStats[] devicesStats) { this.devicesStats = devicesStats; @@ -386,18 +523,21 @@ public IoStats(final DeviceStats[] devicesStats) { long totalWriteOperations = 0; long totalReadKilobytes = 0; long totalWriteKilobytes = 0; + long totalIOTimeInMillis = 0; for (DeviceStats deviceStats : devicesStats) { totalOperations += deviceStats.operations() != -1 ? deviceStats.operations() : 0; totalReadOperations += deviceStats.readOperations() != -1 ? deviceStats.readOperations() : 0; totalWriteOperations += deviceStats.writeOperations() != -1 ? deviceStats.writeOperations() : 0; totalReadKilobytes += deviceStats.readKilobytes() != -1 ? deviceStats.readKilobytes() : 0; totalWriteKilobytes += deviceStats.writeKilobytes() != -1 ? deviceStats.writeKilobytes() : 0; + totalIOTimeInMillis += deviceStats.ioTimeInMillis() != -1 ? deviceStats.ioTimeInMillis() : 0; } this.totalOperations = totalOperations; this.totalReadOperations = totalReadOperations; this.totalWriteOperations = totalWriteOperations; this.totalReadKilobytes = totalReadKilobytes; this.totalWriteKilobytes = totalWriteKilobytes; + this.totalIOTimeInMillis = totalIOTimeInMillis; } public IoStats(StreamInput in) throws IOException { @@ -412,6 +552,7 @@ public IoStats(StreamInput in) throws IOException { this.totalWriteOperations = in.readLong(); this.totalReadKilobytes = in.readLong(); this.totalWriteKilobytes = in.readLong(); + this.totalIOTimeInMillis = in.readLong(); } @Override @@ -425,6 +566,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(totalWriteOperations); out.writeLong(totalReadKilobytes); out.writeLong(totalWriteKilobytes); + out.writeLong(totalIOTimeInMillis); } public DeviceStats[] getDevicesStats() { @@ -451,6 +593,10 @@ public long getTotalWriteKilobytes() { return totalWriteKilobytes; } + public long getTotalIOTimeMillis() { + return totalIOTimeInMillis; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (devicesStats.length > 0) { @@ -468,6 +614,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(WRITE_OPERATIONS, totalWriteOperations); builder.field(READ_KILOBYTES, totalReadKilobytes); builder.field(WRITE_KILOBYTES, totalWriteKilobytes); + builder.field(IO_TIME_MS, totalIOTimeInMillis); builder.endObject(); } return builder;
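[Editor's note, not part of the patch] The new DeviceStats fields follow the class's existing sampling convention: each pair holds the latest probe value plus the value from the probe before it, and the derived accessors return the delta, with -1 as the "no previous sample" sentinel. FsProbe (next diff) sources the raw counters from /proc/diskstats, where, in the probe's zero-based fields[] view, fields[6], fields[10] and fields[12] are the milliseconds spent reading, writing and doing IO. A minimal consumer sketch; the variable stats and the sentinel guard are illustrative only:

    long ioDeltaMs = stats.ioTimeInMillis();   // -1 until two samples exist
    long opsDelta = stats.operations();        // reads + writes completed in the window
    // average device latency over the window, guarding the -1 sentinels and a zero op count
    double avgLatencyMs = (ioDeltaMs < 0 || opsDelta <= 0) ? -1.0 : (double) ioDeltaMs / opsDelta;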
diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java index e20d84cd9763e..ba5c222274bd2 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java @@ -123,6 +123,11 @@ final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers, final long sectorsRead = Long.parseLong(fields[5]); final long writesCompleted = Long.parseLong(fields[7]); final long sectorsWritten = Long.parseLong(fields[9]); + final double readTime = Double.parseDouble(fields[6]); + final double writeTime = Double.parseDouble(fields[10]); + final double readLatency = readsCompleted > 0 ? readTime / readsCompleted : 0; + final double writeLatency = writesCompleted > 0 ? writeTime / writesCompleted : 0; + final long ioTime = Long.parseLong(fields[12]); final FsInfo.DeviceStats deviceStats = new FsInfo.DeviceStats( majorDeviceNumber, minorDeviceNumber, @@ -131,6 +136,11 @@ final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers, sectorsRead, writesCompleted, sectorsWritten, + ioTime, + readTime, + writeTime, + readLatency, + writeLatency, deviceMap.get(Tuple.tuple(majorDeviceNumber, minorDeviceNumber)) ); devicesStats.add(deviceStats); diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 5f01c7eef9854..b38ecec444393 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -589,6 +589,19 @@ protected Node( new ConsistentSettingsService(settings, clusterService, consistentSettings).newHashPublisher() ); } + + TracerFactory tracerFactory; + if (FeatureFlags.isEnabled(TELEMETRY)) { + final TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterService.getClusterSettings()); + List<TelemetryPlugin> telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); + TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); + tracerFactory
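[Editor's note, not part of the patch] Both hunks above guard the per-device latency computation: dividing the cumulative read/write time by a zero operation count would otherwise propagate NaN into the stats. The guard shown is an editorial fix to the draft, not behaviour confirmed by the original author.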
= new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); + } else { + tracerFactory = new NoopTracerFactory(); + } + + tracer = tracerFactory.getTracer(); + resourcesToClose.add(tracer::close); final IngestService ingestService = new IngestService( clusterService, threadPool, @@ -689,7 +702,8 @@ protected Node( repositoriesServiceReference::get, settings, clusterService.getClusterSettings(), - threadPool::preciseRelativeTimeInNanos + threadPool::preciseRelativeTimeInNanos, + threadPool ); } else { remoteClusterStateService = null; @@ -781,7 +795,8 @@ protected Node( performanceCollectorService, threadPool, settings, - clusterService.getClusterSettings() + clusterService.getClusterSettings(), + monitorService.fsService() ); final AliasValidator aliasValidator = new AliasValidator(); @@ -866,7 +881,8 @@ protected Node( xContentRegistry, networkService, restController, - clusterService.getClusterSettings() + clusterService.getClusterSettings(), + tracer ); Collection<UnaryOperator<Map<String, IndexTemplateMetadata>>> indexTemplateMetadataUpgraders = pluginsService.filterPlugins( Plugin.class @@ -896,7 +912,8 @@ protected Node( networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings(), - taskHeaders + taskHeaders, + tracer ); TopNSearchTasksLogger taskConsumer = new TopNSearchTasksLogger(settings, settingsModule.getClusterSettings()); transportService.getTaskManager().registerTaskResourceConsumer(taskConsumer); @@ -1085,18 +1102,6 @@ protected Node( searchModule.getIndexSearcherExecutor(threadPool) ); - TracerFactory tracerFactory; - if (FeatureFlags.isEnabled(TELEMETRY)) { - final TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterService.getClusterSettings()); - List<TelemetryPlugin> telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); - TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); - tracerFactory = new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); - } else { - tracerFactory = new NoopTracerFactory(); - } - tracer = tracerFactory.getTracer(); - resourcesToClose.add(tracer::close); - final List<PersistentTasksExecutor<?>> tasksExecutors = pluginsService.filterPlugins(PersistentTaskPlugin.class) .stream() .map( @@ -1261,9 +1266,10 @@ protected TransportService newTransportService( TransportInterceptor interceptor, Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, ClusterSettings clusterSettings, - Set<String> taskHeaders + Set<String> taskHeaders, + Tracer tracer ) { - return new TransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); + return new TransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, tracer); } protected void processRecoverySettings(ClusterSettings clusterSettings, RecoverySettings recoverySettings) { @@ -1348,6 +1354,10 @@ public Node start() throws NodeValidationException { injector.getInstance(PeerRecoverySourceService.class).start(); injector.getInstance(SegmentReplicationSourceService.class).start(); + final RemoteClusterStateService remoteClusterStateService = injector.getInstance(RemoteClusterStateService.class); + if (remoteClusterStateService != null) { + remoteClusterStateService.start(); + } // Load (and maybe upgrade) the metadata stored on disk final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class); gatewayMetaState.start(
injector.getInstance(MetadataUpgrader.class), injector.getInstance(PersistedClusterStateService.class), injector.getInstance(RemoteClusterStateService.class), - injector.getInstance(PersistedStateRegistry.class) + injector.getInstance(PersistedStateRegistry.class), + injector.getInstance(RemoteStoreRestoreService.class) ); if (Assertions.ENABLED) { try { diff --git a/server/src/main/java/org/opensearch/node/NodesPerformanceStats.java b/server/src/main/java/org/opensearch/node/NodesPerformanceStats.java index e28cb163170b1..7b1f45514f919 100644 --- a/server/src/main/java/org/opensearch/node/NodesPerformanceStats.java +++ b/server/src/main/java/org/opensearch/node/NodesPerformanceStats.java @@ -42,7 +42,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * Returns map of node id to perf stats */ - public Map getNodePerfStats() { + public Map getNodeIdToNodePerfStatsMap() { return nodePerfStats; } diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java index de0aecc7833c9..f2f8e84f04e02 100644 --- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java @@ -41,6 +41,7 @@ import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpServerTransport; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; @@ -100,7 +101,8 @@ default Map> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { return Collections.emptyMap(); } diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index ab343aabd3a82..3d6679b3ef80e 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -64,7 +64,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; @@ -683,12 +682,6 @@ public static void validateRepositoryMetadataSettings( + minVersionInCluster ); } - if (REMOTE_STORE_INDEX_SHALLOW_COPY.get(repositoryMetadataSettings) && !FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - throw new RepositoryException( - repositoryName, - "setting " + REMOTE_STORE_INDEX_SHALLOW_COPY.getKey() + " cannot be enabled, as remote store feature is not enabled." 
- ); - } } private static void ensureRepositoryNotInUse(ClusterState clusterState, String repository) { diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 744e2fbd1bfc7..7e1960171043a 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -222,7 +222,7 @@ public void writeAsync( WritePriority.HIGH, (size, position) -> new OffsetRangeIndexInputStream(input, size, position), expectedChecksum, - true + ((AsyncMultiStreamBlobContainer) blobContainer).remoteIntegrityCheckSupported() ) ) { ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(remoteTransferContainer.createWriteContext(), listener); diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java index 80ac811591198..d247d48389fc8 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java @@ -82,7 +82,9 @@ private static Attributes buildSpanAttributes(HttpRequest httpRequest) { private static void populateHeader(HttpRequest httpRequest, Attributes attributes) { HEADERS_TO_BE_ADDED_AS_ATTRIBUTES.forEach(x -> { - if (httpRequest.getHeaders() != null && httpRequest.getHeaders().get(x) != null) { + if (httpRequest.getHeaders() != null + && httpRequest.getHeaders().get(x) != null + && (httpRequest.getHeaders().get(x).isEmpty() == false)) { attributes.addAttribute(x, Strings.collectionToCommaDelimitedString(httpRequest.getHeaders().get(x))); } }); diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java b/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java index d631d6ac01dd0..208df90f65d74 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java @@ -70,7 +70,7 @@ public Map headers(Map source) { if (source.containsKey(CURRENT_SPAN)) { final SpanReference current = (SpanReference) source.get(CURRENT_SPAN); - if (current != null) { + if (current != null && current.getSpan() != null) { tracingTelemetry.getContextPropagator().inject(current.getSpan(), (key, value) -> headers.put(key, value)); } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java b/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java index b58bbf08f9f9b..d8cf39d4a4d09 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java @@ -92,7 +92,7 @@ Tracer getDelegateTracer() { } @Override - public Span startSpan(String spanName, Map> headers, Attributes attributes) { - return defaultTracer.startSpan(spanName, headers, attributes); + public Span startSpan(SpanCreationContext spanCreationContext, Map> headers) { + return defaultTracer.startSpan(spanCreationContext, headers); } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java new file mode 
100644 index 0000000000000..9229d334dea01 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.channels; + +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.action.ActionListener; +import org.opensearch.http.HttpChannel; +import org.opensearch.http.HttpResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; + +import java.net.InetSocketAddress; +import java.util.Objects; + +/** + * Tracer wrapped {@link HttpChannel} + */ +public class TraceableHttpChannel implements HttpChannel { + private final HttpChannel delegate; + private final Span span; + private final Tracer tracer; + + /** + * Constructor. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + private TraceableHttpChannel(HttpChannel delegate, Span span, Tracer tracer) { + this.span = Objects.requireNonNull(span); + this.delegate = Objects.requireNonNull(delegate); + this.tracer = Objects.requireNonNull(tracer); + } + + /** + * Factory method. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + * @return http channel + */ + public static HttpChannel create(HttpChannel delegate, Span span, Tracer tracer) { + if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + return new TraceableHttpChannel(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public void close() { + delegate.close(); + } + + @Override + public void addCloseListener(ActionListener listener) { + delegate.addCloseListener(listener); + } + + @Override + public boolean isOpen() { + return delegate.isOpen(); + } + + @Override + public void sendResponse(HttpResponse response, ActionListener listener) { + delegate.sendResponse(response, TraceableActionListener.create(listener, span, tracer)); + } + + @Override + public InetSocketAddress getLocalAddress() { + return delegate.getLocalAddress(); + } + + @Override + public InetSocketAddress getRemoteAddress() { + return delegate.getRemoteAddress(); + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java new file mode 100644 index 0000000000000..d256c9d4d0e53 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java @@ -0,0 +1,106 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.channels; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; + +import java.io.IOException; +import java.util.Objects; + +/** + * Tracer wrapped {@link RestChannel} + */ +public class TraceableRestChannel implements RestChannel { + + private final RestChannel delegate; + private final Span span; + + private final Tracer tracer; + + /** + * Constructor. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + private TraceableRestChannel(RestChannel delegate, Span span, Tracer tracer) { + this.span = Objects.requireNonNull(span); + this.delegate = Objects.requireNonNull(delegate); + this.tracer = Objects.requireNonNull(tracer); + } + + /** + * Factory method. + * @param delegate delegate + * @param span span + * @param tracer tracer + * @return rest channel + */ + public static RestChannel create(RestChannel delegate, Span span, Tracer tracer) { + if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + return new TraceableRestChannel(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public XContentBuilder newBuilder() throws IOException { + return delegate.newBuilder(); + } + + @Override + public XContentBuilder newErrorBuilder() throws IOException { + return delegate.newErrorBuilder(); + } + + @Override + public XContentBuilder newBuilder(MediaType mediaType, boolean useFiltering) throws IOException { + return delegate.newBuilder(mediaType, useFiltering); + } + + @Override + public XContentBuilder newBuilder(MediaType mediaType, MediaType responseContentType, boolean useFiltering) throws IOException { + return delegate.newBuilder(mediaType, responseContentType, useFiltering); + } + + @Override + public BytesStreamOutput bytesOutput() { + return delegate.bytesOutput(); + } + + @Override + public RestRequest request() { + return delegate.request(); + } + + @Override + public boolean detailedErrorsEnabled() { + return delegate.detailedErrorsEnabled(); + } + + @Override + public void sendResponse(RestResponse response) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.sendResponse(response); + } finally { + span.endSpan(); + } + } +} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/package-info.java similarity index 63% rename from plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java rename to server/src/main/java/org/opensearch/telemetry/tracing/channels/package-info.java index e996873963b1b..ee4b675d5dc30 100644 --- a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/package-info.java @@ -7,6 +7,6 @@ */ /** - * A plugin that implements compression codecs with native implementation. + * This package contains classes needed for tracing requests. 
*/ -package org.opensearch.index.codec.customcodecs; +package org.opensearch.telemetry.tracing.channels; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java new file mode 100644 index 0000000000000..3e201641a529b --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.listener; + +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; + +import java.util.Objects; + +/** + * Tracer wrapped {@link ActionListener} + * @param <Response> response. + */ +public class TraceableActionListener<Response> implements ActionListener<Response> { + + private final ActionListener<Response> delegate; + private final Span span; + private final Tracer tracer; + + /** + * Constructor. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + private TraceableActionListener(ActionListener<Response> delegate, Span span, Tracer tracer) { + this.delegate = Objects.requireNonNull(delegate); + this.span = Objects.requireNonNull(span); + this.tracer = Objects.requireNonNull(tracer); + } + + /** + * Factory method. + * @param delegate delegate + * @param span span + * @param tracer tracer + * @return action listener + */ + public static <Response> ActionListener<Response> create(ActionListener<Response> delegate, Span span, Tracer tracer) { + if (FeatureFlags.isEnabled(FeatureFlags.TELEMETRY) == true) { + return new TraceableActionListener<>(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public void onResponse(Response response) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.onResponse(response); + } finally { + span.endSpan(); + } + + } + + @Override + public void onFailure(Exception e) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.onFailure(e); + } finally { + span.setError(e); + span.endSpan(); + } + + } +} diff --git a/plugins/custom-codecs/src/main/plugin-metadata/plugin-security.policy b/server/src/main/java/org/opensearch/telemetry/tracing/listener/package-info.java similarity index 62% rename from plugins/custom-codecs/src/main/plugin-metadata/plugin-security.policy rename to server/src/main/java/org/opensearch/telemetry/tracing/listener/package-info.java index 8161010cfa897..5dcb570c2bb2e 100644 --- a/plugins/custom-codecs/src/main/plugin-metadata/plugin-security.policy +++ b/server/src/main/java/org/opensearch/telemetry/tracing/listener/package-info.java @@ -6,6 +6,7 @@ * compatible open source license. */ -grant codeBase "${codebase.zstd-jni}" { - permission java.lang.RuntimePermission "loadLibrary.*"; -}; +/** + * This package contains classes needed for tracing requests. + */ +package org.opensearch.telemetry.tracing.listener;
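[Editor's note, not part of the patch] TraceableHttpChannel, TraceableRestChannel and TraceableActionListener all share one pattern: the static create(...) returns the traced wrapper only when the TELEMETRY feature flag is enabled and otherwise hands back the delegate unchanged, so call sites can wrap unconditionally. A rough usage sketch; the listener, span and tracer variables are assumed to be in scope:

    // runs the delegate inside the span, then ends the span in a finally block
    ActionListener<Void> traced = TraceableActionListener.create(listener, span, tracer);
    traced.onResponse(null);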
diff --git a/server/src/main/java/org/opensearch/throttling/tracker/AverageIOUsageTracker.java b/server/src/main/java/org/opensearch/throttling/tracker/AverageIOUsageTracker.java new file mode 100644 index 0000000000000..aa68b865c7964 --- /dev/null +++ b/server/src/main/java/org/opensearch/throttling/tracker/AverageIOUsageTracker.java @@ -0,0 +1,118 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.throttling.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.fs.FsInfo; +import org.opensearch.monitor.fs.FsService; +import org.opensearch.threadpool.ThreadPool; + +import java.util.HashMap; +import java.util.Map; + +public class AverageIOUsageTracker extends AbstractAverageUsageTracker { + + private static final Logger logger = LogManager.getLogger(AverageIOUsageTracker.class); + + private Map<String, DevicePreviousStats> previousIOTimeMap = new HashMap<>(); + + private FsService fsService; + + public AverageIOUsageTracker( + ThreadPool threadPool, + TimeValue pollingInterval, + TimeValue windowDuration, + ClusterSettings clusterSettings, + FsService fsService + ) { + super(threadPool, pollingInterval, windowDuration); + setFsService(fsService); + clusterSettings.addSettingsUpdateConsumer( + PerformanceTrackerSettings.GLOBAL_IO_WINDOW_DURATION_SETTING, + this::setWindowDuration + ); + } + + public FsService getFsService() { + return fsService; + } + + public void setFsService(FsService fsService) { + this.fsService = fsService; + } + + class DevicePreviousStats { + public long ioTime; + public double readTime; + public double writeTime; + public double readOps; + public double writeOps; + public DevicePreviousStats(long ioTime, double readTime, double writeTime, double readOps, double writeOps) { + this.ioTime = ioTime; + this.readTime = readTime; + this.writeTime = writeTime; + this.readOps = readOps; + this.writeOps = writeOps; + } + } + + private long monitorIOUtilisation() { + logger.debug("IO stats is triggered"); + Map<String, DevicePreviousStats> currentIOTimeMap = new HashMap<>(); + long totalOperations = 0; + for (FsInfo.DeviceStats devicesStat : this.fsService.stats().getIoStats().getDevicesStats()) { + logger.debug("Device Id: {} , IO time : {}", devicesStat.getDeviceName(), devicesStat.getCurrentIOTime()); + logger.debug("Read Latency : {} , Write latency : {}", devicesStat.getCurrentReadLatency(), devicesStat.getCurrentWriteLatency()); + logger.debug("Write time : {} , Read time : {}", devicesStat.getCurrentWriteTime(), devicesStat.getCurrentReadTime()); + logger.debug("Read latency diff : {} , Write latency diff : {}", devicesStat.getReadLatency(), devicesStat.getWriteLatency()); + logger.debug("Read time diff : {}, Write time diff : {}", devicesStat.getReadTime(), devicesStat.getWriteTime()); + logger.debug("Read latency : {} , Write latency : {}", devicesStat.getNewReadLatency(), devicesStat.getNewWriteLatency()); + if (previousIOTimeMap.containsKey(devicesStat.getDeviceName())) { + DevicePreviousStats previous = previousIOTimeMap.get(devicesStat.getDeviceName()); + long ioSpentTime = devicesStat.getCurrentIOTime() - previous.ioTime; + double ioUsePercent = (double) (ioSpentTime * 100) /
(10 * 1000); // NOTE: draft assumes a fixed 10s sampling window here + + double readOps = devicesStat.currentReadOperations() - previous.readOps; + double writeOps = devicesStat.currentWriteOperations() - previous.writeOps; + + double readTime = devicesStat.getCurrentReadTime() - previous.readTime; + double writeTime = devicesStat.getCurrentWriteTime() - previous.writeTime; + + // latency = time spent per operation over the interval + double readLatency = readOps > 0 ? readTime / readOps : 0; + double writeLatency = writeOps > 0 ? writeTime / writeOps : 0; + + logger.debug("IO use percent : {}", ioUsePercent); + logger.debug("read ops : {} , write ops : {} , read time : {} , write time : {}", readOps, writeOps, readTime, writeTime); + logger.debug("Read latency final : {} , write latency final : {}", readLatency, writeLatency); + } + + DevicePreviousStats ps = new DevicePreviousStats(devicesStat.getCurrentIOTime(), devicesStat.getCurrentReadTime(), + devicesStat.getCurrentWriteTime(), devicesStat.currentReadOperations(), devicesStat.currentWriteOperations()); + + currentIOTimeMap.put(devicesStat.getDeviceName(), ps); + totalOperations += devicesStat.readOperations() + devicesStat.writeOperations(); + } + previousIOTimeMap = currentIOTimeMap; + return totalOperations; + } + + @Override + public long getUsage() { + return monitorIOUtilisation(); + } +}
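[Editor's note, not part of the patch] The utilisation math above is the standard iostat-style calculation: field 12 of /proc/diskstats counts milliseconds the device spent doing IO, so the delta between two samples divided by the elapsed wall time gives busy percent. The draft hard-codes a 10 s window (10 * 1000); with the actual polling interval it would look like this (sketch, pollingIntervalMs assumed):

    long ioSpentTimeMs = currentIoTimeMs - previousIoTimeMs; // delta of /proc/diskstats field 12
    double ioUsePercent = (ioSpentTimeMs * 100.0) / pollingIntervalMs; // ~100 means fully busy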
diff --git a/server/src/main/java/org/opensearch/throttling/tracker/NodePerformanceTracker.java b/server/src/main/java/org/opensearch/throttling/tracker/NodePerformanceTracker.java index 636388317759f..6fcf668818968 100644 --- a/server/src/main/java/org/opensearch/throttling/tracker/NodePerformanceTracker.java +++ b/server/src/main/java/org/opensearch/throttling/tracker/NodePerformanceTracker.java @@ -14,6 +14,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.fs.FsService; import org.opensearch.node.PerformanceCollectorService; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -31,24 +32,30 @@ public class NodePerformanceTracker extends AbstractLifecycleComponent { private final ClusterSettings clusterSettings; private AverageCpuUsageTracker cpuUsageTracker; private AverageMemoryUsageTracker memoryUsageTracker; + + private AverageIOUsageTracker ioUsageTracker; private PerformanceCollectorService performanceCollectorService; private PerformanceTrackerSettings performanceTrackerSettings; private static final Logger logger = LogManager.getLogger(NodePerformanceTracker.class); private final TimeValue interval; + private final FsService fsService; + public static final String LOCAL_NODE = "LOCAL"; public NodePerformanceTracker( PerformanceCollectorService performanceCollectorService, ThreadPool threadPool, Settings settings, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + FsService fsService ) { this.performanceCollectorService = performanceCollectorService; this.threadPool = threadPool; this.clusterSettings = clusterSettings; this.performanceTrackerSettings = new PerformanceTrackerSettings(settings, clusterSettings); + this.fsService = fsService; interval = new TimeValue(performanceTrackerSettings.getRefreshInterval()); initialize(); } @@ -61,6 +68,10 @@ private double getAverageMemoryUsed() { return memoryUsageTracker.getAverage(); } + private double getAverageIOUsed() { + return ioUsageTracker.getAverage(); + } + private void setCpuUtilizationPercent(double cpuUtilizationPercent) { this.cpuUtilizationPercent = cpuUtilizationPercent; } @@ -79,7 +90,7 @@ public double getMemoryUtilizationPercent() { void doRun() { setCpuUtilizationPercent(getAverageCpuUsed()); setMemoryUtilizationPercent(getAverageMemoryUsed()); performanceCollectorService.addNodePerfStatistics( LOCAL_NODE, getCpuUtilizationPercent(), @@ -102,6 +113,14 @@ void initialize() { performanceTrackerSettings.getMemoryWindowDuration(), clusterSettings ); + + ioUsageTracker = new AverageIOUsageTracker( + threadPool, + performanceTrackerSettings.getIoPollingInterval(), + performanceTrackerSettings.getIoWindowDuration(), + clusterSettings, + fsService + ); } @Override @@ -110,11 +129,12 @@ protected void doStart() { try { doRun(); } catch (Exception e) { - logger.debug("failure in node performance tracker : {}", e); + logger.debug("failure in node performance tracker", e); } }, interval, ThreadPool.Names.GENERIC); cpuUsageTracker.doStart(); memoryUsageTracker.doStart(); + ioUsageTracker.doStart(); } @Override @@ -124,11 +144,13 @@ protected void doStop() { } cpuUsageTracker.doStop(); memoryUsageTracker.doStop(); + ioUsageTracker.doStop(); } @Override protected void doClose() throws IOException { cpuUsageTracker.doClose(); memoryUsageTracker.doClose(); + ioUsageTracker.doClose(); } }
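[Editor's note, not part of the patch] The PerformanceTrackerSettings diff below registers the IO tracker's polling interval and window as node settings (defaults: 1000 ms and 60 s, with the window dynamically updatable). A hypothetical node-level override using those keys, sketch only:

    Settings settings = Settings.builder()
        .put("node.perf_tracker.global_io_usage.polling_interval", "500ms")
        .put("node.perf_tracker.global_io_usage.window_duration", "30s")
        .build();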
diff --git a/server/src/main/java/org/opensearch/throttling/tracker/PerformanceTrackerSettings.java b/server/src/main/java/org/opensearch/throttling/tracker/PerformanceTrackerSettings.java index cd670c0b3e887..cce796fd86b96 100644 --- a/server/src/main/java/org/opensearch/throttling/tracker/PerformanceTrackerSettings.java +++ b/server/src/main/java/org/opensearch/throttling/tracker/PerformanceTrackerSettings.java @@ -13,12 +13,18 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +/** + * Settings related to node performance trackers + */ public class PerformanceTrackerSettings { private static class Defaults { private static final long POLLING_INTERVAL = 500; private static final long WINDOW_DURATION = 30; private static final long REFRESH_INTERVAL = 1000; + + private static final long IO_POLLING_INTERVAL = 1000; + private static final long IO_WINDOW_DURATION = 60; } public static final Setting<Long> REFRESH_INTERVAL_MILLIS = Setting.longSetting( @@ -52,21 +58,39 @@ private static class Defaults { Setting.Property.NodeScope ); + public static final Setting<TimeValue> GLOBAL_IO_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting( + "node.perf_tracker.global_io_usage.polling_interval", + TimeValue.timeValueMillis(Defaults.IO_POLLING_INTERVAL), + Setting.Property.NodeScope + ); + public static final Setting<TimeValue> GLOBAL_IO_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting( + "node.perf_tracker.global_io_usage.window_duration", + TimeValue.timeValueSeconds(Defaults.IO_WINDOW_DURATION), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + private volatile long refreshInterval; private volatile TimeValue cpuWindowDuration; private volatile TimeValue cpuPollingInterval; private volatile TimeValue memoryWindowDuration; private volatile TimeValue memoryPollingInterval; + private volatile TimeValue ioWindowDuration; + private volatile TimeValue ioPollingInterval; + public PerformanceTrackerSettings(Settings settings, ClusterSettings clusterSettings) { this.cpuPollingInterval = GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings); this.cpuWindowDuration = GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.get(settings); this.memoryPollingInterval = GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings); this.memoryWindowDuration = GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.get(settings); + this.ioWindowDuration = GLOBAL_IO_WINDOW_DURATION_SETTING.get(settings); + this.ioPollingInterval = GLOBAL_IO_AC_POLLING_INTERVAL_SETTING.get(settings); this.refreshInterval = REFRESH_INTERVAL_MILLIS.get(settings); clusterSettings.addSettingsUpdateConsumer(GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING, this::setCpuWindowDuration); clusterSettings.addSettingsUpdateConsumer(GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING, this::setMemoryWindowDuration); + clusterSettings.addSettingsUpdateConsumer(GLOBAL_IO_WINDOW_DURATION_SETTING, this::setIoWindowDuration); } public TimeValue getCpuWindowDuration() { @@ -96,4 +120,20 @@ public void setCpuWindowDuration(TimeValue cpuWindowDuration) { public void setMemoryWindowDuration(TimeValue memoryWindowDuration) { this.memoryWindowDuration = memoryWindowDuration; } + + public void setIoWindowDuration(TimeValue ioWindowDuration) { + this.ioWindowDuration = ioWindowDuration; + } + + public TimeValue getIoWindowDuration() { + return ioWindowDuration; + } + + public void setIoPollingInterval(TimeValue ioPollingInterval) { + this.ioPollingInterval = ioPollingInterval; + } + + public TimeValue getIoPollingInterval() { + return ioPollingInterval; + } } diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index ff604d3435f21..52274872e8cc8 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -66,6 +66,7 @@ import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -132,11 +133,11 @@ protected boolean removeEldestEntry(Map.Entry eldest) { // tracer log private final Logger tracerLog; - volatile String[] tracerLogInclude; volatile String[] tracerLogExclude; private final RemoteClusterService remoteClusterService; + private final Tracer tracer; /** if set will call requests sent to this id to shortcut and executed locally */ volatile DiscoveryNode localNode = null; @@ -190,7 +191,8 @@ public TransportService( TransportInterceptor transportInterceptor, Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, @Nullable ClusterSettings clusterSettings, - Set<String> taskHeaders + Set<String> taskHeaders, + Tracer tracer ) { this( settings, @@ -200,7 +202,8 @@ public TransportService( localNodeFactory, clusterSettings, taskHeaders, - new ClusterConnectionManager(settings, transport) + new ClusterConnectionManager(settings, transport), + tracer ); } @@ -212,7 +215,8 @@ public TransportService( Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, @Nullable ClusterSettings clusterSettings, Set<String> taskHeaders, - ConnectionManager connectionManager + ConnectionManager connectionManager, + Tracer tracer ) { this.transport = transport; transport.setSlowLogThreshold(TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING.get(settings)); @@ -227,6 +231,7 @@ public TransportService( this.interceptor = transportInterceptor; this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); this.remoteClusterClient = DiscoveryNode.isRemoteClusterClient(settings); + this.tracer = tracer; remoteClusterService = new RemoteClusterService(settings, this); responseHandlers =
transport.getResponseHandlers(); if (clusterSettings != null) { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java index 26b0c5ef05cdc..a015e671f4872 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java @@ -55,6 +55,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; @@ -139,7 +140,8 @@ public void setupForTest() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); final Settings.Builder nodeSettingsBuilder = Settings.builder(); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java index 535080b196f29..10e4ab6388be4 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java @@ -49,6 +49,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; @@ -111,7 +112,8 @@ public void setupForTest() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); new TransportClearVotingConfigExclusionsAction( diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index 414910a3cf5e6..06a83e6ffd984 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -399,9 +399,9 @@ public void testSerialization() throws IOException { if (nodesPerformanceStats == null) { assertNull(deserializedNodePerfStats); } else { - nodesPerformanceStats.getNodePerfStats().forEach((k, v) -> { - NodePerformanceStatistics aPerfStats = nodesPerformanceStats.getNodePerfStats().get(k); - NodePerformanceStatistics bPerfStats = nodesPerformanceStats.getNodePerfStats().get(k); + nodesPerformanceStats.getNodeIdToNodePerfStatsMap().forEach((k, v) -> { + NodePerformanceStatistics aPerfStats = nodesPerformanceStats.getNodeIdToNodePerfStatsMap().get(k); + NodePerformanceStatistics bPerfStats = nodesPerformanceStats.getNodeIdToNodePerfStatsMap().get(k); 
assertEquals(aPerfStats.getMemoryUtilizationPercent(), bPerfStats.getMemoryUtilizationPercent(), 0.0); assertEquals(aPerfStats.getCpuUtilizationPercent(), bPerfStats.getCpuUtilizationPercent(), 0.0); assertEquals(aPerfStats.getTimestamp(), bPerfStats.getTimestamp()); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 6a508f5843b22..07e149dd72164 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -60,6 +60,7 @@ import org.opensearch.tasks.TaskCancellationService; import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.threadpool.RunnableTaskExecutionListener; @@ -216,7 +217,8 @@ public TestNode(String name, ThreadPool threadPool, Settings settings) { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddressDiscoveryNodeFunction, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) { @Override protected TaskManager createTaskManager( diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java index 75707f2a7853a..ed73c2ef6ace5 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java @@ -23,7 +23,6 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; @@ -32,7 +31,7 @@ import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransport; import org.opensearch.transport.TransportService; @@ -87,7 +86,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); when(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(any())).thenReturn(mock(RemoteSegmentTransferTracker.class)); @@ -113,7 +113,6 @@ public void tearDown() throws Exception { } public void testAllShardCopies() throws Exception { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); RoutingTable routingTable = RoutingTable.builder().addAsNew(remoteStoreIndexMetadata).build(); Metadata metadata = Metadata.builder().put(remoteStoreIndexMetadata, false).build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).build(); @@ -133,7 +132,6 @@ public void testAllShardCopies() throws 
Exception { } public void testOnlyLocalShards() throws Exception { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); String[] concreteIndices = new String[] { INDEX.getName() }; RoutingTable routingTable = spy(RoutingTable.builder().addAsNew(remoteStoreIndexMetadata).build()); doReturn(new PlainShardsIterator(routingTable.allShards(INDEX.getName()).stream().map(Mockito::spy).collect(Collectors.toList()))) @@ -161,7 +159,6 @@ public void testOnlyLocalShards() throws Exception { } public void testOnlyRemoteStoreEnabledShardCopies() throws Exception { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); Index NEW_INDEX = new Index("newIndex", "newUUID"); IndexMetadata indexMetadataWithoutRemoteStore = IndexMetadata.builder(NEW_INDEX.getName()) .settings( diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index 371f326617b61..ef26bc225b0c7 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -62,6 +62,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ReplicationGroup; import org.opensearch.indices.IndicesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -139,7 +140,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java index 8f4e0e3b75e32..2d9ec2b6d3c02 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java @@ -46,6 +46,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.indices.IndicesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -85,7 +86,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java index d8e0f96292e27..f2b6688716e70 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java @@ -45,6 +45,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import 
org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -108,7 +109,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java index 1229584fa99ac..098bd8e8d8cfe 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java @@ -62,6 +62,7 @@ import org.opensearch.index.VersionType; import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.indices.SystemIndices; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.transport.CapturingTransport; @@ -153,7 +154,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java index b9dca5f2573f3..829eee45cac5b 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java @@ -57,6 +57,7 @@ import org.opensearch.index.IndexingPressureService; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.transport.CapturingTransport; @@ -125,7 +126,8 @@ private TransportBulkAction createAction(boolean controlled, AtomicLong expected TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java b/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java index 0503bb39427a1..52443e695e014 100644 --- a/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java +++ b/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java @@ -62,6 +62,7 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -168,7 +169,8 @@ public static void beforeClass() throws Exception { 
randomBase64UUID() ), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ) { @Override public TaskManager getTaskManager() { diff --git a/server/src/test/java/org/opensearch/action/main/MainActionTests.java b/server/src/test/java/org/opensearch/action/main/MainActionTests.java index fc9a83c10dbbc..b43dc2a80cd37 100644 --- a/server/src/test/java/org/opensearch/action/main/MainActionTests.java +++ b/server/src/test/java/org/opensearch/action/main/MainActionTests.java @@ -44,6 +44,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; @@ -109,7 +110,8 @@ public void testMainActionClusterAvailable() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); TransportMainAction action = new TransportMainAction(settings, transportService, mock(ActionFilters.class), clusterService); AtomicReference responseRef = new AtomicReference<>(); diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index e88cc4c5b1d52..fbac465f946f4 100644 --- a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -64,6 +64,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -145,7 +146,8 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index 897c2a6198eac..2643aa5b6db01 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -32,6 +32,7 @@ import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -180,7 +181,7 @@ public void testUpdatePitAfterCreatePitSuccess() throws InterruptedException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -273,7 +274,7 @@ public void testUpdatePitAfterCreatePitFailure() throws InterruptedException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -366,7 +367,7 @@ public void 
testUpdatePitFailureForNodeDrop() throws InterruptedException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -462,7 +463,7 @@ public void testUpdatePitFailureWhereAllNodesDown() throws InterruptedException Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); diff --git a/server/src/test/java/org/opensearch/action/search/MultiSearchActionTookTests.java b/server/src/test/java/org/opensearch/action/search/MultiSearchActionTookTests.java index 173bee40d9ae8..94ba5b0a8768b 100644 --- a/server/src/test/java/org/opensearch/action/search/MultiSearchActionTookTests.java +++ b/server/src/test/java/org/opensearch/action/search/MultiSearchActionTookTests.java @@ -49,6 +49,7 @@ import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -153,7 +154,8 @@ private TransportMultiSearchAction createTransportMultiSearchAction(boolean cont TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) { @Override public TaskManager getTaskManager() { diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java index 6713faf78a58c..8d3cdc070c695 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -27,6 +27,7 @@ import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; @@ -141,7 +142,7 @@ public void testDeletePitSuccess() throws InterruptedException, ExecutionExcepti Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -204,7 +205,7 @@ public void testDeleteAllPITSuccess() throws InterruptedException, ExecutionExce Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -279,7 +280,7 @@ public void testDeletePitWhenNodeIsDown() throws InterruptedException, Execution Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -342,7 +343,7 @@ public void testDeletePitWhenAllNodesAreDown() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -400,7 +401,7 @@ public void testDeletePitFailure() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -465,7 +466,7 @@ public void testDeleteAllPitWhenNodeIsDown() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -542,7 +543,7 @@ public void testDeleteAllPitWhenAllNodesAreDown() { Settings.EMPTY, Version.CURRENT, threadPool, - null + 
NoopTracer.INSTANCE ) ) { transportService.start(); @@ -615,7 +616,7 @@ public void testDeleteAllPitFailure() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); diff --git a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java index 6bb7401615b8f..48970e2b96add 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java @@ -51,6 +51,7 @@ import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -87,7 +88,8 @@ public void testParentTaskId() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) { @Override public TaskManager getTaskManager() { @@ -151,7 +153,8 @@ public void testBatchExecute() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) { @Override public TaskManager getTaskManager() { diff --git a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java index e278f088508fc..c4bf8a5d87172 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java @@ -75,6 +75,7 @@ import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.sort.SortBuilders; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -234,7 +235,14 @@ public void testMergeShardsIterators() { } public void testProcessRemoteShards() { - try (TransportService transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + TransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { RemoteClusterService service = transportService.getRemoteClusterService(); assertFalse(service.isCrossClusterSearchEnabled()); Map searchShardsResponseMap = new HashMap<>(); @@ -451,7 +459,9 @@ public void testCCSRemoteReduceMergeFails() throws Exception { OriginalIndices localIndices = local ? 
new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); Function reduceContext = finalReduce -> null; - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); @@ -507,7 +517,9 @@ public void testCCSRemoteReduce() throws Exception { OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; int totalClusters = numClusters + (local ? 1 : 0); TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); @@ -748,7 +760,9 @@ public void testCollectSearchShards() throws Exception { Settings.Builder builder = Settings.builder(); MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); Settings settings = builder.build(); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); diff --git a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 22a07f15fc5ed..4305151965ab6 100644 --- a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -68,6 +68,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -237,7 +238,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index 56a31f4d3cbd7..9ae1310a8b15c 100644 --- 
a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -63,6 +63,7 @@ import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -117,7 +118,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java index 28f01d0e6ea4a..445934b0ccdfd 100644 --- a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java @@ -46,6 +46,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -209,7 +210,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java index 9022b32630d5a..77c9c64ad6611 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java @@ -62,6 +62,7 @@ import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.rest.RestStatus; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -125,7 +126,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java index 6b717fe187078..0bee99f4d5656 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java +++ 
b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java @@ -87,6 +87,7 @@ import org.opensearch.indices.IndexClosedException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.cluster.ClusterStateChanges; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.test.transport.MockTransportService; @@ -195,7 +196,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -1325,7 +1327,8 @@ public void testRetryOnReplicaWithRealTransport() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java index 088c2b0eb14f4..839ac7ab5bb92 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java @@ -65,6 +65,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.indices.IndicesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; @@ -232,7 +233,8 @@ public String executor() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, bta -> node1, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java index 06a13976756a9..4a2185d1558f7 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java @@ -37,6 +37,7 @@ import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -102,7 +103,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java 
b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java index c6421bfa77e70..9d2069ac16190 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java @@ -65,6 +65,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.node.NodeClosedException; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; @@ -288,7 +289,8 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -407,7 +409,8 @@ public void testPrimaryClosedDoesNotFailShard() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -461,7 +464,8 @@ protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentF TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ), TransportWriteActionTests.this.clusterService, null, diff --git a/server/src/test/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/server/src/test/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index 4a540f2273739..118b4e596fc66 100644 --- a/server/src/test/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -57,6 +57,7 @@ import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -188,7 +189,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java b/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java index b01ac39dc515e..0868421fe1d41 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java +++ b/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java @@ -61,6 +61,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import 
org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -107,7 +108,8 @@ public static void beforeClass() throws Exception { randomBase64UUID() ), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ) { @Override public TaskManager getTaskManager() { diff --git a/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java index a5017867c2e74..4cf82f1dabab3 100644 --- a/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java @@ -50,6 +50,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; @@ -552,7 +553,8 @@ private TestTransportService(Transport transport, ThreadPool threadPool) { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), UUIDs.randomBase64UUID()), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java index 9e8ef3a325d92..efe91de1ae1a8 100644 --- a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java @@ -57,6 +57,7 @@ import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -155,7 +156,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java index 0cde0262a8f38..775d113f986ca 100644 --- a/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java @@ -30,6 +30,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; @@ -90,8 +91,8 @@ public void setUpService() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> 
clusterService.state().nodes().get("nodes1"), null, - Collections.emptySet() - + Collections.emptySet(), + NoopTracer.INSTANCE ); Settings.Builder settingsBuilder = Settings.builder() diff --git a/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealthTests.java b/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealthTests.java index 822d9c416d8d0..7910daebb00de 100644 --- a/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealthTests.java +++ b/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealthTests.java @@ -16,6 +16,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -64,7 +65,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealthTests.java b/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealthTests.java index 8afe343ccd56d..b68f0f2375354 100644 --- a/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealthTests.java +++ b/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealthTests.java @@ -29,6 +29,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -82,7 +83,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java index 03b35fe8c9f36..0b84eb19f4264 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.settings.Settings; import org.opensearch.discovery.DiscoveryModule; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.transport.TransportRequest; @@ -100,7 +101,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - Collections.emptySet() + 
Collections.emptySet(), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java index be7b32d4aef11..bfb225854979b 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.settings.Settings; import org.opensearch.discovery.DiscoveryModule; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.transport.TransportRequest; @@ -101,7 +102,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index ef56d70d6153c..d1c2dda615992 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -925,19 +925,18 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final VotingConfiguration initialConfig = VotingConfiguration.of(node1); final ClusterState clusterState = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L); - Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState)) - .thenReturn( - new ClusterMetadataManifest( - 0L, - 0L, - randomAlphaOfLength(10), - randomAlphaOfLength(10), - Version.CURRENT, - randomAlphaOfLength(10), - false, - Collections.emptyList() - ) - ); + final ClusterMetadataManifest manifest = new ClusterMetadataManifest( + 0L, + 0L, + randomAlphaOfLength(10), + randomAlphaOfLength(10), + Version.CURRENT, + randomAlphaOfLength(10), + false, + Collections.emptyList(), + randomAlphaOfLength(10) + ); + Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState)).thenReturn(manifest); final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, ps1); @@ -964,26 +963,11 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, settings); coordinationState.handlePrePublish(clusterState); - Mockito.verifyNoInteractions(remoteClusterStateService); + Mockito.verify(remoteClusterStateService, Mockito.times(1)).writeFullMetadata(clusterState); assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState(), equalTo(clusterState)); - final ClusterState clusterState2 = clusterState(0L, 1L, node1, initialConfig, initialConfig, 42L); - final ClusterMetadataManifest manifest2 = new ClusterMetadataManifest( - 0L, - 1L, - randomAlphaOfLength(10), - randomAlphaOfLength(10), - Version.CURRENT, - randomAlphaOfLength(10), - false, - Collections.emptyList() - ); - 
Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState2)).thenReturn(manifest2); - coordinationState.handlePrePublish(clusterState2); - Mockito.verify(remoteClusterStateService, Mockito.times(1)).writeFullMetadata(clusterState2); - coordinationState.handlePreCommit(); - Mockito.verify(remoteClusterStateService, Mockito.times(1)).markLastStateAsCommitted(clusterState2, manifest2); + Mockito.verify(remoteClusterStateService, Mockito.times(1)).markLastStateAsCommitted(clusterState, manifest); } public static CoordinationState createCoordinationState( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java index 30512fd96088e..c152a1606681e 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java @@ -46,6 +46,7 @@ import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.EqualsHashCodeTestUtils.CopyFunction; import org.opensearch.test.OpenSearchTestCase; @@ -123,7 +124,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -285,7 +287,8 @@ public String toString() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -371,7 +374,8 @@ public String toString() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -475,7 +479,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> follower, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -546,7 +551,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> follower, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -701,7 +707,8 @@ public void testPreferClusterManagerNodes() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> nodes.get(0), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); final FollowersChecker followersChecker = new FollowersChecker( Settings.EMPTY, diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java index 3fa5768f4f614..78c3b5d45a9ab 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java @@ -50,6 +50,7 @@ import org.opensearch.monitor.StatusInfo; import 
org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.test.transport.CapturingTransport.CapturedRequest; @@ -96,7 +97,8 @@ public void testJoinDeduplication() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); JoinHelper joinHelper = new JoinHelper( Settings.EMPTY, @@ -281,7 +283,8 @@ public void testJoinFailureOnUnhealthyNodes() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); AtomicReference nodeHealthServiceStatus = new AtomicReference<>(new StatusInfo(UNHEALTHY, "unhealthy-info")); JoinHelper joinHelper = new JoinHelper( @@ -472,7 +475,8 @@ private TestClusterSetup getTestClusterSetup(Version version, boolean isCapturin TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); } else { transportService = mockTransport.createTransportService( @@ -481,7 +485,8 @@ private TestClusterSetup getTestClusterSetup(Version version, boolean isCapturin TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); } JoinHelper joinHelper = new JoinHelper( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java index 180d0ffe649e2..8915f4c5c1274 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java @@ -43,6 +43,7 @@ import org.opensearch.core.transport.TransportResponse; import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.monitor.StatusInfo; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.EqualsHashCodeTestUtils.CopyFunction; import org.opensearch.test.OpenSearchTestCase; @@ -165,7 +166,8 @@ public String toString() { NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -281,7 +283,8 @@ public String toString() { NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -393,7 +396,8 @@ public String toString() { NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -438,7 +442,8 @@ public void testLeaderBehaviour() { NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index 766a20fda8d28..d94f3fb304fe2 100644 --- 
a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -61,6 +61,7 @@ import org.opensearch.monitor.StatusInfo; import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; @@ -247,7 +248,8 @@ protected void onSendRequest( TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> initialState.nodes().getLocalNode(), clusterSettings, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(term, initialState)); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PreVoteCollectorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PreVoteCollectorTests.java index 8f779097d50d4..5ddf614db3334 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/PreVoteCollectorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/PreVoteCollectorTests.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.monitor.StatusInfo; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.transport.ConnectTransportException; @@ -136,7 +137,8 @@ public void handleRemoteError(long requestId, Throwable t) { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java index 2ff78d3b68082..6d94054afdea2 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java @@ -43,6 +43,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.node.Node; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.transport.TransportService; @@ -68,7 +69,8 @@ public void testDiffSerializationFailure() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, clusterSettings, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); final PublicationTransportHandler handler = new PublicationTransportHandler( transportService, diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java index 7d50ab5dfeb1b..627f31502a417 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java +++ 
b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java @@ -28,6 +28,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; @@ -91,7 +92,8 @@ public void setTransportServiceAndDefaultClusterState() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> clusterService.state().nodes().get("node1"), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); final Settings.Builder nodeSettingsBuilder = Settings.builder(); diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 208bbb6a7a96d..6c15d1dc54aea 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -28,6 +28,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; @@ -103,7 +104,8 @@ public void setUpService() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> clusterService.state().nodes().get("node1"), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); final Settings.Builder nodeSettingsBuilder = Settings.builder() diff --git a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java index c1c14188db97c..795dc8a624e38 100644 --- a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java +++ b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java @@ -59,6 +59,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.test.transport.CapturingTransport; @@ -117,7 +118,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 86e154c547e07..8adaae6d230cd 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -61,7 +61,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import 
org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -79,7 +78,6 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.ClusterServiceUtils; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.gateway.TestGatewayAllocator; @@ -1221,7 +1219,6 @@ public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettin .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); final Settings.Builder requestSettings = Settings.builder(); @@ -1253,7 +1250,6 @@ public void testRemoteStoreImplicitOverrideReplicationTypeToSegmentForRemoteStor .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); final Settings.Builder requestSettings = Settings.builder(); @@ -1285,7 +1281,6 @@ public void testRemoteStoreNoUserOverrideIndexSettings() { .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); Settings indexSettings = aggregateIndexSettings( @@ -1337,7 +1332,7 @@ public void testRemoteStoreDisabledByUserIndexSettings() { assertThat(validationErrors.size(), is(1)); assertThat( validationErrors.get(0), - is(String.format(Locale.ROOT, "expected [%s] to be private but it was not", SETTING_REMOTE_STORE_ENABLED)) + is(String.format(Locale.ROOT, "private index setting [%s] can not be set explicitly", SETTING_REMOTE_STORE_ENABLED)) ); })); } @@ -1371,7 +1366,13 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { assertThat(validationErrors.size(), is(1)); assertThat( validationErrors.get(0), - is(String.format(Locale.ROOT, "expected [%s] to be private but it was not", SETTING_REMOTE_SEGMENT_STORE_REPOSITORY)) + is( + String.format( + Locale.ROOT, + "private index setting [%s] can not be set explicitly", + SETTING_REMOTE_SEGMENT_STORE_REPOSITORY + ) + ) ); })); } @@ -1404,7 +1405,13 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { assertThat(validationErrors.size(), is(1)); assertThat( validationErrors.get(0), - is(String.format(Locale.ROOT, "expected [%s] to be private but it was not", SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY)) + is( + String.format( + Locale.ROOT, + "private index setting [%s] can not be set explicitly", + SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY + ) + ) ); })); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java index b1ba52204c47a..5c0bdc8547f8b 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java @@ -31,6 
+31,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; @@ -94,8 +95,8 @@ public void setUpService() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> clusterService.state().nodes().get("nodes1"), null, - Collections.emptySet() - + Collections.emptySet(), + NoopTracer.INSTANCE ); Settings.Builder settingsBuilder = Settings.builder() diff --git a/server/src/test/java/org/opensearch/common/RoundingTests.java b/server/src/test/java/org/opensearch/common/RoundingTests.java index e0c44e3516e7b..0ebfe02dc7641 100644 --- a/server/src/test/java/org/opensearch/common/RoundingTests.java +++ b/server/src/test/java/org/opensearch/common/RoundingTests.java @@ -1143,6 +1143,28 @@ public void testNonMillisecondsBasedUnitCalendarRoundingSize() { assertThat(prepared.roundingSize(thirdQuarter, Rounding.DateTimeUnit.HOUR_OF_DAY), closeTo(2208.0, 0.000001)); } + public void testArrayRoundingImplementations() { + int length = randomIntBetween(1, 256); + long[] values = new long[length]; + for (int i = 1; i < values.length; i++) { + values[i] = values[i - 1] + (randomNonNegativeLong() % 100); + } + + Rounding.Prepared binarySearchImpl = new Rounding.BinarySearchArrayRounding(values, length, null); + Rounding.Prepared linearSearchImpl = new Rounding.BidirectionalLinearSearchArrayRounding(values, length, null); + + for (int i = 0; i < 100000; i++) { + long key = values[0] + (randomNonNegativeLong() % (100 + values[length - 1] - values[0])); + assertEquals(binarySearchImpl.round(key), linearSearchImpl.round(key)); + } + + AssertionError exception = expectThrows(AssertionError.class, () -> { binarySearchImpl.round(values[0] - 1); }); + assertEquals("utcMillis must be after " + values[0], exception.getMessage()); + + exception = expectThrows(AssertionError.class, () -> { linearSearchImpl.round(values[0] - 1); }); + assertEquals("utcMillis must be after " + values[0], exception.getMessage()); + } + private void assertInterval(long rounded, long nextRoundingValue, Rounding rounding, int minutes, ZoneId tz) { assertInterval(rounded, dateBetween(rounded, nextRoundingValue), nextRoundingValue, rounding, tz); long millisPerMinute = 60_000; diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java index ed08a436d3ca0..21b7b47390a9b 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java @@ -8,6 +8,7 @@ package org.opensearch.common.blobstore.stream.read.listener; +import org.apache.lucene.tests.util.LuceneTestCase.SuppressFileSystems; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.blobstore.stream.read.ReadContext; @@ -32,6 +33,12 @@ import static org.opensearch.common.blobstore.stream.read.listener.ListenerTestUtils.CountingCompletionListener; +/* + WindowsFS tries to simulate file handles in a best case simulation. 
+ The deletion of an open file on an actual Windows system will be performed as soon as the last handle
+ is closed, which this simulation does not account for. We therefore prevent use of WindowsFS for these tests.
+ */
+@SuppressFileSystems("WindowsFS")
 public class ReadContextListenerTests extends OpenSearchTestCase {
     private Path path;
@@ -70,7 +77,6 @@ public void testReadContextListener() throws InterruptedException, IOException {
         assertEquals(NUMBER_OF_PARTS * PART_SIZE, Files.size(fileLocation));
     }
-    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9776")
     public void testReadContextListenerFailure() throws Exception {
         Path fileLocation = path.resolve(UUID.randomUUID().toString());
         List blobPartStreams = initializeBlobPartStreams();
@@ -100,7 +106,7 @@ public int available() {
         readContextListener.onResponse(readContext);
         countDownLatch.await();
-        assertBusy(() -> { assertFalse(Files.exists(fileLocation)); });
+        assertFalse(Files.exists(fileLocation));
     }
     public void testReadContextListenerException() {
diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java
index 2e3c98db6fa81..d28a4a51999e6 100644
--- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java
+++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java
@@ -47,6 +47,8 @@
 import org.opensearch.http.HttpStats;
 import org.opensearch.http.NullDispatcher;
 import org.opensearch.plugins.NetworkPlugin;
+import org.opensearch.telemetry.tracing.Tracer;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
@@ -143,7 +145,8 @@ public Map> getHttpTransports(
             NamedXContentRegistry xContentRegistry,
             NetworkService networkService,
             HttpServerTransport.Dispatcher requestDispatcher,
-            ClusterSettings clusterSettings
+            ClusterSettings clusterSettings,
+            Tracer tracer
         ) {
             return Collections.singletonMap("custom", custom);
         }
@@ -188,7 +191,8 @@ public Map> getHttpTransports(
             NamedXContentRegistry xContentRegistry,
             NetworkService networkService,
             HttpServerTransport.Dispatcher requestDispatcher,
-            ClusterSettings clusterSettings
+            ClusterSettings clusterSettings,
+            Tracer tracer
         ) {
             Map> supplierMap = new HashMap<>();
             supplierMap.put("custom", custom);
@@ -231,7 +235,8 @@ public Map> getHttpTransports(
             NamedXContentRegistry xContentRegistry,
             NetworkService networkService,
             HttpServerTransport.Dispatcher requestDispatcher,
-            ClusterSettings clusterSettings
+            ClusterSettings clusterSettings,
+            Tracer tracer
         ) {
             Map> supplierMap = new HashMap<>();
             supplierMap.put("custom", custom);
@@ -313,7 +318,8 @@ private NetworkModule newNetworkModule(Settings settings, NetworkPlugin...
plugi xContentRegistry(), null, new NullDispatcher(), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ); } } diff --git a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java index 7c0dca3803cb2..b33ebf8333b36 100644 --- a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java @@ -48,6 +48,7 @@ import org.opensearch.gateway.GatewayMetaState; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.DiscoveryPlugin; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; @@ -96,7 +97,7 @@ default Map> getSeedHostProviders( public void setupDummyServices() { threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); clusterManagerService = mock(ClusterManagerService.class); namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); clusterApplier = mock(ClusterApplier.class); diff --git a/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java b/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java index ac2bcfe92ebaf..688a532a61c4a 100644 --- a/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java +++ b/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java @@ -42,6 +42,7 @@ import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -114,7 +115,8 @@ public BoundTransportAddress boundAddress() { transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - null + null, + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java b/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java index cf53bd9251b65..0d694bcfa135b 100644 --- a/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java +++ b/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java @@ -44,6 +44,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; @@ -122,7 +123,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req 
TransportService.NOOP_TRANSPORT_INTERCEPTOR, address -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java index 76520f9684c23..f861ab90896db 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java @@ -44,6 +44,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.PeerFinder.TransportAddressConnector; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.test.transport.CapturingTransport.CapturedRequest; @@ -242,7 +243,8 @@ public void setup() { boundTransportAddress -> localNode, null, emptySet(), - connectionManager + connectionManager, + NoopTracer.INSTANCE ); transportService.start(); diff --git a/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java b/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java index d2a1be87388ad..dc0829adac101 100644 --- a/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java +++ b/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java @@ -48,6 +48,7 @@ import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -203,7 +204,8 @@ public BoundTransportAddress boundAddress() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); closeables.push(transportService); final List transportAddresses = SeedHostsResolver.resolveHostsLists( @@ -261,7 +263,8 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); closeables.push(transportService); @@ -326,7 +329,8 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); closeables.push(transportService); final TimeValue resolveTimeout = TimeValue.timeValueSeconds(randomIntBetween(3, 5)); @@ -402,7 +406,8 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); closeables.push(transportService); recreateSeedHostsResolver( @@ -446,7 +451,8 @@ public BoundTransportAddress boundAddress() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); closeables.push(transportService); final List transportAddresses = SeedHostsResolver.resolveHostsLists( diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java 
b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index 697cc92e82a5e..57883f1e5914a 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -45,6 +45,7 @@ import org.opensearch.identity.IdentityService; import org.opensearch.plugins.ExtensionAwarePlugin; import org.opensearch.rest.RestController; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; @@ -130,7 +131,8 @@ public void setup() throws Exception { Version.CURRENT ), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); actionModule = mock(ActionModule.class); extAwarePlugin = new ExtensionAwarePlugin() { @@ -761,7 +763,8 @@ public void testRegisterHandler() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) ); extensionsManager.initializeServicesAndRestHandler( diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java index 3a7e994f180d4..1dede94c68208 100644 --- a/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java @@ -24,6 +24,7 @@ import org.opensearch.extensions.AcknowledgedResponse; import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.rest.RestSendToExtensionActionTests; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpNodeClient; import org.opensearch.test.transport.MockTransportService; @@ -83,7 +84,8 @@ public void setup() throws Exception { Version.CURRENT ), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); discoveryExtensionNode = new DiscoveryExtensionNode( "firstExtension", diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java index 455c9eef19bc7..87767978147cd 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java @@ -22,6 +22,7 @@ import org.opensearch.extensions.ExtensionsManager; import org.opensearch.extensions.ExtensionsSettings; import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestChannel; import org.opensearch.test.rest.FakeRestRequest; @@ -81,7 +82,8 @@ public void setup() throws Exception { Version.CURRENT ), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java index 0e75f0a8dacff..992e37b3f120a 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java +++ 
b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java @@ -32,6 +32,7 @@ import org.opensearch.rest.NamedRoute; import org.opensearch.rest.RestHandler.Route; import org.opensearch.rest.RestRequest.Method; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -94,7 +95,8 @@ public void setup() throws Exception { Version.CURRENT ), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); discoveryExtensionNode = new DiscoveryExtensionNode( "firstExtension", diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index 98a9c0ded2d9d..486717faaf864 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; import org.opensearch.cluster.coordination.CoordinationState; +import org.opensearch.cluster.coordination.CoordinationState.PersistedState; import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.metadata.IndexMetadata; @@ -51,6 +52,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; @@ -63,8 +65,11 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.GatewayMetaState.RemotePersistedState; +import org.opensearch.gateway.PersistedClusterStateService.Writer; import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.recovery.RemoteStoreRestoreService; +import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.fs.FsRepository; @@ -87,6 +92,7 @@ import org.mockito.Mockito; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -96,8 +102,12 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static 
org.mockito.Mockito.verifyNoInteractions;
import static org.mockito.Mockito.when;

public class GatewayMetaStatePersistedStateTests extends OpenSearchTestCase {
@@ -108,6 +118,8 @@ public class GatewayMetaStatePersistedStateTests extends OpenSearchTestCase {
    private DiscoveryNode localNode;
    private BigArrays bigArrays;
+   private MockGatewayMetaState gateway;
+
    @Override
    public void setUp() throws Exception {
        bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
@@ -127,11 +139,13 @@ public void setUp() throws Exception {
    @Override
    public void tearDown() throws Exception {
        nodeEnvironment.close();
+       IOUtils.close(gateway);
        super.tearDown();
    }
-   private CoordinationState.PersistedState newGatewayPersistedState() {
-       final MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays);
+   private CoordinationState.PersistedState newGatewayPersistedState() throws IOException {
+       IOUtils.close(gateway);
+       gateway = new MockGatewayMetaState(localNode, bigArrays);
        final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry();
        gateway.start(settings, nodeEnvironment, xContentRegistry(), persistedStateRegistry);
        final CoordinationState.PersistedState persistedState = gateway.getPersistedState();
@@ -437,7 +451,10 @@ public void testDataOnlyNodePersistence() throws Exception {
        cleanup.add(gateway);
        final TransportService transportService = mock(TransportService.class);
        TestThreadPool threadPool = new TestThreadPool("testMarkAcceptedConfigAsCommittedOnDataOnlyNode");
-       cleanup.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
+       cleanup.add(() -> {
+           ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
+           threadPool.shutdown();
+       });
        when(transportService.getThreadPool()).thenReturn(threadPool);
        ClusterService clusterService = mock(ClusterService.class);
        when(clusterService.getClusterSettings()).thenReturn(
@@ -464,7 +481,8 @@ public void testDataOnlyNodePersistence() throws Exception {
                ),
                settings,
                new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
-               () -> 0L
+               () -> 0L,
+               threadPool
            );
        } else {
            return null;
@@ -479,7 +497,8 @@ public void testDataOnlyNodePersistence() throws Exception {
            null,
            persistedClusterStateService,
            remoteClusterStateServiceSupplier.get(),
-           new PersistedStateRegistry()
+           new PersistedStateRegistry(),
+           null
        );
        final CoordinationState.PersistedState persistedState = gateway.getPersistedState();
        assertThat(persistedState, instanceOf(GatewayMetaState.AsyncLucenePersistedState.class));
@@ -712,7 +731,7 @@ public void testRemotePersistedState() throws IOException {
        );
        remotePersistedState.setLastAcceptedState(clusterState);
-       Mockito.verify(remoteClusterStateService, times(0)).writeFullMetadata(Mockito.any());
+       Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState);
        assertThat(remotePersistedState.getLastAcceptedState(), equalTo(clusterState));
        assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm));
@@ -723,7 +742,7 @@
        );
        remotePersistedState.setLastAcceptedState(secondClusterState);
-       Mockito.verify(remoteClusterStateService, times(1)).writeFullMetadata(Mockito.any());
+       Mockito.verify(remoteClusterStateService, times(1)).writeFullMetadata(secondClusterState);
        assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState));
        assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm));
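The two verify(...) tightenings above replace argument-agnostic checks (Mockito.any()) with the exact ClusterState instance that was accepted, so a regression that uploads the wrong state now fails the assertion instead of slipping through. A minimal, self-contained sketch of the difference (the List mock is purely illustrative, not from this PR):

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.verify;

    import java.util.List;

    public class ExactArgumentVerificationSketch {
        public static void main(String[] args) {
            @SuppressWarnings("unchecked")
            List<String> uploads = mock(List.class);

            uploads.add("cluster-state-v1");

            // Loose: passes as long as add() was called with some string.
            verify(uploads).add(any(String.class));

            // Strict: also pins the argument, catching a caller that
            // passes the wrong object.
            verify(uploads).add("cluster-state-v1");
        }
    }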
@@ -748,20 +767,19 @@ public void testRemotePersistedStateExceptionOnFullStateUpload() throws IOExcept
            Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build()
        );
-       remotePersistedState.setLastAcceptedState(clusterState);
-
-       final ClusterState secondClusterState = createClusterState(
-           randomNonNegativeLong(),
-           Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build()
-       );
-
-       assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(secondClusterState));
+       assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(clusterState));
    }

    public void testGatewayForRemoteState() throws IOException {
        MockGatewayMetaState gateway = null;
        try {
-           gateway = new MockGatewayMetaState(localNode, bigArrays);
+           RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class);
+           when(remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")).thenReturn("test-cluster-uuid");
+           RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class);
+           when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn(
+               RemoteRestoreResult.build("test-cluster-uuid", null, ClusterState.EMPTY_STATE)
+           );
+           gateway = new MockGatewayMetaState(localNode, bigArrays, remoteClusterStateService, remoteStoreRestoreService);
            final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry();
            String stateRepoTypeAttributeKey = String.format(
@@ -798,6 +816,173 @@ public void testGatewayForRemoteState() throws IOException {
        }
    }

+   public void testGatewayForRemoteStateForInitialBootstrap() throws IOException {
+       MockGatewayMetaState gateway = null;
+       try {
+           final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class);
+           when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn(ClusterState.UNKNOWN_UUID);
+
+           final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class);
+           when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn(
+               RemoteRestoreResult.build("test-cluster-uuid", null, ClusterState.EMPTY_STATE)
+           );
+           final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry();
+           gateway = newGatewayForRemoteState(
+               remoteClusterStateService,
+               remoteStoreRestoreService,
+               persistedStateRegistry,
+               ClusterState.EMPTY_STATE
+           );
+           final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState();
+           PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE);
+           verify(remoteClusterStateService).getLastKnownUUIDFromRemote(Mockito.any()); // initial bootstrap only looks up the previous cluster UUID
+           verifyNoInteractions(remoteStoreRestoreService);
+           assertThat(remotePersistedState.getLastAcceptedState(), nullValue());
+           assertThat(lucenePersistedState.getLastAcceptedState().metadata(), equalTo(ClusterState.EMPTY_STATE.metadata()));
+       } finally {
+           IOUtils.close(gateway);
+       }
+   }
+
+   public void testGatewayForRemoteStateForNodeReplacement() throws IOException {
+       MockGatewayMetaState gateway = null;
+       try {
+           final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class);
+           when(remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")).thenReturn("test-cluster-uuid");
+           final ClusterState previousState = createClusterState(
+               randomNonNegativeLong(),
+               Metadata.builder()
.coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) + .put( + IndexMetadata.builder("test-index1") + .settings(settings(Version.CURRENT).put(SETTING_INDEX_UUID, randomAlphaOfLength(10))) + .numberOfShards(5) + .numberOfReplicas(1) + .build(), + false + ) + .clusterUUID(randomAlphaOfLength(10)) + .build() + ); + when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn( + previousState.metadata().clusterUUID() + ); + + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, previousState) + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE + ); + final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + verify(remoteClusterStateService).getLastKnownUUIDFromRemote(Mockito.any()); + verify(remoteStoreRestoreService).restore(any(), any(), anyBoolean(), any()); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat(lucenePersistedState.getLastAcceptedState().metadata(), equalTo(previousState.metadata())); + } finally { + IOUtils.close(gateway); + } + } + + public void testGatewayForRemoteStateForNodeReboot() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + final IndexMetadata indexMetadata = IndexMetadata.builder("test-index1") + .settings(settings(Version.CURRENT).put(SETTING_INDEX_UUID, randomAlphaOfLength(10))) + .numberOfShards(5) + .numberOfReplicas(1) + .build(); + final ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder() + .coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) + .put(indexMetadata, false) + .clusterUUID(randomAlphaOfLength(10)) + .build() + ); + gateway = newGatewayForRemoteState(remoteClusterStateService, remoteStoreRestoreService, persistedStateRegistry, clusterState); + final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + verifyNoInteractions(remoteClusterStateService); + verifyNoInteractions(remoteStoreRestoreService); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + logger.info("lucene state metadata: {}", lucenePersistedState.getLastAcceptedState().toString()); + logger.info("initial metadata: {}", clusterState.toString()); + assertThat(lucenePersistedState.getLastAcceptedState().metadata().indices().size(), equalTo(1)); + assertThat(lucenePersistedState.getLastAcceptedState().metadata().indices().get("test-index1"), equalTo(indexMetadata)); + } finally { + IOUtils.close(gateway); + } + } + + private MockGatewayMetaState newGatewayForRemoteState( + RemoteClusterStateService remoteClusterStateService, + RemoteStoreRestoreService 
remoteStoreRestoreService, + PersistedStateRegistry persistedStateRegistry, + ClusterState currentState + ) throws IOException { + MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays); + String randomRepoName = "randomRepoName"; + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + randomRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + randomRepoName + ); + Settings settingWithRemoteStateEnabled = Settings.builder() + .put(settings) + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName) + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + final TransportService transportService = mock(TransportService.class); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService( + nodeEnvironment, + xContentRegistry(), + getBigArrays(), + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + () -> 0L + ); + if (!ClusterState.EMPTY_STATE.equals(currentState)) { + Writer writer = persistedClusterStateService.createWriter(); + writer.writeFullStateAndCommit(currentState.term(), currentState); + writer.close(); + } + final MetaStateService metaStateService = mock(MetaStateService.class); + when(metaStateService.loadFullState()).thenReturn(new Tuple<>(Manifest.empty(), ClusterState.EMPTY_STATE.metadata())); + gateway.start( + settingWithRemoteStateEnabled, + transportService, + clusterService, + metaStateService, + null, + null, + persistedClusterStateService, + remoteClusterStateService, + persistedStateRegistry, + remoteStoreRestoreService + ); + return gateway; + } + private static BigArrays getBigArrays() { return usually() ? 
BigArrays.NON_RECYCLING_INSTANCE diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java index 449af91c34531..9f8dde5ba9d45 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -36,7 +36,8 @@ public void testClusterMetadataManifestXContent() throws IOException { Version.CURRENT, "test-node-id", false, - Collections.singletonList(uploadedIndexMetadata) + Collections.singletonList(uploadedIndexMetadata), + "prev-cluster-uuid" ); final XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); @@ -58,7 +59,8 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { Version.CURRENT, "B10RX1f5RJenMQvYccCgSQ", true, - randomUploadedIndexMetadataList() + randomUploadedIndexMetadataList(), + "yfObdx8KSMKKrXf8UyHhM" ); { // Mutate Cluster Term EqualsHashCodeTestUtils.checkEqualsAndHashCode( @@ -165,6 +167,22 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { } ); } + { // Mutate Previous cluster UUID + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.previousClusterUUID("vZX62DCQEOzGXlxXCrEu"); + return builder.build(); + } + ); + + } } private List randomUploadedIndexMetadataList() { diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index da1f97f7042f1..9f5067420aab1 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -41,6 +41,9 @@ import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -54,13 +57,17 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; +import java.util.Set; import java.util.function.Supplier; import org.mockito.ArgumentCaptor; +import org.mockito.ArgumentMatchers; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -78,6 +85,7 @@ public class RemoteClusterStateServiceTests extends OpenSearchTestCase { private RepositoriesService repositoriesService; private BlobStoreRepository blobStoreRepository; private BlobStore blobStore; + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); 
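The threadPool field added above is a real TestThreadPool, so the test class now owns a live resource that must be torn down (the @After hook added further down in this file does exactly that). A minimal sketch of the create/terminate pattern, assuming the OpenSearch test utilities on the classpath; the class name here is illustrative:

    import java.util.concurrent.TimeUnit;

    import org.opensearch.threadpool.TestThreadPool;
    import org.opensearch.threadpool.ThreadPool;

    public class ThreadPoolLifecycleSketch {
        public static void main(String[] args) {
            // Create a real thread pool for the duration of a test...
            ThreadPool threadPool = new TestThreadPool("sketch");
            try {
                threadPool.generic().execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
            } finally {
                // ...and always terminate it, or the test JVM leaks threads.
                ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
            }
        }
    }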
@Before public void setup() { @@ -113,17 +121,25 @@ public void setup() { repositoriesServiceSupplier, settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - () -> 0L + () -> 0L, + threadPool ); } + @After + public void teardown() throws Exception { + super.tearDown(); + remoteClusterStateService.close(); + threadPool.shutdown(); + } + public void testFailWriteFullMetadataNonClusterManagerNode() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().build(); final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState); Assert.assertThat(manifest, nullValue()); } - public void testFailInitializationWhenRemoteStateDisabled() throws IOException { + public void testFailInitializationWhenRemoteStateDisabled() { final Settings settings = Settings.builder().build(); assertThrows( AssertionError.class, @@ -132,27 +148,27 @@ public void testFailInitializationWhenRemoteStateDisabled() throws IOException { repositoriesServiceSupplier, settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - () -> 0L + () -> 0L, + threadPool ) ); } - public void testFailWriteFullMetadataWhenRepositoryNotSet() { - final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + public void testFailInitializeWhenRepositoryNotSet() { doThrow(new RepositoryMissingException("repository missing")).when(repositoriesService).repository("remote_store_repository"); - assertThrows(RepositoryMissingException.class, () -> remoteClusterStateService.writeFullMetadata(clusterState)); + assertThrows(RepositoryMissingException.class, () -> remoteClusterStateService.start()); } public void testFailWriteFullMetadataWhenNotBlobRepository() { final FilterRepository filterRepository = mock(FilterRepository.class); when(repositoriesService.repository("remote_store_repository")).thenReturn(filterRepository); - final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); - assertThrows(AssertionError.class, () -> remoteClusterStateService.writeFullMetadata(clusterState)); + assertThrows(AssertionError.class, () -> remoteClusterStateService.start()); } public void testWriteFullMetadataSuccess() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); mockBlobStoreObjects(); + remoteClusterStateService.start(); final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); List indices = List.of(uploadedIndexMetadata); @@ -163,6 +179,7 @@ public void testWriteFullMetadataSuccess() throws IOException { .stateVersion(1L) .stateUUID("state-uuid") .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") .build(); assertThat(manifest.getIndices().size(), is(1)); @@ -187,6 +204,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { return null; }).when(container).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); + remoteClusterStateService.start(); final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); @@ 
-226,7 +244,11 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { assertEquals(writtenIndexMetadata.getIndex().getName(), "test-index"); assertEquals(writtenIndexMetadata.getIndex().getUUID(), "index-uuid"); long expectedChecksum = RemoteTransferContainer.checksumOfChecksum(new ByteArrayIndexInput("metadata-filename", writtenBytes), 8); - assertEquals(capturedWriteContext.getExpectedChecksum().longValue(), expectedChecksum); + if (capturedWriteContext.doRemoteDataIntegrityCheck()) { + assertEquals(capturedWriteContext.getExpectedChecksum().longValue(), expectedChecksum); + } else { + assertEquals(capturedWriteContext.getExpectedChecksum(), null); + } } @@ -241,6 +263,7 @@ public void testWriteFullMetadataInParallelFailure() throws IOException { return null; }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); + remoteClusterStateService.start(); assertThrows( RemoteClusterStateService.IndexMetadataTransferException.class, () -> remoteClusterStateService.writeFullMetadata(clusterState) @@ -249,6 +272,7 @@ public void testWriteFullMetadataInParallelFailure() throws IOException { public void testFailWriteIncrementalMetadataNonClusterManagerNode() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().build(); + remoteClusterStateService.start(); final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata(clusterState, clusterState, null); Assert.assertThat(manifest, nullValue()); } @@ -275,7 +299,7 @@ public void testWriteIncrementalMetadataSuccess() throws IOException { final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder().indices(Collections.emptyList()).build(); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata( previousClusterState, clusterState, @@ -290,6 +314,7 @@ public void testWriteIncrementalMetadataSuccess() throws IOException { .stateVersion(1L) .stateUUID("state-uuid") .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") .build(); assertThat(manifest.getIndices().size(), is(1)); @@ -314,7 +339,7 @@ public void testReadLatestMetadataManifestFailedIOException() throws IOException ) ).thenThrow(IOException.class); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); Exception e = assertThrows( IllegalStateException.class, () -> remoteClusterStateService.getLatestClusterMetadataManifest( @@ -337,15 +362,12 @@ public void testReadLatestMetadataManifestFailedNoManifestFileInRemote() throws ) ).thenReturn(List.of()); - remoteClusterStateService.ensureRepositorySet(); - Exception e = assertThrows( - IllegalStateException.class, - () -> remoteClusterStateService.getLatestClusterMetadataManifest( - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() - ) + remoteClusterStateService.start(); + Optional manifest = remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() ); - assertEquals(e.getMessage(), "Remote Cluster State not found - " + clusterState.metadata().clusterUUID()); + assertEquals(manifest, Optional.empty()); } public void testReadLatestMetadataManifestFailedManifestFileRemoveAfterFetchInRemote() throws IOException { @@ -362,7 +384,7 @@ public void testReadLatestMetadataManifestFailedManifestFileRemoveAfterFetchInRe 
).thenReturn(Arrays.asList(blobMetadata)); when(blobContainer.readBlob("manifestFileName")).thenThrow(FileNotFoundException.class); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); Exception e = assertThrows( IllegalStateException.class, () -> remoteClusterStateService.getLatestClusterMetadataManifest( @@ -383,12 +405,13 @@ public void testReadLatestMetadataManifestSuccessButNoIndexMetadata() throws IOE .clusterUUID("cluster-uuid") .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") .build(); BlobContainer blobContainer = mockBlobStoreObjects(); mockBlobContainer(blobContainer, expectedManifest, Map.of()); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); assertEquals( remoteClusterStateService.getLatestIndexMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) .size(), @@ -408,13 +431,14 @@ public void testReadLatestMetadataManifestSuccessButIndexMetadataFetchIOExceptio .clusterUUID("cluster-uuid") .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") .build(); BlobContainer blobContainer = mockBlobStoreObjects(); mockBlobContainer(blobContainer, expectedManifest, Map.of()); when(blobContainer.readBlob(uploadedIndexMetadata.getUploadedFilename() + ".dat")).thenThrow(FileNotFoundException.class); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); Exception e = assertThrows( IllegalStateException.class, () -> remoteClusterStateService.getLatestIndexMetadata( @@ -438,14 +462,15 @@ public void testReadLatestMetadataManifestSuccess() throws IOException { .clusterUUID("cluster-uuid") .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") .build(); mockBlobContainer(mockBlobStoreObjects(), expectedManifest, new HashMap<>()); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); final ClusterMetadataManifest manifest = remoteClusterStateService.getLatestClusterMetadataManifest( clusterState.getClusterName().value(), clusterState.metadata().clusterUUID() - ); + ).get(); assertThat(manifest.getIndices().size(), is(1)); assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); @@ -459,7 +484,7 @@ public void testReadLatestMetadataManifestSuccess() throws IOException { public void testReadLatestIndexMetadataSuccess() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); final Index index = new Index("test-index", "index-uuid"); String fileName = "metadata-" + index.getUUID(); @@ -482,6 +507,7 @@ public void testReadLatestIndexMetadataSuccess() throws IOException { .clusterUUID("cluster-uuid") .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") .build(); mockBlobContainer(mockBlobStoreObjects(), expectedManifest, Map.of(index.getUUID(), indexMetadata)); @@ -500,7 +526,7 @@ public void testReadLatestIndexMetadataSuccess() throws IOException { public void testMarkLastStateAsCommittedSuccess() throws IOException { final ClusterState clusterState = 
generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build();
        mockBlobStoreObjects();
-       remoteClusterStateService.ensureRepositorySet();
+       remoteClusterStateService.start();
        final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename");
        List indices = List.of(uploadedIndexMetadata);
        final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder().indices(indices).build();
@@ -514,6 +540,7 @@ public void testMarkLastStateAsCommittedSuccess() throws IOException {
            .stateUUID("state-uuid")
            .clusterUUID("cluster-uuid")
            .nodeId("nodeA")
+           .previousClusterUUID("prev-cluster-uuid")
            .build();
        assertThat(manifest.getIndices().size(), is(1));
@@ -526,6 +553,98 @@ public void testMarkLastStateAsCommittedSuccess() throws IOException {
        assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID()));
    }

+   public void testGetValidPreviousClusterUUID() throws IOException {
+       Map clusterUUIDsPointers = Map.of(
+           "cluster-uuid1",
+           ClusterState.UNKNOWN_UUID,
+           "cluster-uuid2",
+           "cluster-uuid1",
+           "cluster-uuid3",
+           "cluster-uuid2"
+       );
+       mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers);
+
+       remoteClusterStateService.start();
+       String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster");
+       assertThat(previousClusterUUID, equalTo("cluster-uuid3"));
+   }
+
+   public void testGetValidPreviousClusterUUIDForInvalidChain() throws IOException {
+       Map clusterUUIDsPointers = Map.of(
+           "cluster-uuid1",
+           ClusterState.UNKNOWN_UUID,
+           "cluster-uuid2",
+           ClusterState.UNKNOWN_UUID,
+           "cluster-uuid3",
+           "cluster-uuid2"
+       );
+       mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers);
+
+       remoteClusterStateService.start();
+       assertThrows(IllegalStateException.class, () -> remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"));
+   }
+
+   private void mockObjectsForGettingPreviousClusterUUID(Map clusterUUIDsPointers) throws IOException {
+       final BlobPath blobPath = mock(BlobPath.class);
+       when((blobStoreRepository.basePath())).thenReturn(blobPath);
+       when(blobPath.add(anyString())).thenReturn(blobPath);
+       when(blobPath.buildAsString()).thenReturn("/blob/path/");
+       BlobContainer blobContainer1 = mock(BlobContainer.class);
+       BlobContainer blobContainer2 = mock(BlobContainer.class);
+       BlobContainer blobContainer3 = mock(BlobContainer.class);
+       BlobContainer uuidBlobContainer = mock(BlobContainer.class);
+       when(blobContainer1.path()).thenReturn(blobPath);
+       when(blobContainer2.path()).thenReturn(blobPath);
+       when(blobContainer3.path()).thenReturn(blobPath);
+
+       mockBlobContainerForClusterUUIDs(uuidBlobContainer, clusterUUIDsPointers.keySet());
+       final ClusterMetadataManifest clusterManifest1 = generateClusterMetadataManifest(
+           "cluster-uuid1",
+           clusterUUIDsPointers.get("cluster-uuid1"),
+           randomAlphaOfLength(10)
+       );
+       mockBlobContainer(blobContainer1, clusterManifest1, Map.of());
+
+       final ClusterMetadataManifest clusterManifest2 = generateClusterMetadataManifest(
+           "cluster-uuid2",
+           clusterUUIDsPointers.get("cluster-uuid2"),
+           randomAlphaOfLength(10)
+       );
+       mockBlobContainer(blobContainer2, clusterManifest2, Map.of());
+
+       final ClusterMetadataManifest clusterManifest3 = generateClusterMetadataManifest(
+           "cluster-uuid3",
+           clusterUUIDsPointers.get("cluster-uuid3"),
+           randomAlphaOfLength(10)
+       );
+       mockBlobContainer(blobContainer3, clusterManifest3, Map.of());
+
+       when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn(
+           uuidBlobContainer,
+           blobContainer1,
+           blobContainer1,
+           blobContainer2,
+           blobContainer2,
+           blobContainer3,
+           blobContainer3
+       );
+       when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor());
+   }
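The two tests above pin down how the last known cluster UUID is derived from the stored manifests: each manifest points at its predecessor via previousClusterUUID, exactly one chain may start at UNKNOWN_UUID, and the head of that chain is the answer; anything else is an invalid history. A minimal standalone sketch of that validation, under the assumption that UNKNOWN_UUID is the "_na_" sentinel (names and types here are illustrative, not the service's actual implementation):

    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class ClusterUuidChainSketch {
        static final String UNKNOWN_UUID = "_na_";

        // Given uuid -> previous-uuid pointers, return the most recent UUID,
        // i.e. the unique UUID that no other manifest points back to.
        static String lastKnownUuid(Map<String, String> previousPointers) {
            Set<String> referenced = new HashSet<>();
            int roots = 0;
            for (Map.Entry<String, String> e : previousPointers.entrySet()) {
                if (UNKNOWN_UUID.equals(e.getValue())) {
                    roots++;
                } else {
                    referenced.add(e.getValue());
                }
            }
            // A valid history is a single chain: exactly one root...
            if (roots != 1) {
                throw new IllegalStateException("invalid chain: " + roots + " chains start at UNKNOWN_UUID");
            }
            // ...and exactly one head (a UUID nobody references).
            String head = null;
            for (String uuid : previousPointers.keySet()) {
                if (!referenced.contains(uuid)) {
                    if (head != null) {
                        throw new IllegalStateException("invalid chain: multiple heads");
                    }
                    head = uuid;
                }
            }
            return head;
        }

        public static void main(String[] args) {
            System.out.println(lastKnownUuid(Map.of(
                "cluster-uuid1", UNKNOWN_UUID,
                "cluster-uuid2", "cluster-uuid1",
                "cluster-uuid3", "cluster-uuid2"
            ))); // prints cluster-uuid3, matching testGetValidPreviousClusterUUID
        }
    }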
+   private ClusterMetadataManifest generateClusterMetadataManifest(String clusterUUID, String previousClusterUUID, String stateUUID) {
+       return ClusterMetadataManifest.builder()
+           .indices(List.of())
+           .clusterTerm(1L)
+           .stateVersion(1L)
+           .stateUUID(stateUUID)
+           .clusterUUID(clusterUUID)
+           .nodeId("nodeA")
+           .opensearchVersion(VersionUtils.randomOpenSearchVersion(random()))
+           .previousClusterUUID(previousClusterUUID)
+           .committed(true)
+           .build();
+   }
+
    private BlobContainer mockBlobStoreObjects() {
        return mockBlobStoreObjects(BlobContainer.class);
    }
@@ -542,6 +661,14 @@ private BlobContainer mockBlobStoreObjects(Class blobCo
        return blobContainer;
    }

+   private void mockBlobContainerForClusterUUIDs(BlobContainer blobContainer, Set clusterUUIDs) throws IOException {
+       Map blobContainerMap = new HashMap<>();
+       for (String clusterUUID : clusterUUIDs) {
+           blobContainerMap.put(clusterUUID, mockBlobStoreObjects());
+       }
+       when(blobContainer.children()).thenReturn(blobContainerMap);
+   }
+
    private void mockBlobContainer(
        BlobContainer blobContainer,
        ClusterMetadataManifest clusterMetadataManifest,
diff --git a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java
index 7d36795d1a896..c34f13041cb11 100644
--- a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java
+++ b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java
@@ -52,6 +52,7 @@
 import org.opensearch.rest.RestRequest;
 import org.opensearch.rest.RestResponse;
 import org.opensearch.tasks.Task;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.MockLogAppender;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.junit.annotations.TestLogging;
@@ -173,7 +174,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th
            threadPool,
            xContentRegistry(),
            dispatcher,
-           new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
+           new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
+           NoopTracer.INSTANCE
        ) {
        @Override
@@ -238,7 +240,8 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext,
                channel.sendResponse(emptyResponse(RestStatus.BAD_REQUEST));
            }
        },
-       clusterSettings
+       clusterSettings,
+       NoopTracer.INSTANCE
        ) {
            @Override
            protected HttpServerChannel bind(InetSocketAddress hostAddress) {
diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java
index 99673d4b01690..a1d6be84c9926 100644
--- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java
+++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java
@@ -110,6 +110,7 @@
 import org.opensearch.repositories.RepositoriesService;
 import org.opensearch.script.ScriptService;
 import org.opensearch.search.internal.ReaderContext;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.ClusterServiceUtils;
 import org.opensearch.test.IndexSettingsModule;
 import org.opensearch.test.OpenSearchTestCase;
@@ -204,7 +205,8 @@ public void setUp() throws Exception {
        TransportService.NOOP_TRANSPORT_INTERCEPTOR,
        boundAddress -> DiscoveryNode.createLocal(settings,
boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); repositoriesService = new RepositoriesService( settings, diff --git a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java index 73073f0bb9e47..8363ea3757a2b 100644 --- a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -45,6 +45,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -78,7 +79,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index 72a0fdaf5c77a..ed04d9a20f18e 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -49,6 +49,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -85,7 +86,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java index 9895eddb911f5..d9bca55a208c2 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -49,6 +49,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -84,7 +85,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java 
b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index bcda7b941aaee..6a99063d11353 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -14,7 +14,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; @@ -34,7 +33,6 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.hamcrest.MatcherAssert; import org.junit.Assert; -import org.junit.Before; import java.io.IOException; import java.nio.file.Path; @@ -65,14 +63,6 @@ public class RemoteIndexShardTests extends SegmentReplicationIndexShardTests { .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) .build(); - @Before - public void setup() { - // Todo: Remove feature flag once remote store integration with segrep goes GA - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING.getKey(), "true").build() - ); - } - protected Settings getIndexSettings() { return settings; } diff --git a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java index 2a4c572ee2875..4f5cad70fd643 100644 --- a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java @@ -13,7 +13,6 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.NRTReplicationEngine; @@ -25,7 +24,6 @@ import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.common.ReplicationType; import org.junit.Assert; -import org.junit.Before; import java.io.IOException; import java.nio.file.Path; @@ -43,14 +41,6 @@ public class ReplicaRecoveryWithRemoteTranslogOnPrimaryTests extends OpenSearchI .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "100ms") .build(); - @Before - public void setup() { - // Todo: Remove feature flag once remote store integration with segrep goes GA - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING.getKey(), "true").build() - ); - } - public void testStartSequenceForReplicaRecovery() throws Exception { final Path remoteDir = createTempDir(); final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }"; diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index d22ad06245687..742dbdeba8c5b 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -620,8 +620,8 @@ public void 
testConflictingEngineFactories() { assertThat(e, hasToString(new RegexMatcher(pattern))); } - public void testClusterRemoteTranslogBufferIntervalNull() { + public void testClusterRemoteTranslogBufferIntervalDefault() { IndicesService indicesService = getIndicesService(); - assertNull(indicesService.getClusterRemoteTranslogBufferInterval()); + assertEquals(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indicesService.getClusterRemoteTranslogBufferInterval()); } } diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index 8de652138e83e..dc4dca80ea110 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -111,6 +111,7 @@ import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.snapshots.EmptySnapshotsInfoService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -237,7 +238,8 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(SETTINGS, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), clusterSettings, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); MetadataIndexUpgradeService metadataIndexUpgradeService = new MetadataIndexUpgradeService( SETTINGS, diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index ee6595ade1d34..22bf337b05598 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -70,6 +70,7 @@ import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -545,7 +546,8 @@ private IndicesClusterStateService createIndicesClusterStateService( TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); final ClusterService clusterService = mock(ClusterService.class); final RepositoriesService repositoriesService = new RepositoriesService( diff --git a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java index 920a3f2946884..131514eb019b3 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java @@ 
-10,7 +10,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.MapperService; @@ -18,7 +17,6 @@ import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.replication.common.ReplicationType; -import org.junit.Before; import java.nio.file.Path; @@ -31,14 +29,6 @@ public class RemoteStorePeerRecoverySourceHandlerTests extends OpenSearchIndexLe .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "100ms") .build(); - @Before - public void setup() { - // Todo: Remove feature flag once remote store integration with segrep goes GA - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING.getKey(), "true").build() - ); - } - public void testReplicaShardRecoveryUptoLastFlushedCommit() throws Exception { final Path remoteDir = createTempDir(); final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }"; diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index 67729bbd5f76a..bcacef83d190a 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -23,6 +23,7 @@ import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.transport.TransportService; @@ -68,7 +69,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java index f2e0baf28ecd1..8f84053f2618e 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java @@ -29,6 +29,7 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.CopyStateTests; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; @@ -102,7 +103,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); 
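
The hunks above all make the same mechanical change: the test-side TransportService constructor gains a trailing Tracer argument, satisfied with the no-op implementation so tests carry no telemetry overhead. A minimal sketch of the resulting setUp() wiring, assuming the fixtures (transport, threadPool, clusterService) that these test classes already define:

```java
import java.util.Collections;

import org.opensearch.common.settings.Settings;
import org.opensearch.telemetry.tracing.noop.NoopTracer;
import org.opensearch.transport.TransportService;

// Inside a test's setUp(): construct the service with the new trailing Tracer.
transportService = new TransportService(
    Settings.EMPTY,
    transport,                                   // e.g. a CapturingTransport fixture
    threadPool,
    TransportService.NOOP_TRANSPORT_INTERCEPTOR,
    boundAddress -> clusterService.localNode(),  // local node factory
    null,                                        // ClusterSettings stays nullable here
    Collections.emptySet(),                      // task headers
    NoopTracer.INSTANCE                          // the new required Tracer argument
);
transportService.start();
transportService.acceptIncomingRequests();
```
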
transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 6314feeed3b09..c1f88a6938d33 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -37,6 +37,7 @@ import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -117,7 +118,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java index 401700c2b2ab9..2cf006176022d 100644 --- a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java @@ -25,6 +25,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -61,7 +62,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index d14858a4441b5..889d0dc6ddb14 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -77,6 +77,7 @@ import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -124,7 +125,8 @@ private RepositoriesService createRepositoriesServiceWithMockedClusterService(Cl TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(Settings.EMPTY, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); final ClusterApplierService clusterApplierService = 
mock(ClusterApplierService.class); when(clusterApplierService.threadPool()).thenReturn(threadPool); diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java index a820c17d188d6..e3e1bf31e82dc 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java @@ -36,7 +36,6 @@ import org.opensearch.client.Client; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.Environment; import org.opensearch.gateway.remote.RemoteClusterStateService; @@ -49,7 +48,6 @@ import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; @@ -72,10 +70,6 @@ * Tests for the {@link BlobStoreRepository} and its subclasses. */ public class BlobStoreRepositoryRemoteIndexTests extends BlobStoreRepositoryHelperTests { - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } @Override protected Settings nodeSettings() { @@ -112,13 +106,12 @@ private Settings buildRemoteStoreNodeAttributes(String repoName, Path repoPath) .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName) .put(repoTypeAttributeKey, FsRepository.TYPE) .put(repoSettingsAttributeKeyPrefix + "location", repoPath) - .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false) .build(); } // Validate Scenario Normal Snapshot -> remoteStoreShallowCopy Snapshot -> normal Snapshot public void testRetrieveShallowCopySnapshotCase1() throws IOException { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final Client client = client(); final String snapshotRepositoryName = "test-repo"; final String remoteStoreRepositoryName = "test-rs-repo"; @@ -208,7 +201,6 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { } public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final Client client = client(); final String snapshotRepositoryName = "test-repo"; final String remoteStoreRepositoryName = "test-rs-repo"; @@ -259,7 +251,6 @@ public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { // Validate Scenario remoteStoreShallowCopy Snapshot -> remoteStoreShallowCopy Snapshot // -> remoteStoreShallowCopy Snapshot -> normal snapshot public void testRetrieveShallowCopySnapshotCase2() throws IOException { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final Client client = client(); final String snapshotRepositoryName = "test-repo"; final String remoteStoreRepositoryName = "test-rs-repo"; diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index ea9f96ff925cd..e097c7025e4fe 100644 
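
For orientation: the repository tests in this area stop toggling the remote-store feature flag, since the capability is no longer gated. The deleted override (reconstructed from the removed lines above, and removed again from BlobStoreRepositoryTests just below) had this shape:

```java
// Previously required so the test cluster started with remote store enabled; now
// removed along with the per-test FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE) calls.
@Override
protected Settings featureFlagSettings() {
    return Settings.builder()
        .put(super.featureFlagSettings())
        .put(FeatureFlags.REMOTE_STORE, "true")
        .build();
}
```
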
--- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -41,7 +41,6 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; @@ -105,11 +104,6 @@ protected void assertSnapshotOrGenericThread() { } } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - public void testRetrieveSnapshots() throws Exception { final Client client = client(); final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); diff --git a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java index 2d125b8d36542..f0d930c4c3acb 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java @@ -32,6 +32,7 @@ import org.opensearch.tasks.TaskCancellationService; import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -70,7 +71,7 @@ public class SearchBackpressureServiceTests extends OpenSearchTestCase { @Before public void setup() { threadPool = new TestThreadPool(getClass().getName()); - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); transportService.start(); transportService.acceptIncomingRequests(); taskManager = transportService.getTaskManager(); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 9cc7a83d3c563..3c31c979ce856 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -221,6 +221,7 @@ import org.opensearch.search.query.QueryPhase; import org.opensearch.snapshots.mockstore.MockEventuallyConsistentRepository; import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.threadpool.ThreadPool; @@ -1985,7 +1986,7 @@ public void onFailure(final Exception e) { return actualHandler; } } - }, a -> node, null, emptySet()); + }, a -> node, null, emptySet(), NoopTracer.INSTANCE); final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver( new ThreadContext(Settings.EMPTY) ); diff --git a/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java 
b/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java index aa626013240b8..bb154b95f9f01 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java @@ -14,6 +14,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.tasks.TaskId; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.Scheduler; @@ -47,7 +48,7 @@ public class TaskCancellationMonitoringServiceTests extends OpenSearchTestCase { @Before public void setup() { threadPool = new TestThreadPool(getClass().getName()); - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); transportService.start(); transportService.acceptIncomingRequests(); taskManager = transportService.getTaskManager(); diff --git a/server/src/test/java/org/opensearch/throttling/tracker/NodePerformanceTrackerTests.java b/server/src/test/java/org/opensearch/throttling/tracker/NodePerformanceTrackerTests.java index 9fdd491c12c64..41e03c482d4e4 100644 --- a/server/src/test/java/org/opensearch/throttling/tracker/NodePerformanceTrackerTests.java +++ b/server/src/test/java/org/opensearch/throttling/tracker/NodePerformanceTrackerTests.java @@ -11,6 +11,8 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.monitor.fs.FsService; import org.opensearch.node.NodePerformanceStatistics; import org.opensearch.node.PerformanceCollectorService; import org.opensearch.test.OpenSearchTestCase; @@ -52,7 +54,8 @@ public void testStats() throws InterruptedException { performanceCollectorService, threadPool, Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + mock(FsService.class) ); tracker.start(); Thread.sleep(2000); diff --git a/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java index 510a2b3abd943..1c9880ed14714 100644 --- a/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java @@ -42,6 +42,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -82,7 +83,7 @@ public MockTransportService startTransport(final String id, final Version versio .put("node.name", id) .put(settings) .build(); - MockTransportService newService = MockTransportService.createNewService(s, version, threadPool); + MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, 
NoopTracer.INSTANCE); try { newService.start(); newService.acceptIncomingRequests(); @@ -99,7 +100,14 @@ public void testProxyStrategyWillOpenExpectedNumberOfConnectionsToAddress() { try (MockTransportService transport1 = startTransport("node1", Version.CURRENT)) { TransportAddress address1 = transport1.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -138,7 +146,14 @@ public void testProxyStrategyWillOpenNewConnectionsOnDisconnect() throws Excepti TransportAddress address1 = transport1.boundAddress().publishAddress(); TransportAddress address2 = transport2.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -200,7 +215,14 @@ public void testConnectFailsWithIncompatibleNodes() { try (MockTransportService transport1 = startTransport("incompatible-node", incompatibleVersion)) { TransportAddress address1 = transport1.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -240,7 +262,14 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro TransportAddress address1 = transport1.boundAddress().publishAddress(); TransportAddress address2 = transport2.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -303,7 +332,14 @@ public void testProxyStrategyWillResolveAddressesEachConnect() throws Exception return address; }; - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -338,7 +374,14 @@ public void testProxyStrategyWillNeedToBeRebuiltIfNumOfSocketsOrAddressesOrServe try (MockTransportService remoteTransport = startTransport("node1", Version.CURRENT)) { TransportAddress remoteAddress = remoteTransport.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { 
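
Each of these connection-strategy tests migrates the three-argument factory call to the four-argument form; the repeated post-change pattern, as a sketch (threadPool is the suite's TestThreadPool fixture):

```java
import org.opensearch.Version;
import org.opensearch.common.settings.Settings;
import org.opensearch.telemetry.tracing.noop.NoopTracer;
import org.opensearch.test.transport.MockTransportService;

try (
    MockTransportService localService = MockTransportService.createNewService(
        Settings.EMPTY,
        Version.CURRENT,
        threadPool,
        NoopTracer.INSTANCE   // previously implicit; now passed explicitly
    )
) {
    localService.start();
    localService.acceptIncomingRequests();
    // ... connection-strategy assertions against the remote transports ...
}
```
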
localService.start(); localService.acceptIncomingRequests(); @@ -441,7 +484,14 @@ public void testServerNameAttributes() { try (MockTransportService transport1 = startTransport("node1", Version.CURRENT, bindSettings)) { TransportAddress address1 = transport1.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterAwareClientTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterAwareClientTests.java index a2a77168c8991..7595982837365 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterAwareClientTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterAwareClientTests.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -81,7 +82,14 @@ public void testSearchShards() throws Exception { Collections.shuffle(knownNodes, random()); Settings.Builder builder = Settings.builder(); builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); - try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + builder.build(), + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); @@ -121,7 +129,14 @@ public void testSearchShardsThreadContextHeader() { Collections.shuffle(knownNodes, random()); Settings.Builder builder = Settings.builder(); builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); - try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + builder.build(), + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java index b89d652510850..f3b7f9916d460 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; @@ -79,7 +80,14 @@ public void testConnectAndExecuteRequest() throws Exception { 
.put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()) .build(); - try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + localSettings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); // following two log lines added to investigate #41745, can be removed once issue is closed logger.info("Start accepting incoming requests on local transport service"); @@ -118,7 +126,14 @@ public void testEnsureWeReconnect() throws Exception { .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()) .build(); - try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + localSettings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); // this test is not perfect since we might reconnect concurrently but it will fail most of the time if we don't have // the right calls in place in the RemoteAwareClient @@ -147,7 +162,9 @@ public void testEnsureWeReconnect() throws Exception { public void testRemoteClusterServiceNotEnabled() { final Settings settings = removeRoles(Collections.singleton(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java index d481f361f2e54..bb653439ec21e 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java @@ -66,6 +66,7 @@ import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -126,7 +127,7 @@ public static MockTransportService startTransport( boolean success = false; final Settings s = Settings.builder().put(settings).put("node.name", id).build(); ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(s); - MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, null); + MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, NoopTracer.INSTANCE); try { newService.registerRequestHandler( ClusterSearchShardsAction.NAME, @@ -231,7 +232,14 @@ public void run() { }; t.start(); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { 
+ try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); CountDownLatch listenerCalled = new CountDownLatch(1); @@ -280,7 +288,14 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt List seedNodes = addresses(seedNode1, seedNode); Collections.shuffle(seedNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; @@ -367,7 +382,14 @@ public void testGetConnectionInfo() throws Exception { List seedNodes = addresses(node3, node1, node2); Collections.shuffle(seedNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); int maxNumConnections = randomIntBetween(1, 5); @@ -480,7 +502,14 @@ public void testCollectNodes() throws Exception { try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT)) { DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); knownNodes.add(seedTransport.getLocalDiscoNode()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; @@ -515,7 +544,14 @@ public void testNoChannelsExceptREG() throws Exception { try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT)) { DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); knownNodes.add(seedTransport.getLocalDiscoNode()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; @@ -568,7 +604,14 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted ); Collections.shuffle(seedNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); @@ -645,7 +688,14 @@ public void testGetConnection() throws Exception { DiscoveryNode disconnectedNode = disconnectedTransport.getLocalNode(); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = 
MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java index f6f3e8fa60863..449715189c881 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java @@ -44,6 +44,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -162,7 +163,7 @@ public void testGroupClusterIndices() throws IOException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -233,7 +234,7 @@ public void testGroupIndices() throws IOException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -326,7 +327,7 @@ public void testIncrementallyAddClusters() throws IOException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -393,7 +394,12 @@ public void testDefaultPingSchedule() throws IOException { } Settings settings = settingsBuilder.build(); try ( - MockTransportService transportService = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null) + MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { transportService.start(); transportService.acceptIncomingRequests(); @@ -436,7 +442,7 @@ public void testCustomPingSchedule() throws IOException { transportSettings, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -474,7 +480,7 @@ public void testChangeSettings() throws Exception { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -523,7 +529,12 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException { Collections.shuffle(knownNodes, random()); try ( - MockTransportService transportService = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null) + MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { transportService.start(); transportService.acceptIncomingRequests(); @@ -586,7 +597,12 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { Collections.shuffle(knownNodes, random()); try ( - MockTransportService transportService = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null) + MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { transportService.start(); transportService.acceptIncomingRequests(); @@ -654,7 +670,12 @@ public void testCollectNodes() throws InterruptedException, IOException { Collections.shuffle(knownNodes_c2, random()); try ( - MockTransportService transportService = 
MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null) + MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { transportService.start(); transportService.acceptIncomingRequests(); @@ -901,7 +922,7 @@ public void testReconnectWhenStrategySettingsUpdated() throws Exception { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -983,7 +1004,14 @@ public void testSkipUnavailable() { knownNodes.add(seedNode); Settings.Builder builder = Settings.builder(); builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); - try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + builder.build(), + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); @@ -1002,7 +1030,9 @@ public void testSkipUnavailable() { public void testRemoteClusterServiceNotEnabledGetRemoteClusterConnection() { final Settings settings = removeRoles(Collections.singleton(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); final IllegalArgumentException e = expectThrows( @@ -1015,7 +1045,9 @@ public void testRemoteClusterServiceNotEnabledGetRemoteClusterConnection() { public void testRemoteClusterServiceNotEnabledGetCollectNodes() { final Settings settings = removeRoles(Collections.singleton(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); final IllegalArgumentException e = expectThrows( diff --git a/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java index ca85ec2270caf..c89a9d328b419 100644 --- a/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java +++ b/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java @@ -49,6 +49,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.transport.MockTransportService; @@ -105,7 +106,7 @@ public MockTransportService startTransport( .put(settings) .build(); ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(s); - MockTransportService newService = MockTransportService.createNewService(s, version, threadPool); + MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, NoopTracer.INSTANCE); try { newService.registerRequestHandler( ClusterStateAction.NAME, 
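
A note on why a literal null becomes NoopTracer.INSTANCE at these call sites: the meaning of the factory's fourth parameter changes. A sketch of both shapes, matching the MockTransportService hunk near the end of this diff:

```java
// old: createNewService(Settings, Version, ThreadPool, @Nullable ClusterSettings)
//      -> the trailing null filled the ClusterSettings slot
// new: createNewService(Settings, Version, ThreadPool, Tracer)
//      -> ClusterSettings defaults to null via delegation; the Tracer is mandatory
MockTransportService service = MockTransportService.createNewService(
    builder.build(),
    Version.CURRENT,
    threadPool,
    NoopTracer.INSTANCE   // was: null, which occupied the old ClusterSettings slot
);
```
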
@@ -143,7 +144,14 @@ public void testSniffStrategyWillConnectToAndDiscoverNodes() { knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -192,7 +200,14 @@ public void testSniffStrategyWillResolveDiscoveryNodesEachConnect() throws Excep return seedNode; }; - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -240,7 +255,14 @@ public void testSniffStrategyWillConnectToMaxAllowedNodesAndOpenNewConnectionsOn knownNodes.add(discoverableNode2); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -297,7 +319,14 @@ public void testDiscoverWithSingleIncompatibleSeedNode() { knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -336,7 +365,14 @@ public void testConnectFailsWithIncompatibleNodes() { DiscoveryNode incompatibleSeedNode = incompatibleSeedTransport.getLocalNode(); knownNodes.add(incompatibleSeedNode); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -378,7 +414,14 @@ public void testFilterNodesWithNodePredicate() { DiscoveryNode rejectedNode = randomBoolean() ? 
seedNode : discoverableNode; Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -424,7 +467,14 @@ public void testConnectFailsIfNoConnectionsOpened() { knownNodes.add(discoverableNode); closedTransport.close(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -474,7 +524,14 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro Collections.shuffle(knownNodes, random()); Collections.shuffle(otherKnownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -542,7 +599,14 @@ public void testMultipleCallsToConnectEnsuresConnection() { knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -589,8 +653,18 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { List knownNodes = new CopyOnWriteArrayList<>(); try ( MockTransportService accessible = startTransport("seed_node", knownNodes, Version.CURRENT); - MockTransportService unresponsive1 = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool); - MockTransportService unresponsive2 = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool) + MockTransportService unresponsive1 = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ); + MockTransportService unresponsive2 = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { // We start in order to get a valid address + port, but do not start accepting connections as we // will not actually connect to these transports @@ -616,7 +690,14 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -679,7 +760,14 @@ public void 
testSniffStrategyWillNeedToBeRebuiltIfNumOfConnectionsOrSeedsOrProxy knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java index c23c284be1ca5..dd2aefd2318f7 100644 --- a/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java @@ -40,6 +40,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -85,7 +86,7 @@ public void tearDown() throws Exception { } private MockTransportService buildService(final Version version) { - MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, version, threadPool, null); + MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, version, threadPool, NoopTracer.INSTANCE); service.start(); service.acceptIncomingRequests(); return service; diff --git a/server/src/test/java/org/opensearch/transport/TransportServiceDeserializationFailureTests.java b/server/src/test/java/org/opensearch/transport/TransportServiceDeserializationFailureTests.java index c0dd854139ee5..d10b4f26100cc 100644 --- a/server/src/test/java/org/opensearch/transport/TransportServiceDeserializationFailureTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportServiceDeserializationFailureTests.java @@ -43,6 +43,7 @@ import org.opensearch.core.transport.TransportResponse; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskAwareRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.ThreadPool; @@ -82,7 +83,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, ignored -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.registerRequestHandler( diff --git a/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java b/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java index 5abb032120dcf..67db155779ec6 100644 --- a/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java @@ -43,6 +43,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; import 
org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -100,7 +101,8 @@ private NetworkHandle startServices(String nodeNameAndId, Settings settings, Ver version ), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index d49d3d290b8a8..d24cc24d28579 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -88,6 +88,7 @@ import org.opensearch.monitor.StatusInfo; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.test.disruption.DisruptableMockTransport.ConnectionStatus; @@ -844,14 +845,16 @@ class MockPersistedState implements CoordinationState.PersistedState { private final CoordinationState.PersistedState delegate; private final NodeEnvironment nodeEnvironment; + private MockGatewayMetaState mockGatewayMetaState; + MockPersistedState(DiscoveryNode localNode) { try { if (rarely()) { nodeEnvironment = newNodeEnvironment(); nodeEnvironments.add(nodeEnvironment); - final MockGatewayMetaState gatewayMetaState = new MockGatewayMetaState(localNode, bigArrays); - gatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry(), persistedStateRegistry()); - delegate = gatewayMetaState.getPersistedState(); + mockGatewayMetaState = new MockGatewayMetaState(localNode, bigArrays); + mockGatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry(), persistedStateRegistry()); + delegate = mockGatewayMetaState.getPersistedState(); } else { nodeEnvironment = null; delegate = new InMemoryPersistedState( @@ -1114,7 +1117,8 @@ protected Optional getDisruptableMockTransport(Transpo getTransportInterceptor(localNode, threadPool), a -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); clusterManagerService = new AckedFakeThreadPoolClusterManagerService( localNode.getId(), diff --git a/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java index bf59e9ff06cbe..d77596cf5cdd1 100644 --- a/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java +++ b/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java @@ -46,16 +46,13 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.recovery.RemoteStoreRestoreService; import org.opensearch.plugins.MetadataUpgrader; -import org.opensearch.repositories.RepositoriesService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.Collections; -import java.util.function.Supplier; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; import static 
org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -68,10 +65,26 @@ public class MockGatewayMetaState extends GatewayMetaState { private final DiscoveryNode localNode; private final BigArrays bigArrays; + private final RemoteClusterStateService remoteClusterStateService; + private final RemoteStoreRestoreService remoteStoreRestoreService; public MockGatewayMetaState(DiscoveryNode localNode, BigArrays bigArrays) { this.localNode = localNode; this.bigArrays = bigArrays; + this.remoteClusterStateService = mock(RemoteClusterStateService.class); + this.remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + } + + public MockGatewayMetaState( + DiscoveryNode localNode, + BigArrays bigArrays, + RemoteClusterStateService remoteClusterStateService, + RemoteStoreRestoreService remoteStoreRestoreService + ) { + this.localNode = localNode; + this.bigArrays = bigArrays; + this.remoteClusterStateService = remoteClusterStateService; + this.remoteStoreRestoreService = remoteStoreRestoreService; } @Override @@ -90,6 +103,11 @@ ClusterState prepareInitialClusterState(TransportService transportService, Clust return ClusterStateUpdaters.setLocalNode(clusterState, localNode); } + @Override + public void close() throws IOException { + super.close(); + } + public void start( Settings settings, NodeEnvironment nodeEnvironment, @@ -108,26 +126,6 @@ public void start( } catch (IOException e) { throw new AssertionError(e); } - Supplier remoteClusterStateServiceSupplier = () -> { - if (isRemoteStoreClusterStateEnabled(settings)) { - return new RemoteClusterStateService( - nodeEnvironment.nodeId(), - () -> new RepositoriesService( - settings, - clusterService, - transportService, - Collections.emptyMap(), - Collections.emptyMap(), - transportService.getThreadPool() - ), - settings, - new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - () -> 0L - ); - } else { - return null; - } - }; start( settings, transportService, @@ -142,8 +140,9 @@ public void start( new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L ), - remoteClusterStateServiceSupplier.get(), - persistedStateRegistry + remoteClusterStateService, + persistedStateRegistry, + remoteStoreRestoreService ); } } diff --git a/test/framework/src/main/java/org/opensearch/node/MockNode.java b/test/framework/src/main/java/org/opensearch/node/MockNode.java index 803a613ba55ff..e6c7e21d5b3ea 100644 --- a/test/framework/src/main/java/org/opensearch/node/MockNode.java +++ b/test/framework/src/main/java/org/opensearch/node/MockNode.java @@ -60,6 +60,7 @@ import org.opensearch.search.SearchService; import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.query.QueryPhase; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.test.MockHttpTransport; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; @@ -199,16 +200,35 @@ protected TransportService newTransportService( TransportInterceptor interceptor, Function localNodeFactory, ClusterSettings clusterSettings, - Set taskHeaders + Set taskHeaders, + Tracer tracer ) { // we use the MockTransportService.TestPlugin class as a marker to create a network // module with this MockNetworkService. NetworkService is such an integral part of the systme // we don't allow to plug it in from plugins or anything. this is a test-only override and // can't be done in a production env. 
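
MockGatewayMetaState above no longer builds a RemoteClusterStateService inline; its remote-store collaborators are now fields, mocked by default and injectable through the new constructor. Both construction paths, as a sketch (mock(...) is Mockito, mirroring the class's own defaults; localNode and bigArrays are the caller's existing fixtures):

```java
import static org.mockito.Mockito.mock;

import org.opensearch.gateway.MockGatewayMetaState;
import org.opensearch.gateway.remote.RemoteClusterStateService;
import org.opensearch.index.recovery.RemoteStoreRestoreService;

// Default path: remote-store collaborators are Mockito mocks.
MockGatewayMetaState defaulted = new MockGatewayMetaState(localNode, bigArrays);

// Explicit path: a test can inject its own instances (mocks shown here).
MockGatewayMetaState explicit = new MockGatewayMetaState(
    localNode,
    bigArrays,
    mock(RemoteClusterStateService.class),
    mock(RemoteStoreRestoreService.class)
);
```
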
diff --git a/test/framework/src/main/java/org/opensearch/node/MockNode.java b/test/framework/src/main/java/org/opensearch/node/MockNode.java
index 803a613ba55ff..e6c7e21d5b3ea 100644
--- a/test/framework/src/main/java/org/opensearch/node/MockNode.java
+++ b/test/framework/src/main/java/org/opensearch/node/MockNode.java
@@ -60,6 +60,7 @@
 import org.opensearch.search.SearchService;
 import org.opensearch.search.fetch.FetchPhase;
 import org.opensearch.search.query.QueryPhase;
+import org.opensearch.telemetry.tracing.Tracer;
 import org.opensearch.test.MockHttpTransport;
 import org.opensearch.test.transport.MockTransportService;
 import org.opensearch.threadpool.ThreadPool;
@@ -199,16 +200,35 @@ protected TransportService newTransportService(
         TransportInterceptor interceptor,
         Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
         ClusterSettings clusterSettings,
-        Set<String> taskHeaders
+        Set<String> taskHeaders,
+        Tracer tracer
     ) {
         // we use the MockTransportService.TestPlugin class as a marker to create a network
         // module with this MockNetworkService. NetworkService is such an integral part of the system
         // we don't allow plugging it in from plugins or anything. this is a test-only override and
         // can't be done in a production env.
         if (getPluginsService().filterPlugins(MockTransportService.TestPlugin.class).isEmpty()) {
-            return super.newTransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders);
+            return super.newTransportService(
+                settings,
+                transport,
+                threadPool,
+                interceptor,
+                localNodeFactory,
+                clusterSettings,
+                taskHeaders,
+                tracer
+            );
         } else {
-            return new MockTransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders);
+            return new MockTransportService(
+                settings,
+                transport,
+                threadPool,
+                interceptor,
+                localNodeFactory,
+                clusterSettings,
+                taskHeaders,
+                tracer
+            );
         }
     }
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index f06a6e26defeb..25f453fe024ff 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -2090,7 +2090,11 @@ protected boolean addMockGeoShapeFieldMapper() {
         return true;
     }
 
-    /** Returns {@code true} if this test cluster should have tracing enabled with MockTelemetryPlugin */
+    /**
+     * Returns {@code true} if this test cluster should have tracing enabled with MockTelemetryPlugin.
+     * Disabled for now because the existing strict checks do not support a multi-node internal cluster.
+     * @return boolean
+     */
     protected boolean addMockTelemetryPlugin() {
         return true;
     }
@@ -2316,8 +2320,12 @@ private static void initializeSuiteScope() throws Exception {
          */
         assert INSTANCE == null;
         if (isSuiteScopedTest(targetClass)) {
-            // note we need to do this this way to make sure this is reproducible
-            INSTANCE = (OpenSearchIntegTestCase) targetClass.getConstructor().newInstance();
+            // note we need to do it this way to make sure this is reproducible
+            if (isSuiteScopedTestParameterized(targetClass)) {
+                INSTANCE = (OpenSearchIntegTestCase) targetClass.getConstructor(Settings.class).newInstance(Settings.EMPTY);
+            } else {
+                INSTANCE = (OpenSearchIntegTestCase) targetClass.getConstructor().newInstance();
+            }
             boolean success = false;
             try {
                 INSTANCE.printTestMessage("setup");
@@ -2412,6 +2420,16 @@ private static boolean isSuiteScopedTest(Class<?> clazz) {
         return clazz.getAnnotation(SuiteScopeTestCase.class) != null;
     }
 
+    /*
+     * For tests annotated with {@link SuiteScopeTestCase}, return true if the
+     * class has a constructor that takes a single Settings parameter.
+     */
+    private static boolean isSuiteScopedTestParameterized(Class<?> clazz) {
+        return Arrays.stream(clazz.getConstructors())
+            .filter(x -> x.getParameterTypes().length == 1)
+            .anyMatch(x -> x.getParameterTypes()[0].equals(Settings.class));
+    }
+
     /**
      * If a test is annotated with {@link SuiteScopeTestCase}
      * the checks and modifications that are applied to the used test cluster are only done after all tests
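The new isSuiteScopedTestParameterized check looks only at constructor shapes, so a suite-scoped test opts into parameterized instantiation simply by declaring a public single-Settings-argument constructor. A hypothetical sketch (the class name is illustrative, and it assumes the base class offers a matching Settings constructor to delegate to):

    @SuiteScopeTestCase
    public class ExampleParameterizedIT extends OpenSearchIntegTestCase {

        // Found by getConstructor(Settings.class): initializeSuiteScope() will
        // build the suite INSTANCE as new ExampleParameterizedIT(Settings.EMPTY).
        public ExampleParameterizedIT(Settings dynamicSettings) {
            super(dynamicSettings);
        }
    }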
diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java b/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java
index 2029d9893ea35..4f3884f97a570 100644
--- a/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java
+++ b/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java
@@ -42,6 +42,7 @@
 import org.opensearch.core.common.transport.BoundTransportAddress;
 import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.core.transport.TransportResponse;
+import org.opensearch.telemetry.tracing.Tracer;
 import org.opensearch.test.transport.MockTransport;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.CloseableConnection;
@@ -91,9 +92,10 @@ public TransportService createTransportService(
         TransportInterceptor interceptor,
         Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
         @Nullable ClusterSettings clusterSettings,
-        Set<String> taskHeaders
+        Set<String> taskHeaders,
+        Tracer tracer
     ) {
-        return new TransportService(settings, this, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders);
+        return new TransportService(settings, this, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, tracer);
     }
 
     @Override
diff --git a/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java
index 42a7b63a3d762..24aef714cc259 100644
--- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java
+++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java
@@ -45,6 +45,7 @@
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.transport.BoundTransportAddress;
 import org.opensearch.core.transport.TransportResponse;
+import org.opensearch.telemetry.tracing.Tracer;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.CloseableConnection;
 import org.opensearch.transport.ClusterConnectionManager;
@@ -80,7 +81,8 @@ public TransportService createTransportService(
         TransportInterceptor interceptor,
         Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
         @Nullable ClusterSettings clusterSettings,
-        Set<String> taskHeaders
+        Set<String> taskHeaders,
+        Tracer tracer
     ) {
         StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ClusterConnectionManager(settings, this));
         connectionManager.setDefaultNodeConnectedBehavior((cm, node) -> false);
@@ -93,7 +95,8 @@ public TransportService createTransportService(
             localNodeFactory,
             clusterSettings,
             taskHeaders,
-            connectionManager
+            connectionManager,
+            tracer
         );
     }
diff --git a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java
index 0b4d801fb75f1..c1795e61096ac 100644
--- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java
+++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java
@@ -57,6 +57,7 @@
 import org.opensearch.node.Node;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.tasks.TaskManager;
+import org.opensearch.telemetry.tracing.Tracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.tasks.MockTaskManager;
 import org.opensearch.threadpool.ThreadPool;
@@ -114,18 +115,19 @@ public List<Setting<?>> getSettings() {
         }
     }
 
-    public static MockTransportService createNewService(Settings settings, Version version, ThreadPool threadPool) {
-        return createNewService(settings, version, threadPool, null);
+    public static MockTransportService createNewService(Settings settings, Version version, ThreadPool threadPool, Tracer tracer) {
+        return createNewService(settings, version, threadPool, null, tracer);
     }
 
     public static MockTransportService createNewService(
         Settings settings,
         Version version,
         ThreadPool threadPool,
-        @Nullable ClusterSettings clusterSettings
+        @Nullable ClusterSettings clusterSettings,
+        Tracer tracer
     ) {
         MockNioTransport mockTransport = newMockTransport(settings, version, threadPool);
-        return createNewService(settings, mockTransport, version, threadPool, clusterSettings, Collections.emptySet());
+        return createNewService(settings, mockTransport, version, threadPool, clusterSettings, Collections.emptySet(), tracer);
     }
 
     public static MockNioTransport newMockTransport(Settings settings, Version version, ThreadPool threadPool) {
@@ -148,9 +150,10 @@ public static MockTransportService createNewService(
         Version version,
         ThreadPool threadPool,
         @Nullable ClusterSettings clusterSettings,
-        Set<String> taskHeaders
+        Set<String> taskHeaders,
+        Tracer tracer
     ) {
-        return createNewService(settings, transport, version, threadPool, clusterSettings, taskHeaders, NOOP_TRANSPORT_INTERCEPTOR);
+        return createNewService(settings, transport, version, threadPool, clusterSettings, taskHeaders, NOOP_TRANSPORT_INTERCEPTOR, tracer);
     }
 
     public static MockTransportService createNewService(
@@ -160,7 +163,8 @@ public static MockTransportService createNewService(
         ThreadPool threadPool,
         @Nullable ClusterSettings clusterSettings,
         Set<String> taskHeaders,
-        TransportInterceptor interceptor
+        TransportInterceptor interceptor,
+        Tracer tracer
     ) {
         return new MockTransportService(
             settings,
@@ -176,7 +180,8 @@ public static MockTransportService createNewService(
                 version
             ),
             clusterSettings,
-            taskHeaders
+            taskHeaders,
+            tracer
         );
     }
 
@@ -194,7 +199,8 @@ public MockTransportService(
         Transport transport,
         ThreadPool threadPool,
         TransportInterceptor interceptor,
-        @Nullable ClusterSettings clusterSettings
+        @Nullable ClusterSettings clusterSettings,
+        Tracer tracer
     ) {
         this(
             settings,
@@ -207,7 +213,8 @@ public MockTransportService(
                 settings.get(Node.NODE_NAME_SETTING.getKey(), UUIDs.randomBase64UUID())
             ),
             clusterSettings,
-            Collections.emptySet()
+            Collections.emptySet(),
+            tracer
         );
     }
 
@@ -225,9 +232,10 @@ public MockTransportService(
         TransportInterceptor interceptor,
         Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
         @Nullable ClusterSettings clusterSettings,
-        Set<String> taskHeaders
+        Set<String> taskHeaders,
+        Tracer tracer
     ) {
-        this(settings, new StubbableTransport(transport), threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders);
+        this(settings, new StubbableTransport(transport), threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, tracer);
     }
 
     private MockTransportService(
@@ -237,7 +245,8 @@ private MockTransportService(
         TransportInterceptor interceptor,
         Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
         @Nullable ClusterSettings clusterSettings,
-        Set<String> taskHeaders
+        Set<String> taskHeaders,
+        Tracer tracer
     ) {
         super(
             settings,
@@ -247,7 +256,8 @@ private MockTransportService(
             localNodeFactory,
             clusterSettings,
             taskHeaders,
-            new StubbableConnectionManager(new ClusterConnectionManager(settings, transport))
+            new StubbableConnectionManager(new ClusterConnectionManager(settings, transport)),
+            tracer
         );
         this.original = transport.getDelegate();
     }
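Every createNewService overload and public constructor above gains a trailing Tracer parameter, so call sites need exactly one extra argument. A sketch of the smallest overload after this change (threadPool is assumed to come from the test fixture):

    import org.opensearch.telemetry.tracing.noop.NoopTracer;

    MockTransportService service = MockTransportService.createNewService(
        Settings.EMPTY,
        Version.CURRENT,
        threadPool,
        NoopTracer.INSTANCE  // was: createNewService(settings, version, threadPool)
    );
    service.start();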
diff --git a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java
index 3bc8901e6a29b..3b64e044e7bf0 100644
--- a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java
@@ -67,6 +67,7 @@
 import org.opensearch.core.transport.TransportResponse;
 import org.opensearch.node.Node;
 import org.opensearch.tasks.Task;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.MockLogAppender;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
@@ -227,7 +228,8 @@ private MockTransportService buildService(
             threadPool,
             clusterSettings,
             Collections.emptySet(),
-            interceptor
+            interceptor,
+            NoopTracer.INSTANCE
         );
         service.start();
         if (acceptRequests) {
diff --git a/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java b/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java
index 516cfe1636bf7..6b64270ca68e1 100644
--- a/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java
+++ b/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java
@@ -43,6 +43,7 @@
 import org.opensearch.core.transport.TransportResponse;
 import org.opensearch.core.transport.TransportResponse.Empty;
 import org.opensearch.node.Node;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.disruption.DisruptableMockTransport.ConnectionStatus;
 import org.opensearch.threadpool.ThreadPool;
@@ -161,7 +162,8 @@ protected void execute(Runnable runnable) {
             NOOP_TRANSPORT_INTERCEPTOR,
             a -> node1,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         service2 = transport2.createTransportService(
             Settings.EMPTY,
@@ -169,7 +171,8 @@ protected void execute(Runnable runnable) {
             NOOP_TRANSPORT_INTERCEPTOR,
             a -> node2,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         service1.start();
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
index 892c1a8b3eeae..694a6a2b9e45b 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
@@ -194,4 +194,12 @@ private static String generateTraceId() {
     public Object getAttribute(String key) {
         return metadata.get(key);
     }
+
+    /**
+     * Returns the attributes as a map.
+     * @return the attributes map.
+     */
+    public Map<String, Object> getAttributes() {
+        return metadata;
+    }
 }
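MockSpan.getAttributes() hands back the whole metadata map, where previously only per-key reads via getAttribute(String) were possible. A hedged usage sketch, assuming the Span interface's addAttribute(String, String) overload and an illustrative attribute value:

    MockSpan span = ...; // obtained from the mock tracing machinery
    span.addAttribute("action", "internal:example/action");
    Map<String, Object> attributes = span.getAttributes();
    assertEquals("internal:example/action", attributes.get("action"));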
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpanData.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpanData.java
index bc71d097ac28b..0658a6421f3f3 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpanData.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpanData.java
@@ -9,6 +9,7 @@
 package org.opensearch.test.telemetry.tracing;
 
 import java.util.Arrays;
+import java.util.Map;
 
 /**
  * MockSpanData model for storing Telemetry information for testing.
@@ -17,6 +18,7 @@ public class MockSpanData {
 
     /**
      * MockSpanData constructor with spanID, parentSpanID, traceID, startEpochNanos, endEpochNanos, hasEnded params.
+     *
      * @param spanID spanID
     * @param parentSpanID spanID of the parentSpan
     * @param traceID traceID of the request
@@ -24,6 +26,7 @@ public class MockSpanData {
      * @param endEpochNanos endTime of span in epochNanos
      * @param hasEnded value if the span is closed
      * @param spanName Name of the span emitted
+     * @param attributes span attributes
      */
     public MockSpanData(
         String spanID,
@@ -32,7 +35,8 @@ public MockSpanData(
         long startEpochNanos,
         long endEpochNanos,
         boolean hasEnded,
-        String spanName
+        String spanName,
+        Map<String, Object> attributes
     ) {
         this.spanID = spanID;
         this.traceID = traceID;
@@ -41,10 +45,12 @@ public MockSpanData(
         this.endEpochNanos = endEpochNanos;
         this.hasEnded = hasEnded;
         this.spanName = spanName;
+        this.attributes = attributes;
     }
 
     /**
      * MockSpanData constructor with spanID, parentSpanID, traceID, startEpochNanos, hasEnded and spanName params.
+     *
      * @param spanID spanID
      * @param parentSpanID spanID of the parentSpan
      * @param traceID traceID of the request
@@ -52,6 +58,7 @@ public MockSpanData(
      * @param hasEnded value if the span is closed
      * @param spanName Name of the span emitted
      * @param stackTrace StackTrace to debug the problematic span
+     * @param attributes span attributes
      */
     public MockSpanData(
         String spanID,
@@ -60,7 +67,8 @@ public MockSpanData(
         long startEpochNanos,
         boolean hasEnded,
         String spanName,
-        StackTraceElement[] stackTrace
+        StackTraceElement[] stackTrace,
+        Map<String, Object> attributes
     ) {
         this.spanID = spanID;
         this.traceID = traceID;
@@ -69,6 +77,7 @@ public MockSpanData(
         this.hasEnded = hasEnded;
         this.spanName = spanName;
         this.stackTrace = stackTrace;
+        this.attributes = attributes;
     }
 
     private final String spanID;
@@ -79,6 +88,7 @@ public MockSpanData(
     private final long startEpochNanos;
     private long endEpochNanos;
     private boolean hasEnded;
+    private Map<String, Object> attributes;
     private StackTraceElement[] stackTrace;
 
@@ -147,6 +157,14 @@ public void setHasEnded(boolean hasEnded) {
         this.hasEnded = hasEnded;
     }
 
+    /**
+     * Returns the attributes.
+     * @return the attributes map.
+     */
+    public Map<String, Object> getAttributes() {
+        return attributes;
+    }
+
     @Override
     public String toString() {
         return "MockSpanData{"
@@ -168,6 +186,8 @@ public String toString() {
             + endEpochNanos
             + ", hasEnded="
             + hasEnded
+            + ", attributes="
+            + attributes
             + ", stackTrace="
             + Arrays.toString(stackTrace)
             + '}';
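Both MockSpanData constructors now carry the attributes through, so tests can assert on them directly. A minimal construction sketch with placeholder values:

    MockSpanData spanData = new MockSpanData(
        "span-1",
        "parent-1",
        "trace-1",
        System.nanoTime(),  // startEpochNanos
        System.nanoTime(),  // endEpochNanos
        true,               // hasEnded
        "example-span",
        Map.of("action", "internal:example/action")
    );
    assertEquals("internal:example/action", spanData.getAttributes().get("action"));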
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
index c6e57531d23df..f7ebb3ee18a9b 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
@@ -60,7 +60,8 @@ private MockSpanData toMockSpanData(Span span) {
             System.nanoTime(),
             false,
             span.getSpanName(),
-            Thread.currentThread().getStackTrace()
+            Thread.currentThread().getStackTrace(),
+            (span instanceof MockSpan) ? ((MockSpan) span).getAttributes() : Map.of()
         );
         return spanData;
     }
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/NumberOfTraceIDsEqualToRequests.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/NumberOfTraceIDsEqualToRequests.java
index 3e18e4b873557..5fe268a8f0581 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/NumberOfTraceIDsEqualToRequests.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/NumberOfTraceIDsEqualToRequests.java
@@ -8,11 +8,13 @@
 
 package org.opensearch.test.telemetry.tracing.validators;
 
+import org.opensearch.telemetry.tracing.attributes.Attributes;
 import org.opensearch.test.telemetry.tracing.MockSpanData;
 import org.opensearch.test.telemetry.tracing.TracingValidator;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import java.util.Set;
 import java.util.stream.Collectors;
 
@@ -21,10 +23,16 @@
  */
 public class NumberOfTraceIDsEqualToRequests implements TracingValidator {
 
+    private static final String FILTERING_ATTRIBUTE = "action";
+    private final Attributes attributes;
+
     /**
-     * Base Constructor
+     * Constructor.
+     * @param attributes attributes to filter spans on.
      */
-    public NumberOfTraceIDsEqualToRequests() {}
+    public NumberOfTraceIDsEqualToRequests(Attributes attributes) {
+        this.attributes = attributes;
+    }
 
     /**
      * validates if all spans emitted for a particular request have same traceID.
@@ -33,11 +41,25 @@ public NumberOfTraceIDsEqualToRequests() {}
      */
     @Override
     public List<MockSpanData> validate(List<MockSpanData> spans, int requests) {
-        Set<String> totalTraceIDs = spans.stream().map(MockSpanData::getTraceID).collect(Collectors.toSet());
+        Set<String> totalTraceIDs = spans.stream()
+            .filter(span -> isMatchingSpan(span))
+            .map(MockSpanData::getTraceID)
+            .collect(Collectors.toSet());
         List<MockSpanData> problematicSpans = new ArrayList<>();
         if (totalTraceIDs.size() != requests) {
             problematicSpans.addAll(spans);
         }
         return problematicSpans;
     }
+
+    private boolean isMatchingSpan(MockSpanData mockSpanData) {
+        if (attributes.getAttributesMap().isEmpty()) {
+            return true;
+        } else {
+            return Objects.equals(
+                mockSpanData.getAttributes().get(FILTERING_ATTRIBUTE),
+                attributes.getAttributesMap().get(FILTERING_ATTRIBUTE)
+            );
+        }
+    }
 }
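Since StrictCheckSpanProcessor now copies MockSpan attributes into MockSpanData, the validator can restrict its trace-ID count to spans whose "action" attribute matches. A hedged sketch of invoking it, assuming the Attributes.create().addAttribute(...) builder from the telemetry API and illustrative values:

    Attributes filter = Attributes.create().addAttribute("action", "internal:example/action");
    NumberOfTraceIDsEqualToRequests validator = new NumberOfTraceIDsEqualToRequests(filter);
    // An empty result means the filtered spans produced exactly `requests` distinct trace IDs.
    List<MockSpanData> problematic = validator.validate(spans, requests);
    assertTrue(problematic.isEmpty());

Passing an Attributes instance with an empty map disables the filter, so callers can keep the old count-everything behavior.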