From a3fb105190f5a7a38095075eb6ab2c8cbeef870e Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Wed, 24 Aug 2022 10:27:23 -0500 Subject: [PATCH 01/78] [BUG] Running "opensearch-service.bat start" and "opensearch-service.bat manager" (#4289) * [BUG] Update opensearch-service-x64.exe parameters to //ES for Execute Service. Update opensearch-service-mgr.exe parameters to //ES for Edit Service. Add code comments for the Apache Commons Daemon. Signed-off-by: Alex Burck * update changelog with pull request link Signed-off-by: Alex Burck Signed-off-by: Alex Burck --- CHANGELOG.md | 1 + distribution/src/bin/opensearch-service.bat | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a02bfdaf0320..513fb92ad2675 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Removed ### Fixed +- `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) ### Security diff --git a/distribution/src/bin/opensearch-service.bat b/distribution/src/bin/opensearch-service.bat index 4dd8356340d10..8b91d806ef64f 100644 --- a/distribution/src/bin/opensearch-service.bat +++ b/distribution/src/bin/opensearch-service.bat @@ -8,6 +8,10 @@ if /i "%1" == "install" set NOJAVA= call "%~dp0opensearch-env.bat" %NOJAVA% || exit /b 1 +rem opensearch-service-x64.exe is based off of the Apache Commons Daemon procrun service application. +rem Run "opensearch-service-x64.exe version" for version information. +rem Run "opensearch-service-x64.exe help" for command options. +rem See https://commons.apache.org/proper/commons-daemon/procrun.html for more information. set EXECUTABLE=%OPENSEARCH_HOME%\bin\opensearch-service-x64.exe if "%SERVICE_ID%" == "" set SERVICE_ID=opensearch-service-x64 set ARCH=64-bit @@ -45,7 +49,8 @@ echo Usage: opensearch-service.bat install^|remove^|start^|stop^|manager [SERVIC goto:eof :doStart -"%EXECUTABLE%" //OPENSEARCH//%SERVICE_ID% %LOG_OPTS% +rem //ES == Execute Service +"%EXECUTABLE%" //ES//%SERVICE_ID% %LOG_OPTS% if not errorlevel 1 goto started echo Failed starting '%SERVICE_ID%' service exit /B 1 @@ -55,6 +60,7 @@ echo The service '%SERVICE_ID%' has been started goto:eof :doStop +rem //SS == Stop Service "%EXECUTABLE%" //SS//%SERVICE_ID% %LOG_OPTS% if not errorlevel 1 goto stopped echo Failed stopping '%SERVICE_ID%' service @@ -65,8 +71,11 @@ echo The service '%SERVICE_ID%' has been stopped goto:eof :doManagment +rem opensearch-service-mgr.exe is based off of the Apache Commons Daemon procrun monitor application. +rem See https://commons.apache.org/proper/commons-daemon/procrun.html for more information. 
set EXECUTABLE_MGR=%OPENSEARCH_HOME%\bin\opensearch-service-mgr -"%EXECUTABLE_MGR%" //OPENSEARCH//%SERVICE_ID% +rem //ES == Edit Service +"%EXECUTABLE_MGR%" //ES//%SERVICE_ID% if not errorlevel 1 goto managed echo Failed starting service manager for '%SERVICE_ID%' exit /B 1 @@ -77,6 +86,7 @@ goto:eof :doRemove rem Remove the service +rem //DS == Delete Service "%EXECUTABLE%" //DS//%SERVICE_ID% %LOG_OPTS% if not errorlevel 1 goto removed echo Failed removing '%SERVICE_ID%' service @@ -207,6 +217,7 @@ if not "%SERVICE_USERNAME%" == "" ( set SERVICE_PARAMS=%SERVICE_PARAMS% --ServiceUser "%SERVICE_USERNAME%" --ServicePassword "%SERVICE_PASSWORD%" ) ) +rem //IS == Install Service "%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %OPENSEARCH_START_TYPE% --StopTimeout %OPENSEARCH_STOP_TIMEOUT% --StartClass org.opensearch.bootstrap.OpenSearch --StartMethod main ++StartParams --quiet --StopClass org.opensearch.bootstrap.OpenSearch --StopMethod close --Classpath "%OPENSEARCH_CLASSPATH%" --JvmMs %JVM_MS% --JvmMx %JVM_MX% --JvmSs %JVM_SS% --JvmOptions %OTHER_JAVA_OPTS% ++JvmOptions %OPENSEARCH_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "%SERVICE_DISPLAY_NAME%" --Description "%SERVICE_DESCRIPTION%" --Jvm "%JAVA_HOME%%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%OPENSEARCH_HOME%" %SERVICE_PARAMS% ++Environment HOSTNAME="%%COMPUTERNAME%%" if not errorlevel 1 goto installed From 0bf6b2ffc2152401bad2750532950eb0c12340c3 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Wed, 24 Aug 2022 09:06:48 -0700 Subject: [PATCH 02/78] Removing dead code in RecoveryTarget. (#4278) * Removing dead code in RecoveryTarget. This code in RecoveryTarget is not invoked, all of these methods are implemented by the parent ReplicationTarget with the same behavior. Signed-off-by: Marc Handalian * PR Comments. Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- .../indices/recovery/RecoveryTarget.java | 47 ------------------- .../replication/common/ReplicationTarget.java | 2 +- 2 files changed, 1 insertion(+), 48 deletions(-) diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 652f3c9a55f53..7acc6b8b54fdd 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -177,51 +177,6 @@ public boolean reset(CancellableThreads newTargetCancellableThreads) throws IOEx return false; } - /** - * cancel the recovery. calling this method will clean temporary files and release the store - * unless this object is in use (in which case it will be cleaned once all ongoing users call - * {@link #decRef()} - *
<p/>
- * if {@link #cancellableThreads()} was used, the threads will be interrupted. - */ - public void cancel(String reason) { - if (finished.compareAndSet(false, true)) { - try { - logger.debug("recovery canceled (reason: [{}])", reason); - cancellableThreads.cancel(reason); - } finally { - // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now - decRef(); - } - } - } - - /** - * fail the recovery and call listener - * - * @param e exception that encapsulating the failure - * @param sendShardFailure indicates whether to notify the cluster-manager of the shard failure - */ - public void fail(RecoveryFailedException e, boolean sendShardFailure) { - super.fail(e, sendShardFailure); - } - - /** mark the current recovery as done */ - public void markAsDone() { - if (finished.compareAndSet(false, true)) { - assert multiFileWriter.tempFileNames.isEmpty() : "not all temporary files are renamed"; - try { - // this might still throw an exception ie. if the shard is CLOSED due to some other event. - // it's safer to decrement the reference in a try finally here. - indexShard.postRecovery("peer recovery done"); - } finally { - // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now - decRef(); - } - listener.onDone(state()); - } - } - @Override protected void closeInternal() { try { @@ -246,8 +201,6 @@ protected String getPrefix() { @Override protected void onDone() { assert multiFileWriter.tempFileNames.isEmpty() : "not all temporary files are renamed"; - // this might still throw an exception ie. if the shard is CLOSED due to some other event. - // it's safer to decrement the reference in a try finally here. indexShard.postRecovery("peer recovery done"); } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index 501ff46eeb2ff..42f4572fef3e4 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -155,7 +155,7 @@ public void markAsDone() { public void cancel(String reason) { if (finished.compareAndSet(false, true)) { try { - logger.debug("replication cancelled (reason: [{}])", reason); + logger.debug("replication/recovery cancelled (reason: [{}])", reason); onCancel(reason); } finally { // release the initial reference. 
replication files will be cleaned as soon as ref count goes to zero, potentially now From 5dd79479a2ca84a633a7583eb37df07504cd8a90 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Wed, 24 Aug 2022 15:13:33 -0700 Subject: [PATCH 03/78] Update the head ref to changelog verifier (#4296) * Update changelog contribution guide Signed-off-by: Kunal Kotwani * Fix reference to pull request Signed-off-by: Kunal Kotwani Signed-off-by: Kunal Kotwani --- .github/workflows/changelog_verifier.yml | 2 +- CHANGELOG.md | 1 + CONTRIBUTING.md | 2 -- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index 505b02426f22c..ee9bf5e18d0d5 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v3 with: token: ${{ secrets.GITHUB_TOKEN }} - ref: ${{ github.event.pull_request.head.ref }} + ref: ${{ github.event.pull_request.head.sha }} - uses: dangoslen/dependabot-changelog-helper@v1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 513fb92ad2675..bc7baef8f83fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) +- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) ### Security diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 16821b1915032..fc02d52f0bc3b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -131,8 +131,6 @@ As a contributor, you must ensure that every pull request has the changes listed Adding in the change is two step process - 1. Add your changes to the corresponding section within the CHANGELOG file with dummy pull request information, publish the PR - `Your change here ([#PR_NUMBER](PR_URL))` - 2. Update the entry for your change in [`CHANGELOG.md`](CHANGELOG.md) and make sure that you reference the pull request there. 
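Editor's note on the ref-counting pattern behind PATCH 02: the dead-code removal there is safe because the parent ReplicationTarget already centralizes the cancel/ref-count behavior that the deleted RecoveryTarget overrides duplicated — first caller flips a "finished" flag, the cancel hook runs, and the initial reference is released in a finally block so cleanup happens exactly once. A minimal, self-contained sketch of that pattern follows; the class and member names (AbstractTarget, onCancel, closeInternal) are illustrative stand-ins, not the actual OpenSearch API.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

abstract class AbstractTarget {
    // Guards against cancel/markAsDone racing: only the first caller proceeds.
    private final AtomicBoolean finished = new AtomicBoolean(false);
    // Starts at 1: the "initial reference" released when the target finishes.
    private final AtomicInteger refCount = new AtomicInteger(1);

    // Subclass hook invoked on cancellation (e.g. interrupt cancellable threads).
    protected abstract void onCancel(String reason);

    // Cleans temporary files and releases the store once no references remain.
    protected abstract void closeInternal();

    public final void cancel(String reason) {
        if (finished.compareAndSet(false, true)) {
            try {
                onCancel(reason);
            } finally {
                // Release the initial reference; files are cleaned as soon as
                // the ref count reaches zero, which may be immediately.
                decRef();
            }
        }
    }

    public final void incRef() {
        refCount.incrementAndGet();
    }

    public final void decRef() {
        if (refCount.decrementAndGet() == 0) {
            closeInternal();
        }
    }
}

The try/finally guarantees the initial reference is released exactly once even if the cancel hook throws, so in-flight users holding extra references (via incRef) keep the target alive and cleanup still runs when the last of them calls decRef.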
From 1bfabed0780c228f4f3c9a26aac2169e361c9426 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Thu, 25 Aug 2022 15:17:16 -0700 Subject: [PATCH 04/78] Add 2.x version to CHANGELOG (#4297) Signed-off-by: Kunal Kotwani Signed-off-by: Kunal Kotwani --- CHANGELOG.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc7baef8f83fa..c258100894555 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,5 +17,21 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Security +## [2.x] +### Added +- Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) + +### Changed + +### Deprecated + +### Removed + +### Fixed +- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) + +### Security + [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x From 1dbb63a8ee04812bd5f5ef5bc4995eac8b090438 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Fri, 26 Aug 2022 12:58:47 -0700 Subject: [PATCH 05/78] Do not fail replica shard due to primary closure (#4133) This commit prevents a replica shard from being failed in the case that a replication action to a replica is canceled due to the primary shard being closed. Signed-off-by: Andrew Ross Signed-off-by: Andrew Ross --- CHANGELOG.md | 1 + .../index/store/CorruptedFileIT.java | 101 ++++++++++++++++++ .../org/opensearch/OpenSearchException.java | 7 ++ .../PendingReplicationActions.java | 14 ++- .../replication/TransportWriteAction.java | 24 +++-- .../shard/PrimaryShardClosedException.java | 26 +++++ .../ExceptionSerializationTests.java | 2 + .../PendingReplicationActionsTests.java | 3 +- .../TransportWriteActionTests.java | 45 ++++++++ 9 files changed, 205 insertions(+), 18 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java diff --git a/CHANGELOG.md b/CHANGELOG.md index c258100894555..e988435a688da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) +- Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133)) ### Security diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index ee2067c591cef..960e17b76acb5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -40,6 +40,7 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.BytesRef; +import org.hamcrest.MatcherAssert; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -48,6 +49,7 @@ import org.opensearch.action.admin.indices.shards.IndicesShardStoresResponse; import 
org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.health.ClusterHealthStatus; @@ -108,6 +110,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; @@ -698,6 +701,104 @@ public void testReplicaCorruption() throws Exception { ensureGreen(TimeValue.timeValueSeconds(60)); } + public void testPrimaryCorruptionDuringReplicationDoesNotFailReplicaShard() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + final NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get(); + final List dataNodeStats = nodeStats.getNodes() + .stream() + .filter(stat -> stat.getNode().isDataNode()) + .collect(Collectors.toUnmodifiableList()); + MatcherAssert.assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2)); + + final NodeStats primaryNode = dataNodeStats.get(0); + final NodeStats replicaNode = dataNodeStats.get(1); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put("index.routing.allocation.include._name", primaryNode.getNode().getName()) + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) + .put("index.allocation.max_retries", Integer.MAX_VALUE) // keep on retrying + + ) + ); + ensureGreen(); + + // Add custom send behavior between primary and replica that will + // count down a latch to indicate that a replication operation is + // currently in flight, and then block on a second latch that will + // be released once the primary shard has been corrupted. + final CountDownLatch indexingInFlight = new CountDownLatch(1); + final CountDownLatch corruptionHasHappened = new CountDownLatch(1); + final MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode.getNode().getName() + )); + mockTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replicaNode.getNode().getName()), + (connection, requestId, action, request, options) -> { + if (request instanceof TransportReplicationAction.ConcreteShardRequest) { + indexingInFlight.countDown(); + try { + corruptionHasHappened.await(); + } catch (InterruptedException e) { + logger.info("Interrupted while waiting for corruption"); + } + } + connection.sendRequest(requestId, action, request, options); + } + ); + + // Configure the modified data node as a replica + final Settings build = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") + .put("index.routing.allocation.include._name", primaryNode.getNode().getName() + "," + replicaNode.getNode().getName()) + .build(); + client().admin().indices().prepareUpdateSettings("test").setSettings(build).get(); + client().admin().cluster().prepareReroute().get(); + ensureGreen(); + + // Create a snapshot repository. 
This repo is used to take a snapshot after + // corrupting a file, which causes the node to notice the corrupt data and + // close the shard. + assertAcked( + client().admin() + .cluster() + .preparePutRepository("test-repo") + .setType("fs") + .setSettings( + Settings.builder() + .put("location", randomRepoPath().toAbsolutePath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + ) + ); + + client().prepareIndex("test").setSource("field", "value").execute(); + indexingInFlight.await(); + + // Corrupt a file on the primary then take a snapshot. Snapshot should + // finish in the PARTIAL state since the corrupted file will cause a checksum + // validation failure. + final ShardRouting corruptedShardRouting = corruptRandomPrimaryFile(); + logger.info("--> {} corrupted", corruptedShardRouting); + final CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices("test") + .get(); + final SnapshotState snapshotState = createSnapshotResponse.getSnapshotInfo().state(); + MatcherAssert.assertThat("Expect file corruption to cause PARTIAL snapshot state", snapshotState, equalTo(SnapshotState.PARTIAL)); + + // Unblock the blocked indexing thread now that corruption on the primary has been confirmed + corruptionHasHappened.countDown(); + + // Assert the cluster returns to green status because the replica will be promoted to primary + ensureGreen(); + } + private int numShards(String... index) { ClusterState state = client().admin().cluster().prepareState().get().getState(); GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(index, false); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 4ebcd9622ce38..87efc03734d26 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -68,6 +68,7 @@ import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableMap; import static org.opensearch.Version.V_2_1_0; +import static org.opensearch.Version.V_3_0_0; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE; import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.common.xcontent.XContentParserUtils.ensureFieldName; @@ -1601,6 +1602,12 @@ private enum OpenSearchExceptionHandle { org.opensearch.indices.replication.common.ReplicationFailedException::new, 161, V_2_1_0 + ), + PRIMARY_SHARD_CLOSED_EXCEPTION( + org.opensearch.index.shard.PrimaryShardClosedException.class, + org.opensearch.index.shard.PrimaryShardClosedException::new, + 162, + V_3_0_0 ); final Class exceptionClass; diff --git a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java index b305c4c8c83a7..7087b64758888 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java +++ b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.RetryableAction; import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import 
org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.shard.ReplicationGroup; import org.opensearch.index.shard.ShardId; @@ -45,6 +46,7 @@ import java.util.Map; import java.util.Set; import java.util.function.Consumer; +import java.util.function.Supplier; /** * Pending Replication Actions @@ -121,7 +123,7 @@ synchronized void acceptNewTrackedAllocationIds(Set trackedAllocationIds } } - cancelActions(toCancel, "Replica left ReplicationGroup"); + cancelActions(toCancel, () -> new IndexShardClosedException(shardId, "Replica left ReplicationGroup")); } @Override @@ -129,15 +131,11 @@ public synchronized void close() { ArrayList>> toCancel = new ArrayList<>(onGoingReplicationActions.values()); onGoingReplicationActions.clear(); - cancelActions(toCancel, "Primary closed."); + cancelActions(toCancel, () -> new PrimaryShardClosedException(shardId)); } - private void cancelActions(ArrayList>> toCancel, String message) { + private void cancelActions(ArrayList>> toCancel, Supplier exceptionSupplier) { threadPool.executor(ThreadPool.Names.GENERIC) - .execute( - () -> toCancel.stream() - .flatMap(Collection::stream) - .forEach(action -> action.cancel(new IndexShardClosedException(shardId, message))) - ); + .execute(() -> toCancel.stream().flatMap(Collection::stream).forEach(action -> action.cancel(exceptionSupplier.get()))); } } diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java index 39fb89bc48568..7fc810808f560 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java @@ -52,6 +52,7 @@ import org.opensearch.index.IndexingPressureService; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.MapperParsingException; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.index.translog.Translog; @@ -514,15 +515,20 @@ public void failShardIfNeeded( if (TransportActions.isShardNotAvailableException(exception) == false) { logger.warn(new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception); } - shardStateAction.remoteShardFailed( - replica.shardId(), - replica.allocationId().getId(), - primaryTerm, - true, - message, - exception, - listener - ); + // If a write action fails due to the closure of the primary shard + // then the replicas should not be marked as failed since they are + // still up-to-date with the (now closed) primary shard + if (exception instanceof PrimaryShardClosedException == false) { + shardStateAction.remoteShardFailed( + replica.shardId(), + replica.allocationId().getId(), + primaryTerm, + true, + message, + exception, + listener + ); + } } @Override diff --git a/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java b/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java new file mode 100644 index 0000000000000..d1b2bf9079289 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java @@ -0,0 +1,26 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.index.shard; + +import java.io.IOException; + +import 
org.opensearch.common.io.stream.StreamInput; + +/** + * Exception to indicate failures are caused due to the closure of the primary + * shard. + * + * @opensearch.internal + */ +public class PrimaryShardClosedException extends IndexShardClosedException { + public PrimaryShardClosedException(ShardId shardId) { + super(shardId, "Primary closed"); + } + + public PrimaryShardClosedException(StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index 5a93d7c0bd86e..26b0ce7e9e20c 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -80,6 +80,7 @@ import org.opensearch.index.seqno.RetentionLeaseNotFoundException; import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexShardState; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardNotInPrimaryModeException; import org.opensearch.indices.IndexTemplateMissingException; @@ -858,6 +859,7 @@ public void testIds() { ids.put(159, NodeHealthCheckFailureException.class); ids.put(160, NoSeedNodeLeftException.class); ids.put(161, ReplicationFailedException.class); + ids.put(162, PrimaryShardClosedException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java b/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java index ec0cefed842cd..66d3b843529ab 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java @@ -38,6 +38,7 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.shard.IndexShardClosedException; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -102,7 +103,7 @@ public void testAllocationIdActionWillBeCancelledOnClose() { pendingReplication.addPendingAction(allocationId, action); action.run(); pendingReplication.close(); - expectThrows(IndexShardClosedException.class, future::actionGet); + expectThrows(PrimaryShardClosedException.class, future::actionGet); } private class TestAction extends RetryableAction { diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java index 4da32a890fd0e..137aca4966936 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java @@ -32,6 +32,7 @@ package org.opensearch.action.support.replication; +import org.hamcrest.MatcherAssert; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; @@ -57,6 +58,7 @@ import org.opensearch.index.IndexService; import 
org.opensearch.index.IndexingPressureService; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.translog.Translog; @@ -91,6 +93,7 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.emptyArray; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; @@ -395,6 +398,48 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { } } + public void testPrimaryClosedDoesNotFailShard() { + final CapturingTransport transport = new CapturingTransport(); + final TransportService transportService = transport.createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> clusterService.localNode(), + null, + Collections.emptySet() + ); + transportService.start(); + transportService.acceptIncomingRequests(); + final ShardStateAction shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool); + final TestAction action = new TestAction( + Settings.EMPTY, + "internal:testAction", + transportService, + clusterService, + shardStateAction, + threadPool + ); + final String index = "test"; + final ShardId shardId = new ShardId(index, "_na_", 0); + final ClusterState state = ClusterStateCreationUtils.stateWithActivePrimary(index, true, 1, 0); + ClusterServiceUtils.setState(clusterService, state); + final long primaryTerm = state.metadata().index(index).primaryTerm(0); + final ShardRouting shardRouting = state.routingTable().shardRoutingTable(shardId).replicaShards().get(0); + + // Assert that failShardIfNeeded is a no-op for the PrimaryShardClosedException failure + final AtomicInteger callbackCount = new AtomicInteger(0); + action.newReplicasProxy() + .failShardIfNeeded( + shardRouting, + primaryTerm, + "test", + new PrimaryShardClosedException(shardId), + ActionListener.wrap(callbackCount::incrementAndGet) + ); + MatcherAssert.assertThat(transport.getCapturedRequestsAndClear(), emptyArray()); + MatcherAssert.assertThat(callbackCount.get(), equalTo(0)); + } + private class TestAction extends TransportWriteAction { private final boolean withDocumentFailureOnPrimary; From c62cecb048bafe8b79709660956fe4cba5548872 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 26 Aug 2022 17:46:01 -0400 Subject: [PATCH 06/78] Some dependency updates (#4308) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 10 +++++----- client/rest/licenses/commons-codec-1.13.jar.sha1 | 1 - client/rest/licenses/commons-codec-1.15.jar.sha1 | 1 + client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 | 1 - client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 | 1 + client/rest/licenses/httpcore-4.4.12.jar.sha1 | 1 - client/rest/licenses/httpcore-4.4.15.jar.sha1 | 1 + client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 | 1 - client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 | 1 + client/sniffer/licenses/commons-codec-1.13.jar.sha1 | 1 - client/sniffer/licenses/commons-codec-1.15.jar.sha1 | 1 + client/sniffer/licenses/httpcore-4.4.12.jar.sha1 | 1 - client/sniffer/licenses/httpcore-4.4.15.jar.sha1 | 1 + .../licenses/commons-codec-1.13.jar.sha1 | 1 - .../licenses/commons-codec-1.15.jar.sha1 | 1 + 
.../licenses/commons-codec-1.13.jar.sha1 | 1 - .../licenses/commons-codec-1.15.jar.sha1 | 1 + .../licenses/httpcore-4.4.12.jar.sha1 | 1 - .../licenses/httpcore-4.4.15.jar.sha1 | 1 + .../discovery-ec2/licenses/commons-codec-1.13.jar.sha1 | 1 - .../discovery-ec2/licenses/commons-codec-1.15.jar.sha1 | 1 + .../discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 | 1 - .../discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 | 1 + .../discovery-gce/licenses/commons-codec-1.13.jar.sha1 | 1 - .../discovery-gce/licenses/commons-codec-1.15.jar.sha1 | 1 + .../discovery-gce/licenses/httpcore-4.4.12.jar.sha1 | 1 - .../discovery-gce/licenses/httpcore-4.4.15.jar.sha1 | 1 + .../licenses/commons-codec-1.13.jar.sha1 | 1 - .../licenses/commons-codec-1.15.jar.sha1 | 1 + .../licenses/slf4j-api-1.6.2.jar.sha1 | 1 - .../licenses/slf4j-api-1.7.36.jar.sha1 | 1 + .../repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 | 1 - .../licenses/slf4j-api-1.7.36.jar.sha1 | 1 + .../licenses/commons-codec-1.13.jar.sha1 | 1 - .../licenses/commons-codec-1.15.jar.sha1 | 1 + .../licenses/commons-codec-1.13.jar.sha1 | 1 - .../licenses/commons-codec-1.15.jar.sha1 | 1 + .../repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 | 1 - .../repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 | 1 + .../repository-s3/licenses/commons-codec-1.13.jar.sha1 | 1 - .../repository-s3/licenses/commons-codec-1.15.jar.sha1 | 1 + .../repository-s3/licenses/httpcore-4.4.12.jar.sha1 | 1 - .../repository-s3/licenses/httpcore-4.4.15.jar.sha1 | 1 + 44 files changed, 27 insertions(+), 26 deletions(-) delete mode 100644 client/rest/licenses/commons-codec-1.13.jar.sha1 create mode 100644 client/rest/licenses/commons-codec-1.15.jar.sha1 delete mode 100644 client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 create mode 100644 client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 delete mode 100644 client/rest/licenses/httpcore-4.4.12.jar.sha1 create mode 100644 client/rest/licenses/httpcore-4.4.15.jar.sha1 delete mode 100644 client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 create mode 100644 client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 delete mode 100644 client/sniffer/licenses/commons-codec-1.13.jar.sha1 create mode 100644 client/sniffer/licenses/commons-codec-1.15.jar.sha1 delete mode 100644 client/sniffer/licenses/httpcore-4.4.12.jar.sha1 create mode 100644 client/sniffer/licenses/httpcore-4.4.15.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1 create mode 100644 plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 delete mode 100644 plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 create mode 100644 plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 delete mode 100644 plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 delete mode 100644 
plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 create mode 100644 plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1 create mode 100644 plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 create mode 100644 plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index e988435a688da..26ff011609635 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) ### Changed + - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) ### Deprecated diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 4af1acfed0ab2..876910d5351d0 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -15,7 +15,7 @@ snakeyaml = 1.26 icu4j = 70.1 supercsv = 2.4.0 log4j = 2.17.1 -slf4j = 1.6.2 +slf4j = 1.7.36 asm = 9.3 # when updating the JNA version, also update the version in buildSrc/build.gradle @@ -26,10 +26,10 @@ joda = 2.10.13 # client dependencies httpclient = 4.5.13 -httpcore = 4.4.12 -httpasyncclient = 4.1.4 +httpcore = 4.4.15 +httpasyncclient = 4.1.5 commonslogging = 1.2 -commonscodec = 1.13 +commonscodec = 1.15 # plugin dependencies aws = 1.12.270 @@ -42,7 +42,7 @@ bouncycastle=1.70 randomizedrunner = 2.7.1 junit = 4.13.2 hamcrest = 2.1 -mockito = 4.6.1 +mockito = 4.7.0 objenesis = 3.2 bytebuddy = 1.12.12 diff --git a/client/rest/licenses/commons-codec-1.13.jar.sha1 b/client/rest/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/client/rest/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/client/rest/licenses/commons-codec-1.15.jar.sha1 b/client/rest/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/client/rest/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 b/client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 deleted file mode 100644 index 8360ab45c7ab3..0000000000000 --- a/client/rest/licenses/httpasyncclient-4.1.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3a3240681faae3fa46b573a4c7e50cec9db0d86 \ No newline at end of file diff --git 
a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 new file mode 100644 index 0000000000000..366a9e31069a6 --- /dev/null +++ b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 @@ -0,0 +1 @@ +cd18227f1eb8e9a263286c1d7362ceb24f6f9b32 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-4.4.12.jar.sha1 b/client/rest/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/client/rest/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/client/rest/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 deleted file mode 100644 index 4de932dc5aca0..0000000000000 --- a/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84cd29eca842f31db02987cfedea245af020198b \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..251b35ab6a1a5 --- /dev/null +++ b/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 @@ -0,0 +1 @@ +85d2b6825d42db909a1474f0ffbd6328429b7a32 \ No newline at end of file diff --git a/client/sniffer/licenses/commons-codec-1.13.jar.sha1 b/client/sniffer/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/client/sniffer/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/client/sniffer/licenses/commons-codec-1.15.jar.sha1 b/client/sniffer/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/client/sniffer/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/analysis-phonetic/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ 
b/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/discovery-ec2/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/discovery-gce/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 
new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1 b/plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 b/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/plugins/ingest-attachment/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/ingest-attachment/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/repository-azure/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/repository-gcs/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 
b/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 deleted file mode 100644 index a2f93ea55802b..0000000000000 --- a/plugins/repository-hdfs/licenses/slf4j-api-1.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/repository-hdfs/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1 b/plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1 deleted file mode 100644 index 66b72c414d63a..0000000000000 --- a/plugins/repository-s3/licenses/commons-codec-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f18e1aa31031d89db6f01ba05d501258ce69d2c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 new file mode 100644 index 0000000000000..42a03b5d7a376 --- /dev/null +++ b/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 @@ -0,0 +1 @@ +7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file From 65f966ed71ff9bc0a53490ee801943869c0f536d Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Fri, 26 Aug 2022 15:42:31 -0700 Subject: [PATCH 07/78] Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses (#4307) * Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses 
Signed-off-by: Tianli Feng * Add changelog Signed-off-by: Tianli Feng Signed-off-by: Tianli Feng Co-authored-by: Andrew Ross --- CHANGELOG.md | 1 + .../admin/indices/get/GetIndexRequest.java | 2 +- .../indices/get/GetIndexRequestBuilder.java | 2 +- .../mapping/get/GetMappingsRequest.java | 2 +- .../get/GetMappingsRequestBuilder.java | 2 +- .../indices/get/GetIndexRequestTests.java | 21 +++++++++++++++++++ 6 files changed, 26 insertions(+), 4 deletions(-) create mode 100644 server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 26ff011609635..8132c1281e412 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) +- Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/OpenSearch/pull/4307)) - Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133)) ### Security diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java index ee0b204c77aa3..9a7fae9f84a98 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.info.ClusterInfoRequest; +import org.opensearch.action.support.master.info.ClusterInfoRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.util.ArrayUtils; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java index ed106c44ea36a..3019191e5570e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.get; -import org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder; +import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java index 1fd9323edd2f8..2c9bec8398b66 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.action.ActionRequestValidationException; -import 
org.opensearch.action.support.clustermanager.info.ClusterInfoRequest; +import org.opensearch.action.support.master.info.ClusterInfoRequest; import org.opensearch.common.io.stream.StreamInput; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java index 0a6d7cac79133..85bf8c2ffd9c6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.mapping.get; -import org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder; +import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java new file mode 100644 index 0000000000000..f0d3db71c27b7 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.get; + +import org.opensearch.action.support.master.info.ClusterInfoRequest; +import org.opensearch.test.OpenSearchTestCase; + +import static org.hamcrest.Matchers.is; + +public class GetIndexRequestTests extends OpenSearchTestCase { + public void testGetIndexRequestExtendsClusterInfoRequestOfDeprecatedClassPath() { + GetIndexRequest getIndexRequest = new GetIndexRequest().indices("test"); + assertThat(getIndexRequest instanceof ClusterInfoRequest, is(true)); + } +} From 7ea6e8865fa007471c187fe7b6cd7007059d6c69 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Mon, 29 Aug 2022 10:05:48 -0500 Subject: [PATCH 08/78] [BUG] Create logs directory before running OpenSearch on Windows (#4305) * [BUG] Create logs directory before running OpenSearch on Windows Signed-off-by: Alex Burck * update changlog pr link Signed-off-by: Alex Burck Signed-off-by: Alex Burck --- CHANGELOG.md | 1 + distribution/src/bin/opensearch-service.bat | 4 ++++ distribution/src/bin/opensearch.bat | 6 ++++++ 3 files changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8132c1281e412..360b47d05ff8f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) +- `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305)) - Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/OpenSearch/pull/4307)) - Do not fail replica shard due to primary 
closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133)) diff --git a/distribution/src/bin/opensearch-service.bat b/distribution/src/bin/opensearch-service.bat index 8b91d806ef64f..a11dc8316e8b1 100644 --- a/distribution/src/bin/opensearch-service.bat +++ b/distribution/src/bin/opensearch-service.bat @@ -24,6 +24,10 @@ exit /B 1 set OPENSEARCH_VERSION=${project.version} if "%SERVICE_LOG_DIR%" == "" set SERVICE_LOG_DIR=%OPENSEARCH_HOME%\logs +rem The logs directory must exist for the service to start. +if not exist "%SERVICE_LOG_DIR%" ( + mkdir "%SERVICE_LOG_DIR%" +) if "x%1x" == "xx" goto displayUsage set SERVICE_CMD=%1 diff --git a/distribution/src/bin/opensearch.bat b/distribution/src/bin/opensearch.bat index 49a12aa5c968d..dda15124e1654 100644 --- a/distribution/src/bin/opensearch.bat +++ b/distribution/src/bin/opensearch.bat @@ -56,6 +56,12 @@ IF ERRORLEVEL 1 ( EXIT /B %ERRORLEVEL% ) +if "%SERVICE_LOG_DIR%" == "" set SERVICE_LOG_DIR=%OPENSEARCH_HOME%\logs +rem The logs directory must exist for the service to start. +if not exist "%SERVICE_LOG_DIR%" ( + mkdir "%SERVICE_LOG_DIR%" +) + SET KEYSTORE_PASSWORD= IF "%checkpassword%"=="Y" ( CALL "%~dp0opensearch-keystore.bat" has-passwd --silent From cd961f39bf57ae92b4486451ce2841b9682c2582 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Mon, 29 Aug 2022 22:47:52 +0530 Subject: [PATCH 09/78] Use RemoteSegmentStoreDirectory instead of RemoteDirectory (#4240) * Use RemoteSegmentStoreDirectory instead of RemoteDirectory Signed-off-by: Sachin Kale --- CHANGELOG.md | 3 +- .../org/opensearch/index/IndexModule.java | 3 +- .../opensearch/index/shard/IndexShard.java | 5 +- .../shard/RemoteStoreRefreshListener.java | 176 +++++++++--- .../opensearch/index/shard/StoreRecovery.java | 7 +- .../index/store/RemoteIndexInput.java | 35 ++- .../store/RemoteSegmentStoreDirectory.java | 75 ++++- ...> RemoteSegmentStoreDirectoryFactory.java} | 22 +- .../opensearch/indices/IndicesService.java | 5 +- .../main/java/org/opensearch/node/Node.java | 6 +- .../opensearch/index/IndexModuleTests.java | 4 +- .../index/shard/IndexShardTests.java | 3 +- .../RemoteStoreRefreshListenerTests.java | 259 ++++++++++++------ .../index/store/RemoteIndexInputTests.java | 31 ++- ...oteSegmentStoreDirectoryFactoryTests.java} | 28 +- .../RemoteSegmentStoreDirectoryTests.java | 160 +++++++++-- .../snapshots/SnapshotResiliencyTests.java | 4 +- .../index/shard/IndexShardTestCase.java | 19 +- 18 files changed, 628 insertions(+), 217 deletions(-) rename server/src/main/java/org/opensearch/index/store/{RemoteDirectoryFactory.java => RemoteSegmentStoreDirectoryFactory.java} (58%) rename server/src/test/java/org/opensearch/index/store/{RemoteDirectoryFactoryTests.java => RemoteSegmentStoreDirectoryFactoryTests.java} (70%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 360b47d05ff8f..f11f407434e6b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) ### Changed - - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) +- Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) +- Use RemoteSegmentStoreDirectory instead of RemoteDirectory 
([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index f8604caeab414..e52a2ba39ed52 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -70,7 +70,6 @@ import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.FsDirectoryFactory; -import org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -487,7 +486,7 @@ public IndexService newIndexService( NamedWriteableRegistry namedWriteableRegistry, BooleanSupplier idFieldDataEnabled, ValuesSourceRegistry valuesSourceRegistry, - RemoteDirectoryFactory remoteDirectoryFactory + IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory ) throws IOException { final IndexEventListener eventListener = freeze(); Function> readerWrapperFactory = indexReaderWrapper diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 67a8e691fda0d..670af1f1c6fd9 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -48,8 +48,6 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.util.SetOnce; import org.apache.lucene.util.ThreadInterruptedException; import org.opensearch.Assertions; @@ -3228,8 +3226,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro final List internalRefreshListener = new ArrayList<>(); internalRefreshListener.add(new RefreshMetricUpdater(refreshMetric)); if (isRemoteStoreEnabled()) { - Directory remoteDirectory = ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); - internalRefreshListener.add(new RemoteStoreRefreshListener(store.directory(), remoteDirectory)); + internalRefreshListener.add(new RemoteStoreRefreshListener(this)); } if (this.checkpointPublisher != null && indexSettings.isSegRepEnabled() && shardRouting.primary()) { internalRefreshListener.add(new CheckpointRefreshListener(this, this.checkpointPublisher)); diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 4b549ec485c0e..0d32e8d56e4d2 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -11,32 +11,54 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; import 
org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.index.engine.EngineException; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; import java.io.IOException; -import java.nio.file.NoSuchFileException; -import java.util.Arrays; -import java.util.HashSet; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; /** * RefreshListener implementation to upload newly created segment files to the remote store + * + * @opensearch.internal */ -public class RemoteStoreRefreshListener implements ReferenceManager.RefreshListener { +public final class RemoteStoreRefreshListener implements ReferenceManager.RefreshListener { + // Visible for testing + static final Set EXCLUDE_FILES = Set.of("write.lock"); + // Visible for testing + static final int LAST_N_METADATA_FILES_TO_KEEP = 10; + private final IndexShard indexShard; private final Directory storeDirectory; - private final Directory remoteDirectory; - // ToDo: This can be a map with metadata of the uploaded file as value of the map (GitHub #3398) - private final Set filesUploadedToRemoteStore; + private final RemoteSegmentStoreDirectory remoteDirectory; + private final Map localSegmentChecksumMap; + private long primaryTerm; private static final Logger logger = LogManager.getLogger(RemoteStoreRefreshListener.class); - public RemoteStoreRefreshListener(Directory storeDirectory, Directory remoteDirectory) throws IOException { - this.storeDirectory = storeDirectory; - this.remoteDirectory = remoteDirectory; - // ToDo: Handle failures in reading list of files (GitHub #3397) - this.filesUploadedToRemoteStore = new HashSet<>(Arrays.asList(remoteDirectory.listAll())); + public RemoteStoreRefreshListener(IndexShard indexShard) { + this.indexShard = indexShard; + this.storeDirectory = indexShard.store().directory(); + this.remoteDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()) + .getDelegate()).getDelegate(); + this.primaryTerm = indexShard.getOperationPrimaryTerm(); + localSegmentChecksumMap = new HashMap<>(); } @Override @@ -46,42 +68,112 @@ public void beforeRefresh() throws IOException { /** * Upload new segment files created as part of the last refresh to the remote segment store. - * The method also deletes segment files from remote store which are not part of local filesystem. + * This method also uploads remote_segments_metadata file which contains metadata of each segment file uploaded. * @param didRefresh true if the refresh opened a new reference - * @throws IOException in case of I/O error in reading list of local files */ @Override - public void afterRefresh(boolean didRefresh) throws IOException { - if (didRefresh) { - Set localFiles = Set.of(storeDirectory.listAll()); - localFiles.stream().filter(file -> !filesUploadedToRemoteStore.contains(file)).forEach(file -> { - try { - remoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); - filesUploadedToRemoteStore.add(file); - } catch (NoSuchFileException e) { - logger.info( - () -> new ParameterizedMessage("The file {} does not exist anymore. 
It can happen in case of temp files", file), - e - ); - } catch (IOException e) { - // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) - logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", file), e); - } - }); + public void afterRefresh(boolean didRefresh) { + synchronized (this) { + try { + if (indexShard.shardRouting.primary()) { + if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { + this.primaryTerm = indexShard.getOperationPrimaryTerm(); + this.remoteDirectory.init(); + } + try { + String lastCommittedLocalSegmentFileName = SegmentInfos.getLastCommitSegmentsFileName(storeDirectory); + if (!remoteDirectory.containsFile( + lastCommittedLocalSegmentFileName, + getChecksumOfLocalFile(lastCommittedLocalSegmentFileName) + )) { + deleteStaleCommits(); + } + try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { + SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); + Collection refreshedLocalFiles = segmentInfos.files(true); + + List segmentInfosFiles = refreshedLocalFiles.stream() + .filter(file -> file.startsWith(IndexFileNames.SEGMENTS)) + .collect(Collectors.toList()); + Optional latestSegmentInfos = segmentInfosFiles.stream() + .max(Comparator.comparingLong(IndexFileNames::parseGeneration)); - Set remoteFilesToBeDeleted = new HashSet<>(); - // ToDo: Instead of deleting files in sync, mark them and delete in async/periodic flow (GitHub #3142) - filesUploadedToRemoteStore.stream().filter(file -> !localFiles.contains(file)).forEach(file -> { - try { - remoteDirectory.deleteFile(file); - remoteFilesToBeDeleted.add(file); - } catch (IOException e) { - // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) - logger.warn(() -> new ParameterizedMessage("Exception while deleting file {} from the remote segment store", file), e); + if (latestSegmentInfos.isPresent()) { + refreshedLocalFiles.addAll(SegmentInfos.readCommit(storeDirectory, latestSegmentInfos.get()).files(true)); + segmentInfosFiles.stream() + .filter(file -> !file.equals(latestSegmentInfos.get())) + .forEach(refreshedLocalFiles::remove); + + boolean uploadStatus = uploadNewSegments(refreshedLocalFiles); + if (uploadStatus) { + remoteDirectory.uploadMetadata( + refreshedLocalFiles, + storeDirectory, + indexShard.getOperationPrimaryTerm(), + segmentInfos.getGeneration() + ); + localSegmentChecksumMap.keySet() + .stream() + .filter(file -> !refreshedLocalFiles.contains(file)) + .collect(Collectors.toSet()) + .forEach(localSegmentChecksumMap::remove); + } + } + } catch (EngineException e) { + logger.warn("Exception while reading SegmentInfosSnapshot", e); + } + } catch (IOException e) { + // We don't want to fail refresh if upload of new segments fails. The missed segments will be re-tried + // in the next refresh. This should not affect durability of the indexed data after remote trans-log integration. 
+ logger.warn("Exception while uploading new segments to the remote segment store", e); + } } - }); + } catch (Throwable t) { + logger.error("Exception in RemoteStoreRefreshListener.afterRefresh()", t); + } + } + } + + // Visible for testing + boolean uploadNewSegments(Collection localFiles) throws IOException { + AtomicBoolean uploadSuccess = new AtomicBoolean(true); + localFiles.stream().filter(file -> !EXCLUDE_FILES.contains(file)).filter(file -> { + try { + return !remoteDirectory.containsFile(file, getChecksumOfLocalFile(file)); + } catch (IOException e) { + logger.info( + "Exception while reading checksum of local segment file: {}, ignoring the exception and re-uploading the file", + file + ); + return true; + } + }).forEach(file -> { + try { + remoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); + } catch (IOException e) { + uploadSuccess.set(false); + // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) + logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", file), e); + } + }); + return uploadSuccess.get(); + } + + private String getChecksumOfLocalFile(String file) throws IOException { + if (!localSegmentChecksumMap.containsKey(file)) { + try (IndexInput indexInput = storeDirectory.openInput(file, IOContext.DEFAULT)) { + String checksum = Long.toString(CodecUtil.retrieveChecksum(indexInput)); + localSegmentChecksumMap.put(file, checksum); + } + } + return localSegmentChecksumMap.get(file); + } - remoteFilesToBeDeleted.forEach(filesUploadedToRemoteStore::remove); + private void deleteStaleCommits() { + try { + remoteDirectory.deleteStaleSegments(LAST_N_METADATA_FILES_TO_KEEP); + } catch (IOException e) { + logger.info("Exception while deleting stale commits from remote segment store, will retry delete post next commit", e); } } } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 1190e8e6ab3d2..06916c4cc87fe 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -449,7 +449,12 @@ private void recoverFromRemoteStore(IndexShard indexShard) throws IndexShardReco } indexShard.preRecovery(); indexShard.prepareForIndexRecovery(); - final Directory remoteDirectory = remoteStore.directory(); + assert remoteStore.directory() instanceof FilterDirectory : "Store.directory is not an instance of FilterDirectory"; + FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory(); + assert remoteStoreDirectory.getDelegate() instanceof FilterDirectory + : "Store.directory is not enclosing an instance of FilterDirectory"; + FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate(); + final Directory remoteDirectory = byteSizeCachingStoreDirectory.getDelegate(); final Store store = indexShard.store(); final Directory storeDirectory = store.directory(); store.incRef(); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java b/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java index 8f8d5dd5418ae..2c809563ca961 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java @@ -27,27 +27,37 @@ public class RemoteIndexInput extends IndexInput { private final InputStream inputStream; private 
final long size; + private long filePointer; public RemoteIndexInput(String name, InputStream inputStream, long size) { super(name); this.inputStream = inputStream; this.size = size; + this.filePointer = 0; } @Override public byte readByte() throws IOException { byte[] buffer = new byte[1]; - inputStream.read(buffer); + int numberOfBytesRead = inputStream.read(buffer); + if (numberOfBytesRead != -1) { + filePointer += numberOfBytesRead; + } return buffer[0]; } @Override public void readBytes(byte[] b, int offset, int len) throws IOException { int bytesRead = inputStream.read(b, offset, len); - while (bytesRead > 0 && bytesRead < len) { - len -= bytesRead; - offset += bytesRead; - bytesRead = inputStream.read(b, offset, len); + if (bytesRead == len) { + filePointer += bytesRead; + } else { + while (bytesRead > 0 && bytesRead < len) { + filePointer += bytesRead; + len -= bytesRead; + offset += bytesRead; + bytesRead = inputStream.read(b, offset, len); + } } } @@ -61,11 +71,6 @@ public long length() { return size; } - @Override - public void seek(long pos) throws IOException { - inputStream.skip(pos); - } - /** * Guaranteed to throw an exception and leave the RemoteIndexInput unmodified. * This method is not implemented as it is not used for the file transfer to/from the remote store. @@ -73,10 +78,18 @@ public void seek(long pos) throws IOException { * @throws UnsupportedOperationException always */ @Override - public long getFilePointer() { + public void seek(long pos) throws IOException { throw new UnsupportedOperationException(); } + /** + * Returns the current position in this file in terms of number of bytes read so far. + */ + @Override + public long getFilePointer() { + return filePointer; + } + /** * Guaranteed to throw an exception and leave the RemoteIndexInput unmodified. * This method is not implemented as it is not used for the file transfer to/from the remote store. diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index d7d6b29d08bfc..505ad6fafd550 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -24,9 +24,13 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; /** @@ -132,8 +136,9 @@ private Map readMetadataFile(String metadataFil /** * Metadata of a segment that is uploaded to remote segment store. 
*/ - static class UploadedSegmentMetadata { - private static final String SEPARATOR = "::"; + public static class UploadedSegmentMetadata { + // Visible for testing + static final String SEPARATOR = "::"; private final String originalFilename; private final String uploadedFilename; private final String checksum; @@ -366,7 +371,69 @@ private String getLocalSegmentFilename(String remoteFilename) { } // Visible for testing - Map getSegmentsUploadedToRemoteStore() { - return this.segmentsUploadedToRemoteStore; + public Map getSegmentsUploadedToRemoteStore() { + return Collections.unmodifiableMap(this.segmentsUploadedToRemoteStore); + } + + /** + * Delete stale segment and metadata files + * One metadata file is kept per commit (refresh updates the same file). To read segments uploaded to remote store, + * we just need to read the latest metadata file. All the stale metadata files can be safely deleted. + * @param lastNMetadataFilesToKeep number of metadata files to keep + * @throws IOException in case of I/O error while reading from / writing to remote segment store + */ + public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException { + Collection metadataFiles = remoteMetadataDirectory.listFilesByPrefix(MetadataFilenameUtils.METADATA_PREFIX); + List sortedMetadataFileList = metadataFiles.stream().sorted(METADATA_FILENAME_COMPARATOR).collect(Collectors.toList()); + if (sortedMetadataFileList.size() <= lastNMetadataFilesToKeep) { + logger.info( + "Number of commits in remote segment store={}, lastNMetadataFilesToKeep={}", + sortedMetadataFileList.size(), + lastNMetadataFilesToKeep + ); + return; + } + List latestNMetadataFiles = sortedMetadataFileList.subList( + sortedMetadataFileList.size() - lastNMetadataFilesToKeep, + sortedMetadataFileList.size() + ); + Map activeSegmentFilesMetadataMap = new HashMap<>(); + Set activeSegmentRemoteFilenames = new HashSet<>(); + for (String metadataFile : latestNMetadataFiles) { + Map segmentMetadataMap = readMetadataFile(metadataFile); + activeSegmentFilesMetadataMap.putAll(segmentMetadataMap); + activeSegmentRemoteFilenames.addAll( + segmentMetadataMap.values().stream().map(metadata -> metadata.uploadedFilename).collect(Collectors.toSet()) + ); + } + for (String metadataFile : sortedMetadataFileList.subList(0, sortedMetadataFileList.size() - lastNMetadataFilesToKeep)) { + Map staleSegmentFilesMetadataMap = readMetadataFile(metadataFile); + Set staleSegmentRemoteFilenames = staleSegmentFilesMetadataMap.values() + .stream() + .map(metadata -> metadata.uploadedFilename) + .collect(Collectors.toSet()); + AtomicBoolean deletionSuccessful = new AtomicBoolean(true); + staleSegmentRemoteFilenames.stream().filter(file -> !activeSegmentRemoteFilenames.contains(file)).forEach(file -> { + try { + remoteDataDirectory.deleteFile(file); + if (!activeSegmentFilesMetadataMap.containsKey(getLocalSegmentFilename(file))) { + segmentsUploadedToRemoteStore.remove(getLocalSegmentFilename(file)); + } + } catch (NoSuchFileException e) { + logger.info("Segment file {} corresponding to metadata file {} does not exist in remote", file, metadataFile); + } catch (IOException e) { + deletionSuccessful.set(false); + logger.info( + "Exception while deleting segment file {} corresponding to metadata file {}. 
Deletion will be re-tried", + file, + metadataFile + ); + } + }); + if (deletionSuccessful.get()) { + logger.info("Deleting stale metadata file {} from remote segment store", metadataFile); + remoteMetadataDirectory.deleteFile(metadataFile); + } + } } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java similarity index 58% rename from server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java rename to server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 62f398cdad207..e77eb52bd3891 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -27,11 +27,11 @@ * * @opensearch.internal */ -public class RemoteDirectoryFactory implements IndexStorePlugin.RemoteDirectoryFactory { +public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.RemoteDirectoryFactory { private final Supplier repositoriesService; - public RemoteDirectoryFactory(Supplier repositoriesService) { + public RemoteSegmentStoreDirectoryFactory(Supplier repositoriesService) { this.repositoriesService = repositoriesService; } @@ -39,13 +39,23 @@ public RemoteDirectoryFactory(Supplier repositoriesService) public Directory newDirectory(String repositoryName, IndexSettings indexSettings, ShardPath path) throws IOException { try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; - BlobPath blobPath = new BlobPath(); - blobPath = blobPath.add(indexSettings.getIndex().getName()).add(String.valueOf(path.getShardId().getId())); - BlobContainer blobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(blobPath); - return new RemoteDirectory(blobContainer); + BlobPath commonBlobPath = ((BlobStoreRepository) repository).basePath(); + commonBlobPath = commonBlobPath.add(indexSettings.getIndex().getUUID()) + .add(String.valueOf(path.getShardId().getId())) + .add("segments"); + + RemoteDirectory dataDirectory = createRemoteDirectory(repository, commonBlobPath, "data"); + RemoteDirectory metadataDirectory = createRemoteDirectory(repository, commonBlobPath, "metadata"); + + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory); } catch (RepositoryMissingException e) { throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e); } } + private RemoteDirectory createRemoteDirectory(Repository repository, BlobPath commonBlobPath, String extention) { + BlobPath extendedPath = commonBlobPath.add(extention); + BlobContainer dataBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(extendedPath); + return new RemoteDirectory(dataBlobContainer); + } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index fdb609ba7bbff..6808803ee0988 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -132,7 +132,6 @@ import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; import org.opensearch.index.shard.ShardId; -import 
org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -266,7 +265,7 @@ public class IndicesService extends AbstractLifecycleComponent private final Set danglingIndicesToWrite = Sets.newConcurrentHashSet(); private final boolean nodeWriteDanglingIndicesInfo; private final ValuesSourceRegistry valuesSourceRegistry; - private final RemoteDirectoryFactory remoteDirectoryFactory; + private final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory; @Override protected void doStart() { @@ -295,7 +294,7 @@ public IndicesService( Map directoryFactories, ValuesSourceRegistry valuesSourceRegistry, Map recoveryStateFactories, - RemoteDirectoryFactory remoteDirectoryFactory + IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory ) { this.settings = settings; this.threadPool = threadPool; diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index d3f0912cab638..3f4eadc52fd2a 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -39,12 +39,12 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.index.IndexingPressureService; -import org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.threadpool.RunnableTaskExecutionListener; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.Assertions; import org.opensearch.Build; @@ -629,7 +629,9 @@ protected Node( rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); - final RemoteDirectoryFactory remoteDirectoryFactory = new RemoteDirectoryFactory(repositoriesServiceReference::get); + final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( + repositoriesServiceReference::get + ); final IndicesService indicesService = new IndicesService( settings, diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index 45d93a5a12847..6bfdd9ae16773 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -89,7 +89,7 @@ import org.opensearch.index.similarity.NonNegativeScoresSimilarity; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.FsDirectoryFactory; -import org.opensearch.index.store.RemoteDirectoryFactory; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.analysis.AnalysisModule; @@ -234,7 +234,7 @@ private IndexService newIndexService(IndexModule module) throws IOException { writableRegistry(), () -> false, null, - new RemoteDirectoryFactory(() -> repositoriesService) + new 
RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) ); } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 8c00ab97a46ea..662afa80f65fc 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -2689,8 +2689,9 @@ public void testRestoreShardFromRemoteStore() throws IOException { storeDirectory.deleteFile(file); } + assertEquals(0, storeDirectory.listAll().length); + Directory remoteDirectory = ((FilterDirectory) ((FilterDirectory) target.remoteStore().directory()).getDelegate()).getDelegate(); - ((BaseDirectoryWrapper) remoteDirectory).setCheckIndexOnClose(false); // extra0 file is added as a part of https://lucene.apache.org/core/7_2_1/test-framework/org/apache/lucene/mockfile/ExtrasFS.html // Safe to remove without impacting the test diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index af92d821a9043..6b05d67836272 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -8,132 +8,209 @@ package org.opensearch.index.shard; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; -import org.opensearch.test.OpenSearchTestCase; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.tests.store.BaseDirectoryWrapper; +import org.junit.After; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.Store; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.nio.file.NoSuchFileException; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.CountDownLatch; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.doThrow; +public class RemoteStoreRefreshListenerTests extends IndexShardTestCase { + private IndexShard indexShard; + private RemoteStoreRefreshListener remoteStoreRefreshListener; -public class RemoteStoreRefreshListenerTests extends OpenSearchTestCase { - private Directory storeDirectory; - private Directory remoteDirectory; + public void setup(boolean primary, int numberOfDocs) throws IOException { + indexShard = newStartedShard( + primary, + Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true).build(), + new InternalEngineFactory() + ); - private RemoteStoreRefreshListener remoteStoreRefreshListener; + indexDocs(1, numberOfDocs); + indexShard.refresh("test"); - public void setup(String[] remoteFiles) throws IOException { - storeDirectory = mock(Directory.class); - 
remoteDirectory = mock(Directory.class); - when(remoteDirectory.listAll()).thenReturn(remoteFiles); - remoteStoreRefreshListener = new RemoteStoreRefreshListener(storeDirectory, remoteDirectory); + remoteStoreRefreshListener = new RemoteStoreRefreshListener(indexShard); } - public void testAfterRefreshFalse() throws IOException { - setup(new String[0]); - remoteStoreRefreshListener.afterRefresh(false); - verify(storeDirectory, times(0)).listAll(); + private void indexDocs(int startDocId, int numberOfDocs) throws IOException { + for (int i = startDocId; i < startDocId + numberOfDocs; i++) { + indexDoc(indexShard, "_doc", Integer.toString(i)); + } } - public void testAfterRefreshTrueNoLocalFiles() throws IOException { - setup(new String[0]); + @After + public void tearDown() throws Exception { + Directory storeDirectory = ((FilterDirectory) ((FilterDirectory) indexShard.store().directory()).getDelegate()).getDelegate(); + ((BaseDirectoryWrapper) storeDirectory).setCheckIndexOnClose(false); + closeShards(indexShard); + super.tearDown(); + } - when(storeDirectory.listAll()).thenReturn(new String[0]); + public void testAfterRefresh() throws IOException { + setup(true, 3); + assertDocs(indexShard, "1", "2", "3"); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory, times(0)).copyFrom(any(), any(), any(), any()); - verify(remoteDirectory, times(0)).deleteFile(any()); - } + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); - public void testAfterRefreshOnlyUploadFiles() throws IOException { - setup(new String[0]); + verifyUploadedSegments(remoteSegmentStoreDirectory); - String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs", "0.cfe" }; - when(storeDirectory.listAll()).thenReturn(localFiles); + // This is to check if reading data from remote segment store works as well. 
+ remoteSegmentStoreDirectory.init(); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); - verify(remoteDirectory, times(0)).deleteFile(any()); + verifyUploadedSegments(remoteSegmentStoreDirectory); + } } - public void testAfterRefreshOnlyUploadAndDelete() throws IOException { - setup(new String[] { "0.si", "0.cfs" }); + public void testAfterCommit() throws IOException { + setup(true, 3); + assertDocs(indexShard, "1", "2", "3"); + flushShard(indexShard); - String[] localFiles = new String[] { "segments_1", "1.si", "1.cfs", "1.cfe" }; - when(storeDirectory.listAll()).thenReturn(localFiles); + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.si", "1.si", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.cfs", "1.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.cfe", "1.cfe", IOContext.DEFAULT); - verify(remoteDirectory).deleteFile("0.si"); - verify(remoteDirectory).deleteFile("0.cfs"); + verifyUploadedSegments(remoteSegmentStoreDirectory); + + // This is to check if reading data from remote segment store works as well. 
+ remoteSegmentStoreDirectory.init(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + } } - public void testAfterRefreshOnlyDelete() throws IOException { - setup(new String[] { "0.si", "0.cfs" }); + public void testRefreshAfterCommit() throws IOException { + setup(true, 3); + assertDocs(indexShard, "1", "2", "3"); + flushShard(indexShard); - String[] localFiles = new String[] { "0.si" }; - when(storeDirectory.listAll()).thenReturn(localFiles); + indexDocs(4, 4); + indexShard.refresh("test"); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory, times(0)).copyFrom(any(), any(), any(), any()); - verify(remoteDirectory).deleteFile("0.cfs"); - } + indexDocs(8, 4); + indexShard.refresh("test"); - public void testAfterRefreshTempLocalFile() throws IOException { - setup(new String[0]); + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); - String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs.tmp" }; - when(storeDirectory.listAll()).thenReturn(localFiles); - doThrow(new NoSuchFileException("0.cfs.tmp")).when(remoteDirectory) - .copyFrom(storeDirectory, "0.cfs.tmp", "0.cfs.tmp", IOContext.DEFAULT); + verifyUploadedSegments(remoteSegmentStoreDirectory); - remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); - verify(remoteDirectory, times(0)).deleteFile(any()); + // This is to check if reading data from remote segment store works as well. + remoteSegmentStoreDirectory.init(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + } } - public void testAfterRefreshConsecutive() throws IOException { - setup(new String[0]); + public void testAfterMultipleCommits() throws IOException { + setup(true, 3); + assertDocs(indexShard, "1", "2", "3"); - String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs", "0.cfe" }; - when(storeDirectory.listAll()).thenReturn(localFiles); - doThrow(new IOException("0.cfs")).when(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfe", IOContext.DEFAULT); - doThrow(new IOException("0.cfe")).when(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); + for (int i = 0; i < RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP + 3; i++) { + indexDocs(4 * (i + 1), 4); + flushShard(indexShard); + } + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + + // This is to check if reading data from remote segment store works as well. 
+ remoteSegmentStoreDirectory.init(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + } + } + + public void testReplica() throws IOException { + setup(false, 3); remoteStoreRefreshListener.afterRefresh(true); - verify(storeDirectory).listAll(); - verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); - verify(remoteDirectory, times(0)).deleteFile(any()); - String[] localFilesSecondRefresh = new String[] { "segments_1", "0.cfs", "1.cfs", "1.cfe" }; - when(storeDirectory.listAll()).thenReturn(localFilesSecondRefresh); + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); + + assertEquals(0, remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().size()); + } + } + public void testReplicaPromotion() throws IOException, InterruptedException { + setup(false, 3); remoteStoreRefreshListener.afterRefresh(true); - verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.cfs", "1.cfs", IOContext.DEFAULT); - verify(remoteDirectory).copyFrom(storeDirectory, "1.cfe", "1.cfe", IOContext.DEFAULT); - verify(remoteDirectory).deleteFile("0.si"); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate()) + .getDelegate(); + + assertEquals(0, remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().size()); + + final ShardRouting replicaRouting = indexShard.routingEntry(); + promoteReplica( + indexShard, + Collections.singleton(replicaRouting.allocationId().getId()), + new IndexShardRoutingTable.Builder(replicaRouting.shardId()).addShard(replicaRouting).build() + ); + + // The following logic is referenced from IndexShardTests.testPrimaryFillsSeqNoGapsOnPromotion + // ToDo: Add wait logic as part of promoteReplica() + final CountDownLatch latch = new CountDownLatch(1); + indexShard.acquirePrimaryOperationPermit(new ActionListener<>() { + @Override + public void onResponse(Releasable releasable) { + releasable.close(); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }, ThreadPool.Names.GENERIC, ""); + + latch.await(); + + indexDocs(4, 4); + indexShard.refresh("test"); + remoteStoreRefreshListener.afterRefresh(true); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + + // This is to check if reading data from remote segment store works as well. 
+ remoteSegmentStoreDirectory.init(); + + verifyUploadedSegments(remoteSegmentStoreDirectory); + } + + private void verifyUploadedSegments(RemoteSegmentStoreDirectory remoteSegmentStoreDirectory) throws IOException { + Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = remoteSegmentStoreDirectory + .getSegmentsUploadedToRemoteStore(); + try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { + SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); + for (String file : segmentInfos.files(true)) { + if (!RemoteStoreRefreshListener.EXCLUDE_FILES.contains(file)) { + assertTrue(uploadedSegments.containsKey(file)); + } + } + } } }
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java index 273d3c7e37c56..cd35349e33b59 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java @@ -44,6 +44,7 @@ public void testReadByte() throws IOException { when(inputStream.read()).thenReturn(10); assertEquals(10, remoteIndexInput.readByte()); + assertEquals(1, remoteIndexInput.getFilePointer()); verify(inputStream).read(any()); } @@ -52,13 +53,19 @@ public void testReadByteIOException() throws IOException { when(inputStream.read(any())).thenThrow(new IOException("Error reading")); assertThrows(IOException.class, () -> remoteIndexInput.readByte()); + assertEquals(0, remoteIndexInput.getFilePointer()); } public void testReadBytes() throws IOException { - byte[] buffer = new byte[10]; - remoteIndexInput.readBytes(buffer, 10, 20); + byte[] buffer = new byte[20]; + when(inputStream.read(eq(buffer), anyInt(), anyInt())).thenReturn(10).thenReturn(3).thenReturn(6).thenReturn(-1); + remoteIndexInput.readBytes(buffer, 0, 20); - verify(inputStream).read(buffer, 10, 20); + verify(inputStream).read(buffer, 0, 20); + verify(inputStream).read(buffer, 10, 10); + verify(inputStream).read(buffer, 13, 7); + verify(inputStream).read(buffer, 19, 1); + assertEquals(19, remoteIndexInput.getFilePointer()); } public void testReadBytesMultipleIterations() throws IOException { @@ -95,20 +102,14 @@ public void testLength() { assertEquals(FILESIZE, remoteIndexInput.length()); } - public void testSeek() throws IOException { - remoteIndexInput.seek(10); - - verify(inputStream).skip(10); - } - - public void testSeekIOException() throws IOException { - when(inputStream.skip(10)).thenThrow(new IOException("Error reading")); - - assertThrows(IOException.class, () -> remoteIndexInput.seek(10)); + public void testSeek() { + assertThrows(UnsupportedOperationException.class, () -> remoteIndexInput.seek(100L)); } - public void testGetFilePointer() { - assertThrows(UnsupportedOperationException.class, () -> remoteIndexInput.getFilePointer()); + public void testGetFilePointer() throws IOException { + when(inputStream.read(any(), eq(0), eq(8))).thenReturn(8); + remoteIndexInput.readBytes(new byte[8], 0, 8); + assertEquals(8, remoteIndexInput.getFilePointer()); } public void testSlice() {
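
The assertions above pin down the readBytes contract: successive reads of 10, 3, and 6 bytes leave the file pointer at 19. A simplified standalone sketch of that bookkeeping over a plain InputStream; this is illustrative only and condenses the two branches of RemoteIndexInput.readBytes into one loop:

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

final class FilePointerExample {
    private final InputStream in;
    private long filePointer; // bytes successfully consumed so far

    FilePointerExample(InputStream in) {
        this.in = in;
    }

    // Advance filePointer by whatever each read actually returns, retrying until
    // the requested length is served or the stream is exhausted.
    void readBytes(byte[] b, int offset, int len) throws IOException {
        int bytesRead = in.read(b, offset, len);
        while (bytesRead > 0) {
            filePointer += bytesRead;
            offset += bytesRead;
            len -= bytesRead;
            if (len == 0) {
                break;
            }
            bytesRead = in.read(b, offset, len);
        }
    }

    public static void main(String[] args) throws IOException {
        FilePointerExample reader = new FilePointerExample(new ByteArrayInputStream(new byte[19]));
        reader.readBytes(new byte[20], 0, 20); // underlying stream ends after 19 bytes
        System.out.println(reader.filePointer); // prints 19, the value testReadBytes above expects
    }
}
```

diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java similarity index 70% rename from server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java rename to server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java index e8357d2c184bf..0105d0dc309c2 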
100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.store.Directory; import org.junit.Before; import org.mockito.ArgumentCaptor; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; @@ -27,29 +28,31 @@ import java.io.IOException; import java.nio.file.Path; import java.util.Collections; +import java.util.List; import java.util.function.Supplier; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; -public class RemoteDirectoryFactoryTests extends OpenSearchTestCase { +public class RemoteSegmentStoreDirectoryFactoryTests extends OpenSearchTestCase { private Supplier repositoriesServiceSupplier; private RepositoriesService repositoriesService; - private RemoteDirectoryFactory remoteDirectoryFactory; + private RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory; @Before public void setup() { repositoriesServiceSupplier = mock(Supplier.class); repositoriesService = mock(RepositoriesService.class); when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); - remoteDirectoryFactory = new RemoteDirectoryFactory(repositoriesServiceSupplier); + remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(repositoriesServiceSupplier); } public void testNewDirectory() throws IOException { - Settings settings = Settings.builder().build(); + Settings settings = Settings.builder().put(IndexMetadata.SETTING_INDEX_UUID, "uuid_1").build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); Path tempDir = createTempDir().resolve(indexSettings.getUUID()).resolve("0"); ShardPath shardPath = new ShardPath(false, tempDir, tempDir, new ShardId(indexSettings.getIndex(), 0)); @@ -57,20 +60,21 @@ public void testNewDirectory() throws IOException { BlobStore blobStore = mock(BlobStore.class); BlobContainer blobContainer = mock(BlobContainer.class); when(repository.blobStore()).thenReturn(blobStore); + when(repository.basePath()).thenReturn(new BlobPath().add("base_path")); when(blobStore.blobContainer(any())).thenReturn(blobContainer); when(blobContainer.listBlobs()).thenReturn(Collections.emptyMap()); when(repositoriesService.repository("remote_store_repository")).thenReturn(repository); - try (Directory directory = remoteDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)) { - assertTrue(directory instanceof RemoteDirectory); + try (Directory directory = remoteSegmentStoreDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)) { + assertTrue(directory instanceof RemoteSegmentStoreDirectory); ArgumentCaptor blobPathCaptor = ArgumentCaptor.forClass(BlobPath.class); - verify(blobStore).blobContainer(blobPathCaptor.capture()); - BlobPath blobPath = blobPathCaptor.getValue(); - assertEquals("foo/0/", blobPath.buildAsString()); + verify(blobStore, times(2)).blobContainer(blobPathCaptor.capture()); + List blobPaths = blobPathCaptor.getAllValues(); + assertEquals("base_path/uuid_1/0/segments/data/", blobPaths.get(0).buildAsString()); + assertEquals("base_path/uuid_1/0/segments/metadata/", 
blobPaths.get(1).buildAsString());
-        directory.listAll();
-        verify(blobContainer).listBlobs();
+        verify(blobContainer).listBlobsByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX);
         verify(repositoriesService).repository("remote_store_repository");
     }
 }
@@ -85,7 +89,7 @@ public void testNewDirectoryRepositoryDoesNotExist() {
 
         assertThrows(
             IllegalArgumentException.class,
-            () -> remoteDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)
+            () -> remoteSegmentStoreDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)
         );
     }
 
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
index 4eabfa74625f2..96f14616fb54b 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
@@ -15,6 +15,7 @@
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.tests.util.LuceneTestCase;
 import org.junit.Before;
+import org.opensearch.common.UUIDs;
 import org.opensearch.common.collect.Set;
 import org.opensearch.test.OpenSearchTestCase;
 
@@ -129,26 +130,52 @@ public void testInitNoMetadataFile() throws IOException {
 
     private Map<String, String> getDummyMetadata(String prefix, int commitGeneration) {
         Map<String, String> metadata = new HashMap<>();
-        metadata.put(prefix + ".cfe", prefix + ".cfe::" + prefix + ".cfe__qrt::" + randomIntBetween(1000, 5000));
-        metadata.put(prefix + ".cfs", prefix + ".cfs::" + prefix + ".cfs__zxd::" + randomIntBetween(1000, 5000));
-        metadata.put(prefix + ".si", prefix + ".si::" + prefix + ".si__yui::" + randomIntBetween(1000, 5000));
+
+        metadata.put(prefix + ".cfe", prefix + ".cfe::" + prefix + ".cfe__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000));
+        metadata.put(prefix + ".cfs", prefix + ".cfs::" + prefix + ".cfs__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000));
+        metadata.put(prefix + ".si", prefix + ".si::" + prefix + ".si__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000));
         metadata.put(
             "segments_" + commitGeneration,
-            "segments_" + commitGeneration + "::segments_" + commitGeneration + "__exv::" + randomIntBetween(1000, 5000)
+            "segments_"
+                + commitGeneration
+                + "::segments_"
+                + commitGeneration
+                + "__"
+                + UUIDs.base64UUID()
+                + "::"
+                + randomIntBetween(1000, 5000)
         );
         return metadata;
     }
 
-    private void populateMetadata() throws IOException {
+    private Map<String, Map<String, String>> populateMetadata() throws IOException {
         List<String> metadataFiles = List.of("metadata__1__5__abc", "metadata__1__6__pqr", "metadata__2__1__zxv");
         when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn(
             metadataFiles
         );
-        IndexInput indexInput = mock(IndexInput.class);
-        Map<String, String> dummyMetadata = getDummyMetadata("_0", 1);
-        when(indexInput.readMapOfStrings()).thenReturn(dummyMetadata);
-        when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn(indexInput);
+
+        Map<String, Map<String, String>> metadataFilenameContentMapping = Map.of(
+            "metadata__1__5__abc",
+            getDummyMetadata("_0", 1),
+            "metadata__1__6__pqr",
+            getDummyMetadata("_0", 1),
+            "metadata__2__1__zxv",
+            getDummyMetadata("_0", 1)
+        );
+
+        IndexInput indexInput1 = mock(IndexInput.class);
+        when(indexInput1.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__1__5__abc"));
+        when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(indexInput1);
+
+        IndexInput indexInput2 = mock(IndexInput.class);
+        when(indexInput2.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__1__6__pqr"));
+        when(remoteMetadataDirectory.openInput("metadata__1__6__pqr", IOContext.DEFAULT)).thenReturn(indexInput2);
+
+        IndexInput indexInput3 = mock(IndexInput.class);
+        when(indexInput3.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__2__1__zxv"));
+        when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn(indexInput3);
+
+        return metadataFilenameContentMapping;
     }
 
     public void testInit() throws IOException {
@@ -291,20 +318,39 @@ public void testCopyFromException() throws IOException {
     }
 
     public void testContainsFile() throws IOException {
-        populateMetadata();
+        List<String> metadataFiles = List.of("metadata__1__5__abc");
+        when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn(
+            metadataFiles
+        );
+
+        Map<String, String> metadata = new HashMap<>();
+        metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234");
+        metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345");
+
+        Map<String, Map<String, String>> metadataFilenameContentMapping = Map.of("metadata__1__5__abc", metadata);
+
+        IndexInput indexInput1 = mock(IndexInput.class);
+        when(indexInput1.readMapOfStrings()).thenReturn(metadataFilenameContentMapping.get("metadata__1__5__abc"));
+        when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(indexInput1);
+
         remoteSegmentStoreDirectory.init();
 
-        // This is not the correct way to add files but the other way is to open up access to fields in UploadedSegmentMetadata
         Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegmentMetadataMap = remoteSegmentStoreDirectory
             .getSegmentsUploadedToRemoteStore();
-        uploadedSegmentMetadataMap.put(
-            "_100.si",
-            new RemoteSegmentStoreDirectory.UploadedSegmentMetadata("_100.si", "_100.si__uuid1", "1234")
+
+        assertThrows(
+            UnsupportedOperationException.class,
+            () -> uploadedSegmentMetadataMap.put(
+                "_100.si",
+                new RemoteSegmentStoreDirectory.UploadedSegmentMetadata("_100.si", "_100.si__uuid1", "1234")
+            )
         );
 
-        assertTrue(remoteSegmentStoreDirectory.containsFile("_100.si", "1234"));
-        assertFalse(remoteSegmentStoreDirectory.containsFile("_100.si", "2345"));
-        assertFalse(remoteSegmentStoreDirectory.containsFile("_200.si", "1234"));
+        assertTrue(remoteSegmentStoreDirectory.containsFile("_0.cfe", "1234"));
+        assertTrue(remoteSegmentStoreDirectory.containsFile("_0.cfs", "2345"));
+        assertFalse(remoteSegmentStoreDirectory.containsFile("_0.cfe", "1234000"));
+        assertFalse(remoteSegmentStoreDirectory.containsFile("_0.cfs", "2345000"));
+        assertFalse(remoteSegmentStoreDirectory.containsFile("_0.si", "23"));
     }
 
     public void testUploadMetadataEmpty() throws IOException {
@@ -336,4 +382,84 @@ public void testUploadMetadataNonEmpty() throws IOException {
         String metadataString = remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().get("_0.si").toString();
         verify(indexOutput).writeMapOfStrings(Map.of("_0.si", metadataString));
     }
+
+    public void testDeleteStaleCommitsException() throws IOException {
+        when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenThrow(
+            new IOException("Error reading")
+        );
+
+        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.deleteStaleSegments(5));
+    }
+
+    public void testDeleteStaleCommitsWithinThreshold() throws IOException {
+        populateMetadata();
+
+        // populateMetadata() adds a stub to return 3 metadata files
+        // We are passing lastNMetadataFilesToKeep=5 here so that none of the metadata files will be deleted
+        remoteSegmentStoreDirectory.deleteStaleSegments(5);
+
+        verify(remoteMetadataDirectory, times(0)).openInput(any(String.class), eq(IOContext.DEFAULT));
+    }
+
+    public void testDeleteStaleCommitsActualDelete() throws IOException {
+        Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        // populateMetadata() adds a stub to return 3 metadata files
+        // We are passing lastNMetadataFilesToKeep=2 here so that the oldest metadata file will be deleted
+        remoteSegmentStoreDirectory.deleteStaleSegments(2);
+
+        for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) {
+            String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
+            verify(remoteDataDirectory).deleteFile(uploadedFilename);
+        }
+        ;
+        verify(remoteMetadataDirectory).deleteFile("metadata__1__5__abc");
+    }
+
+    public void testDeleteStaleCommitsActualDeleteIOException() throws IOException {
+        Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        String segmentFileWithException = metadataFilenameContentMapping.get("metadata__1__5__abc")
+            .values()
+            .stream()
+            .findAny()
+            .get()
+            .split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
+        doThrow(new IOException("Error")).when(remoteDataDirectory).deleteFile(segmentFileWithException);
+        // populateMetadata() adds a stub to return 3 metadata files
+        // We are passing lastNMetadataFilesToKeep=2 here so that the oldest metadata file will be deleted
+        remoteSegmentStoreDirectory.deleteStaleSegments(2);
+
+        for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) {
+            String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
+            verify(remoteDataDirectory).deleteFile(uploadedFilename);
+        }
+        ;
+        verify(remoteMetadataDirectory, times(0)).deleteFile("metadata__1__5__abc");
+    }
+
+    public void testDeleteStaleCommitsActualDeleteNoSuchFileException() throws IOException {
+        Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        String segmentFileWithException = metadataFilenameContentMapping.get("metadata__1__5__abc")
+            .values()
+            .stream()
+            .findAny()
+            .get()
+            .split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
+        doThrow(new NoSuchFileException(segmentFileWithException)).when(remoteDataDirectory).deleteFile(segmentFileWithException);
+        // populateMetadata() adds a stub to return 3 metadata files
+        // We are passing lastNMetadataFilesToKeep=2 here so that the oldest metadata file will be deleted
+        remoteSegmentStoreDirectory.deleteStaleSegments(2);
+
+        for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) {
+            String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
+            verify(remoteDataDirectory).deleteFile(uploadedFilename);
+        }
+        ;
+        verify(remoteMetadataDirectory).deleteFile("metadata__1__5__abc");
+    }
 }
diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java
index 4b8eec70f2c1a..4d3b841e203de 100644
---
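The four deleteStaleSegments tests above all exercise one retention rule: metadata files are ordered oldest to newest, the newest lastNMetadataFilesToKeep files survive, and every older metadata file (plus the segment files it references) is treated as a stale commit. The following is a minimal sketch of that rule only, not the actual RemoteSegmentStoreDirectory implementation; the oldest-first ordering and the class name are assumptions for illustration.

    import java.util.List;

    final class StaleMetadataRetentionSketch {
        // Given metadata filenames ordered oldest-first, everything except the
        // newest lastNToKeep entries is stale and eligible for deletion.
        static List<String> staleFiles(List<String> oldestFirst, int lastNToKeep) {
            if (oldestFirst.size() <= lastNToKeep) {
                return List.of(); // within threshold: nothing is deleted
            }
            return oldestFirst.subList(0, oldestFirst.size() - lastNToKeep);
        }

        public static void main(String[] args) {
            List<String> files = List.of("metadata__1__5__abc", "metadata__1__6__pqr", "metadata__2__1__zxv");
            System.out.println(staleFiles(files, 2)); // [metadata__1__5__abc]
            System.out.println(staleFiles(files, 5)); // [] -- the within-threshold case
        }
    }

With lastNMetadataFilesToKeep=2 only metadata__1__5__abc falls out of the window, which is exactly the file whose deletion the verify calls above assert.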
a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -172,7 +172,7 @@ import org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.PrimaryReplicaSyncer; -import org.opensearch.index.store.RemoteDirectoryFactory; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; @@ -1826,7 +1826,7 @@ public void onFailure(final Exception e) { emptyMap(), null, emptyMap(), - new RemoteDirectoryFactory(() -> repositoriesService) + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) ); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index f446538acccbb..08004b7e42fea 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -59,6 +59,10 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.fs.FsBlobContainer; +import org.opensearch.common.blobstore.fs.FsBlobStore; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.uid.Versions; @@ -88,6 +92,8 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; +import org.opensearch.index.store.RemoteDirectory; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.InternalTranslogFactory; @@ -123,6 +129,7 @@ import org.opensearch.threadpool.ThreadPool; import java.io.IOException; +import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -532,7 +539,10 @@ protected IndexShard newShard( ShardId shardId = shardPath.getShardId(); NodeEnvironment.NodePath remoteNodePath = new NodeEnvironment.NodePath(createTempDir()); ShardPath remoteShardPath = new ShardPath(false, remoteNodePath.resolve(shardId), remoteNodePath.resolve(shardId), shardId); - storeProvider = is -> createStore(is, remoteShardPath); + RemoteDirectory dataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); + RemoteDirectory metadataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory); + storeProvider = is -> createStore(shardId, is, remoteSegmentStoreDirectory); remoteStore = storeProvider.apply(indexSettings); } indexShard = new IndexShard( @@ -570,6 +580,13 @@ protected IndexShard newShard( return indexShard; } + private RemoteDirectory newRemoteDirectory(Path f) throws IOException { + FsBlobStore 
fsBlobStore = new FsBlobStore(1024, f, false); + BlobPath blobPath = new BlobPath(); + BlobContainer fsBlobContainer = new FsBlobContainer(fsBlobStore, blobPath, f); + return new RemoteDirectory(fsBlobContainer); + } + /** * Takes an existing shard, closes it and starts a new initialing shard at the same location * From 7fe5830798b43f919ba1beed8669b711b149e60d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Vl=C4=8Dek?= Date: Mon, 29 Aug 2022 21:17:21 +0200 Subject: [PATCH 10/78] ZIP publication groupId value is configurable (#4156) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When publishing Zip POM the groupId value was hard-coded to `org.opensearch.plugin` value which worked fine for existing core plugins but is not convenient for other plugins (such as community plugins maintained in independent repositories). This PR changes the sources of the ZIP publishing groupId value. Specifically, there are two ways to set the value: 1) It is automatically inherited from the Gradle "project.group" 2) It can be manually specified in the ZIP publication POM object This PR also brings a major rework of tests in PublishTests class. Individual testing scenarios are driven by "real" gradle building scripts (utilizing `java-gradle-plugin` gradle plugin). Closes #3692 Signed-off-by: Lukáš Vlček Signed-off-by: Lukáš Vlček --- CHANGELOG.md | 1 + .../opensearch/gradle/pluginzip/Publish.java | 44 +-- .../gradle/pluginzip/PublishTests.java | 339 +++++++++++------- .../pluginzip/customizedGroupValue.gradle | 45 +++ .../customizedInvalidGroupValue.gradle | 45 +++ .../pluginzip/groupAndVersionValue.gradle | 44 +++ .../pluginzip/missingGroupValue.gradle | 22 ++ .../pluginzip/missingPOMEntity.gradle | 22 ++ 8 files changed, 406 insertions(+), 156 deletions(-) create mode 100644 buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle create mode 100644 buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle create mode 100644 buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle create mode 100644 buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle create mode 100644 buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle diff --git a/CHANGELOG.md b/CHANGELOG.md index f11f407434e6b..52fa12d523659 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) +- Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) ### Deprecated diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java index d83384ec7d172..70c3737ba3674 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -9,7 +9,8 @@ import org.gradle.api.Plugin; import org.gradle.api.Project; -import org.gradle.api.publish.Publication; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; import org.gradle.api.publish.PublishingExtension; import org.gradle.api.publish.maven.MavenPublication; import 
org.gradle.api.publish.maven.plugins.MavenPublishPlugin;
@@ -18,6 +19,9 @@
 import org.gradle.api.Task;
 
 public class Publish implements Plugin<Project> {
+
+    private static final Logger LOGGER = Logging.getLogger(Publish.class);
+
     public final static String EXTENSION_NAME = "zipmavensettings";
     public final static String PUBLICATION_NAME = "pluginZip";
     public final static String STAGING_REPO = "zipStaging";
@@ -37,27 +41,25 @@ public static void configMaven(Project project) {
                 });
             });
             publishing.publications(publications -> {
-                final Publication publication = publications.findByName(PUBLICATION_NAME);
-                if (publication == null) {
-                    publications.create(PUBLICATION_NAME, MavenPublication.class, mavenZip -> {
-                        String zipGroup = "org.opensearch.plugin";
-                        String zipArtifact = project.getName();
-                        String zipVersion = getProperty("version", project);
-                        mavenZip.artifact(project.getTasks().named("bundlePlugin"));
-                        mavenZip.setGroupId(zipGroup);
-                        mavenZip.setArtifactId(zipArtifact);
-                        mavenZip.setVersion(zipVersion);
-                    });
-                } else {
-                    final MavenPublication mavenZip = (MavenPublication) publication;
-                    String zipGroup = "org.opensearch.plugin";
-                    String zipArtifact = project.getName();
-                    String zipVersion = getProperty("version", project);
-                    mavenZip.artifact(project.getTasks().named("bundlePlugin"));
-                    mavenZip.setGroupId(zipGroup);
-                    mavenZip.setArtifactId(zipArtifact);
-                    mavenZip.setVersion(zipVersion);
+                MavenPublication mavenZip = (MavenPublication) publications.findByName(PUBLICATION_NAME);
+
+                if (mavenZip == null) {
+                    mavenZip = publications.create(PUBLICATION_NAME, MavenPublication.class);
                 }
+
+                String groupId = mavenZip.getGroupId();
+                if (groupId == null) {
+                    // The groupId is not customized thus we get the value from "project.group".
+                    // See https://docs.gradle.org/current/userguide/publishing_maven.html#sec:identity_values_in_the_generated_pom
+                    groupId = getProperty("group", project);
+                }
+
+                String artifactId = project.getName();
+                String pluginVersion = getProperty("version", project);
+                mavenZip.artifact(project.getTasks().named("bundlePlugin"));
+                mavenZip.setGroupId(groupId);
+                mavenZip.setArtifactId(artifactId);
+                mavenZip.setVersion(pluginVersion);
             });
         });
     }
 }
diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java
index 8c1314c4b4394..06632e2dfa476 100644
--- a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java
+++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java
@@ -10,19 +10,21 @@
 
 import org.gradle.testkit.runner.BuildResult;
 import org.gradle.testkit.runner.GradleRunner;
-import org.gradle.testfixtures.ProjectBuilder;
-import org.gradle.api.Project;
+import org.gradle.testkit.runner.UnexpectedBuildFailure;
 import org.opensearch.gradle.test.GradleUnitTestCase;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
-import java.io.IOException;
-import org.gradle.api.publish.maven.tasks.PublishToMavenRepository;
 
 import java.io.File;
+import java.io.FileReader;
 import java.io.FileWriter;
+import java.io.IOException;
 import java.io.Writer;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 
 import static org.gradle.testkit.runner.TaskOutcome.SUCCESS;
 
@@ -30,14 +32,16 @@
 import org.apache.maven.model.Model;
 import org.apache.maven.model.io.xpp3.MavenXpp3Reader;
 import org.codehaus.plexus.util.xml.pull.XmlPullParserException;
-import java.io.FileReader;
-import org.gradle.api.tasks.bundling.Zip;
+import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.List;
-import java.util.ArrayList;
 
 public class PublishTests extends GradleUnitTestCase {
 
     private TemporaryFolder projectDir;
+    private static final String TEMPLATE_RESOURCE_FOLDER = "pluginzip";
+    private final String PROJECT_NAME = "sample-plugin";
+    private final String ZIP_PUBLISH_TASK = "publishPluginZipPublicationToZipStagingRepository";
 
     @Before
     public void setUp() throws IOException {
@@ -51,155 +55,200 @@ public void tearDown() {
     }
 
     @Test
-    public void testZipPublish() throws IOException, XmlPullParserException {
-        String zipPublishTask = "publishPluginZipPublicationToZipStagingRepository";
-        prepareProjectForPublishTask(zipPublishTask);
-
-        // Generate the build.gradle file
-        String buildFileContent = "apply plugin: 'maven-publish' \n"
-            + "apply plugin: 'java' \n"
-            + "publishing {\n"
-            + "  repositories {\n"
-            + "    maven {\n"
-            + "      url = 'local-staging-repo/'\n"
-            + "      name = 'zipStaging'\n"
-            + "    }\n"
-            + "  }\n"
-            + "  publications {\n"
-            + "    pluginZip(MavenPublication) {\n"
-            + "      groupId = 'org.opensearch.plugin' \n"
-            + "      artifactId = 'sample-plugin' \n"
-            + "      version = '2.0.0.0' \n"
-            + "      artifact('sample-plugin.zip') \n"
-            + "    }\n"
-            + "  }\n"
-            + "}";
-        writeString(projectDir.newFile("build.gradle"), buildFileContent);
-        // Execute the task publishPluginZipPublicationToZipStagingRepository
-        List<String> allArguments = new ArrayList<String>();
-        allArguments.add("build");
-        allArguments.add(zipPublishTask);
-        GradleRunner runner = GradleRunner.create();
-        runner.forwardOutput();
-        runner.withPluginClasspath();
-        runner.withArguments(allArguments);
-        runner.withProjectDir(projectDir.getRoot());
+    public void missingGroupValue() throws IOException, URISyntaxException, XmlPullParserException {
+        GradleRunner runner = prepareGradleRunnerFromTemplate("missingGroupValue.gradle");
+        Exception e = assertThrows(UnexpectedBuildFailure.class, runner::build);
+        assertTrue(e.getMessage().contains("Invalid publication 'pluginZip': groupId cannot be empty."));
+    }
+
+    /**
+     * This would be the most common use case, where the user declares a Maven publication entity with basic info
+     * and the resulting POM file will use groupId and version values from the Gradle project object.
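Reduced to one decision, the groupId resolution that the reworked configMaven above implements is: an explicitly set publication groupId wins, otherwise the Gradle "project.group" value is used. A hedged sketch of that rule follows; the class and parameter names are illustrative, not part of the plugin.

    final class GroupIdPrecedenceSketch {
        // publicationGroupId stands in for MavenPublication.getGroupId(),
        // projectGroup for the Gradle "project.group" property.
        static String resolveGroupId(String publicationGroupId, String projectGroup) {
            return publicationGroupId != null ? publicationGroupId : projectGroup;
        }

        public static void main(String[] args) {
            System.out.println(resolveGroupId(null, "org.custom.group"));              // org.custom.group
            System.out.println(resolveGroupId("I.am.customized", "org.custom.group")); // I.am.customized
        }
    }

The first call mirrors the groupAndVersionValue scenario below; the second mirrors customizedGroupValue.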
+ */ + @Test + public void groupAndVersionValue() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("groupAndVersionValue.gradle"); BuildResult result = runner.build(); - // Check if task publishMavenzipPublicationToZipstagingRepository has ran well - assertEquals(SUCCESS, result.task(":" + zipPublishTask).getOutcome()); - // check if the zip has been published to local staging repo + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // check if both the zip and pom files have been published to local staging repo assertTrue( - new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.zip") - .exists() + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "custom", + "group", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ).exists() ); - assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); - // Parse the maven file and validate the groupID to org.opensearch.plugin + assertTrue( + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "custom", + "group", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.zip" + ) + ).exists() + ); + + // Parse the maven file and validate the groupID MavenXpp3Reader reader = new MavenXpp3Reader(); Model model = reader.read( new FileReader( - new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.pom") + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "org", + "custom", + "group", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) ) ); - assertEquals(model.getGroupId(), "org.opensearch.plugin"); + assertEquals(model.getVersion(), "2.0.0.0"); + assertEquals(model.getGroupId(), "org.custom.group"); + assertEquals(model.getUrl(), "https://github.com/doe/sample-plugin"); } + /** + * In this case the Publication entity is completely missing but still the POM file is generated using the default + * values including the groupId and version values obtained from the Gradle project object. 
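The long String.join chains in the assertions above all follow the standard Maven repository layout: each dot-separated groupId segment becomes a directory, followed by artifactId, version, and the artifact file name. A small sketch of that layout, assuming the build/local-staging-repo root used by these tests:

    import java.nio.file.Path;
    import java.nio.file.Paths;

    final class MavenLayoutSketch {
        // groupId dots become directories, then artifactId/version/artifactId-version.extension
        static Path artifactPath(String groupId, String artifactId, String version, String extension) {
            Path path = Paths.get("build", "local-staging-repo");
            for (String segment : groupId.split("\\.")) {
                path = path.resolve(segment);
            }
            return path.resolve(artifactId).resolve(version).resolve(artifactId + "-" + version + "." + extension);
        }

        public static void main(String[] args) {
            // e.g. build/local-staging-repo/org/custom/group/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.pom
            System.out.println(artifactPath("org.custom.group", "sample-plugin", "2.0.0.0", "pom"));
        }
    }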
+     */
     @Test
-    public void testZipPublishWithPom() throws IOException, XmlPullParserException {
-        String zipPublishTask = "publishPluginZipPublicationToZipStagingRepository";
-        Project project = prepareProjectForPublishTask(zipPublishTask);
-
-        // Generate the build.gradle file
-        String buildFileContent = "apply plugin: 'maven-publish' \n"
-            + "apply plugin: 'java' \n"
-            + "publishing {\n"
-            + "  repositories {\n"
-            + "    maven {\n"
-            + "      url = 'local-staging-repo/'\n"
-            + "      name = 'zipStaging'\n"
-            + "    }\n"
-            + "  }\n"
-            + "  publications {\n"
-            + "    pluginZip(MavenPublication) {\n"
-            + "      groupId = 'org.opensearch.plugin' \n"
-            + "      artifactId = 'sample-plugin' \n"
-            + "      version = '2.0.0.0' \n"
-            + "      artifact('sample-plugin.zip') \n"
-            + "      pom {\n"
-            + "        name = 'sample-plugin'\n"
-            + "        description = 'sample-description'\n"
-            + "        licenses {\n"
-            + "          license {\n"
-            + "            name = \"The Apache License, Version 2.0\"\n"
-            + "            url = \"http://www.apache.org/licenses/LICENSE-2.0.txt\"\n"
-            + "          }\n"
-            + "        }\n"
-            + "        developers {\n"
-            + "          developer {\n"
-            + "            name = 'opensearch'\n"
-            + "            url = 'https://github.com/opensearch-project/OpenSearch'\n"
-            + "          }\n"
-            + "        }\n"
-            + "        url = 'https://github.com/opensearch-project/OpenSearch'\n"
-            + "        scm {\n"
-            + "          url = 'https://github.com/opensearch-project/OpenSearch'\n"
-            + "        }\n"
-            + "      }"
-            + "    }\n"
-            + "  }\n"
-            + "}";
-        writeString(projectDir.newFile("build.gradle"), buildFileContent);
-        // Execute the task publishPluginZipPublicationToZipStagingRepository
-        List<String> allArguments = new ArrayList<String>();
-        allArguments.add("build");
-        allArguments.add(zipPublishTask);
-        GradleRunner runner = GradleRunner.create();
-        runner.forwardOutput();
-        runner.withPluginClasspath();
-        runner.withArguments(allArguments);
-        runner.withProjectDir(projectDir.getRoot());
+    public void missingPOMEntity() throws IOException, URISyntaxException, XmlPullParserException {
+        GradleRunner runner = prepareGradleRunnerFromTemplate("missingPOMEntity.gradle");
         BuildResult result = runner.build();
-        // Check if task publishMavenzipPublicationToZipstagingRepository has ran well
-        assertEquals(SUCCESS, result.task(":" + zipPublishTask).getOutcome());
-        // check if the zip has been published to local staging repo
-        assertTrue(
-            new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.zip")
-                .exists()
+
+        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
+        assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
+
+        // Parse the maven file and validate it
+        MavenXpp3Reader reader = new MavenXpp3Reader();
+        Model model = reader.read(
+            new FileReader(
+                new File(
+                    projectDir.getRoot(),
+                    String.join(
+                        File.separator,
+                        "build",
+                        "local-staging-repo",
+                        "org",
+                        "custom",
+                        "group",
+                        PROJECT_NAME,
+                        "2.0.0.0",
+                        PROJECT_NAME + "-2.0.0.0.pom"
+                    )
+                )
+            )
         );
+
+        assertEquals(model.getArtifactId(), PROJECT_NAME);
+        assertEquals(model.getGroupId(), "org.custom.group");
+        assertEquals(model.getVersion(), "2.0.0.0");
+        assertEquals(model.getPackaging(), "zip");
+
+        assertNull(model.getName());
+        assertNull(model.getDescription());
+
+        assertEquals(0, model.getDevelopers().size());
+        assertEquals(0, model.getContributors().size());
+        assertEquals(0, model.getLicenses().size());
+    }
+
+    /**
+     * In some cases we need the POM groupId value to be different from the Gradle "project.group" value, hence we
+     * allow for groupId customization (it will
override whatever the Gradle "project.group" value is). + */ + @Test + public void customizedGroupValue() throws IOException, URISyntaxException, XmlPullParserException { + GradleRunner runner = prepareGradleRunnerFromTemplate("customizedGroupValue.gradle"); + BuildResult result = runner.build(); + + /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); - // Parse the maven file and validate the groupID to org.opensearch.plugin + assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); + + // Parse the maven file and validate the groupID MavenXpp3Reader reader = new MavenXpp3Reader(); Model model = reader.read( new FileReader( - new File(projectDir.getRoot(), "local-staging-repo/org/opensearch/plugin/sample-plugin/2.0.0.0/sample-plugin-2.0.0.0.pom") + new File( + projectDir.getRoot(), + String.join( + File.separator, + "build", + "local-staging-repo", + "I", + "am", + "customized", + PROJECT_NAME, + "2.0.0.0", + PROJECT_NAME + "-2.0.0.0.pom" + ) + ) ) ); - assertEquals(model.getGroupId(), "org.opensearch.plugin"); - assertEquals(model.getUrl(), "https://github.com/opensearch-project/OpenSearch"); + + assertEquals(model.getGroupId(), "I.am.customized"); } - protected Project prepareProjectForPublishTask(String zipPublishTask) throws IOException { - Project project = ProjectBuilder.builder().build(); - - // Apply the opensearch.pluginzip plugin - project.getPluginManager().apply("opensearch.pluginzip"); - // Check if the plugin has been applied to the project - assertTrue(project.getPluginManager().hasPlugin("opensearch.pluginzip")); - // Check if the project has the task from class PublishToMavenRepository after plugin apply - assertNotNull(project.getTasks().withType(PublishToMavenRepository.class)); - // Create a mock bundlePlugin task - Zip task = project.getTasks().create("bundlePlugin", Zip.class); - Publish.configMaven(project); - // Check if the main task publishPluginZipPublicationToZipStagingRepository exists after plugin apply - assertTrue(project.getTasks().getNames().contains(zipPublishTask)); - assertNotNull("Task to generate: ", project.getTasks().getByName(zipPublishTask)); - // Run Gradle functional tests, but calling a build.gradle file, that resembles the plugin publish behavior - - // Create a sample plugin zip file - File sampleZip = new File(projectDir.getRoot(), "sample-plugin.zip"); - Files.createFile(sampleZip.toPath()); - writeString(projectDir.newFile("settings.gradle"), ""); - - return project; + /** + * If the customized groupId value is invalid (from the Maven POM perspective) then we need to be sure it is + * caught and reported properly. 
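The failure message asserted in the next test quotes Maven's identifier pattern, [A-Za-z0-9_\-.]+. A one-method sketch of an equivalent check (illustrative only; the actual validation is performed by Gradle when the publication is realized):

    import java.util.regex.Pattern;

    final class MavenIdentifierSketch {
        private static final Pattern MAVEN_IDENTIFIER = Pattern.compile("[A-Za-z0-9_\\-.]+");

        static boolean isValidGroupId(String groupId) {
            return groupId != null && MAVEN_IDENTIFIER.matcher(groupId).matches();
        }

        public static void main(String[] args) {
            System.out.println(isValidGroupId("I.am.customized")); // true
            System.out.println(isValidGroupId(" "));               // false -- the failing case below
        }
    }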
+     */
+    @Test
+    public void customizedInvalidGroupValue() throws IOException, URISyntaxException {
+        GradleRunner runner = prepareGradleRunnerFromTemplate("customizedInvalidGroupValue.gradle");
+        Exception e = assertThrows(UnexpectedBuildFailure.class, runner::build);
+        assertTrue(
+            e.getMessage().contains("Invalid publication 'pluginZip': groupId ( ) is not a valid Maven identifier ([A-Za-z0-9_\\-.]+).")
+        );
+    }
+
+    private GradleRunner prepareGradleRunnerFromTemplate(String templateName) throws IOException, URISyntaxException {
+        useTemplateFile(projectDir.newFile("build.gradle"), templateName);
+        prepareGradleFilesAndSources();
+
+        GradleRunner runner = GradleRunner.create()
+            .forwardOutput()
+            .withPluginClasspath()
+            .withArguments("build", ZIP_PUBLISH_TASK)
+            .withProjectDir(projectDir.getRoot());
+
+        return runner;
+    }
+
+    private void prepareGradleFilesAndSources() throws IOException {
+        // A dummy "source" file that is processed with bundlePlugin and put into a ZIP artifact file
+        File bundleFile = new File(projectDir.getRoot(), PROJECT_NAME + "-source.txt");
+        Path zipFile = Files.createFile(bundleFile.toPath());
+        // Setting a project name via settings.gradle file
+        writeString(projectDir.newFile("settings.gradle"), "rootProject.name = '" + PROJECT_NAME + "'");
+    }
 
     private void writeString(File file, String string) throws IOException {
@@ -208,4 +257,24 @@ private void writeString(File file, String string) throws IOException {
         }
     }
 
+    /**
+     * Write the content of the "template" file into the target file.
+     * The template file must be located in the {@value TEMPLATE_RESOURCE_FOLDER} folder.
+     * @param targetFile A target file
+     * @param templateFile A name of the template file located under the {@value TEMPLATE_RESOURCE_FOLDER} folder
+     */
+    private void useTemplateFile(File targetFile, String templateFile) throws IOException, URISyntaxException {
+
+        URL resource = getClass().getClassLoader().getResource(String.join(File.separator, TEMPLATE_RESOURCE_FOLDER, templateFile));
+        Path resPath = Paths.get(resource.toURI()).toAbsolutePath();
+        List<String> lines = Files.readAllLines(resPath, StandardCharsets.UTF_8);
+
+        try (Writer writer = new FileWriter(targetFile)) {
+            for (String line : lines) {
+                writer.write(line);
+                writer.write(System.lineSeparator());
+            }
+        }
+    }
+
 }
diff --git a/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle
new file mode 100644
index 0000000000000..1bde3edda2d91
--- /dev/null
+++ b/buildSrc/src/test/resources/pluginzip/customizedGroupValue.gradle
@@ -0,0 +1,45 @@
+plugins {
+    id 'java-gradle-plugin'
+    id 'nebula.maven-base-publish'
+    id 'opensearch.pluginzip'
+}
+
+group="org.custom.group"
+version='2.0.0.0'
+
+// A bundlePlugin task mockup
+tasks.register('bundlePlugin', Zip.class) {
+    archiveFileName = "sample-plugin-${version}.zip"
+    destinationDirectory = layout.buildDirectory.dir('distributions')
+    from layout.projectDirectory.file('sample-plugin-source.txt')
+}
+
+publishing {
+    publications {
+        pluginZip(MavenPublication) {
+            groupId = "I.am.customized"
+            pom {
+                name = "sample-plugin"
+                description = "pluginDescription"
+                licenses {
+                    license {
+                        name = "The Apache License, Version 2.0"
+                        url = "http://www.apache.org/licenses/LICENSE-2.0.txt"
+                    }
+                }
+                developers {
+                    developer {
+                        name = "John Doe"
+                        url = "https://github.com/john-doe/"
+                        organization = "Doe.inc"
+                        organizationUrl = "https://doe.inc/"
+                    }
+                }
+                url = "https://github.com/doe/sample-plugin"
+                scm {
+                    url =
"https://github.com/doe/sample-plugin" + } + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle new file mode 100644 index 0000000000000..b6deeeb12ca6a --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/customizedInvalidGroupValue.gradle @@ -0,0 +1,45 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + groupId = " " // <-- User provides invalid value + pom { + name = "sample-plugin" + description = "pluginDescription" + licenses { + license { + name = "The Apache License, Version 2.0" + url = "http://www.apache.org/licenses/LICENSE-2.0.txt" + } + } + developers { + developer { + name = "John Doe" + url = "https://github.com/john-doe/" + organization = "Doe.inc" + organizationUrl = "https://doe.inc/" + } + } + url = "https://github.com/doe/sample-plugin" + scm { + url = "https://github.com/doe/sample-plugin" + } + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle b/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle new file mode 100644 index 0000000000000..bdab385f6082c --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/groupAndVersionValue.gradle @@ -0,0 +1,44 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + pom { + name = "sample-plugin" + description = "pluginDescription" + licenses { + license { + name = "The Apache License, Version 2.0" + url = "http://www.apache.org/licenses/LICENSE-2.0.txt" + } + } + developers { + developer { + name = "John Doe" + url = "https://github.com/john-doe/" + organization = "Doe.inc" + organizationUrl = "https://doe.inc/" + } + } + url = "https://github.com/doe/sample-plugin" + scm { + url = "https://github.com/doe/sample-plugin" + } + } + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle b/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle new file mode 100644 index 0000000000000..602c178ea1a5b --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/missingGroupValue.gradle @@ -0,0 +1,22 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +//group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + } + } +} diff --git a/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle 
b/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle new file mode 100644 index 0000000000000..2cc67c2e98954 --- /dev/null +++ b/buildSrc/src/test/resources/pluginzip/missingPOMEntity.gradle @@ -0,0 +1,22 @@ +plugins { + id 'java-gradle-plugin' + id 'nebula.maven-base-publish' + id 'opensearch.pluginzip' +} + +group="org.custom.group" +version='2.0.0.0' + +// A bundlePlugin task mockup +tasks.register('bundlePlugin', Zip.class) { + archiveFileName = "sample-plugin-${version}.zip" + destinationDirectory = layout.buildDirectory.dir('distributions') + from layout.projectDirectory.file('sample-plugin-source.txt') +} + +publishing { + publications { + pluginZip(MavenPublication) { + } + } +} From f4e041ec5b178db0bb80db167dc99ac3fdc3eb09 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Mon, 29 Aug 2022 13:43:44 -0700 Subject: [PATCH 11/78] [Segment Replication] Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test (#4314) * [Segment Replication] testReplicationOnDone Add timeout to allow time for verify call Signed-off-by: Suraj Singh * Update changelog Signed-off-by: Suraj Singh * Add change log entry Signed-off-by: Suraj Singh Signed-off-by: Suraj Singh --- CHANGELOG.md | 1 + .../replication/SegmentReplicationTargetServiceTests.java | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 52fa12d523659..b3c5d731af082 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305)) - Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/OpenSearch/pull/4307)) - Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133)) +- Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314)) ### Security diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index d3a6d1a97dacc..de739f4ca834a 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -252,9 +252,8 @@ public void testReplicationOnDone() throws IOException { SegmentReplicationTargetService.SegmentReplicationListener listener = captor.getValue(); listener.onDone(new SegmentReplicationState(new ReplicationLuceneIndex())); doNothing().when(spy).onNewCheckpoint(any(), any()); - verify(spy, timeout(0).times(2)).onNewCheckpoint(eq(anotherNewCheckpoint), any()); + verify(spy, timeout(100).times(2)).onNewCheckpoint(eq(anotherNewCheckpoint), any()); closeShard(indexShard, false); - } public void testBeforeIndexShardClosed_CancelsOngoingReplications() { From beb09af65710aa17909f977a61b7a5414f7967cc Mon Sep 17 00:00:00 2001 From: Kartik Ganesh Date: Tue, 30 Aug 2022 11:17:11 -0700 Subject: [PATCH 12/78] Adding @dreamer-89 to Opensearch maintainers. 
(#4342)

Signed-off-by: Kartik Ganesh
Signed-off-by: Kartik Ganesh
---
 CHANGELOG.md   | 1 +
 MAINTAINERS.md | 1 +
 2 files changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b3c5d731af082..c7f17dac5bf13 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 ## [Unreleased]
 ### Added
 - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085))
+- Added @dreamer-89 as an Opensearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342))
 
 ### Changed
 - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 94e649a634c7f..2f54656b2ab59 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -23,6 +23,7 @@
 | Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon |
 | Sarat Vemulapalli | [saratvemulapalli](https://github.com/saratvemulapalli) | Amazon |
 | Shweta Thareja |[shwetathareja](https://github.com/shwetathareja) | Amazon |
+| Suraj Singh |[dreamer-89](https://github.com/dreamer-89) | Amazon |
 | Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon |
 | Vacha Shah | [VachaShah](https://github.com/VachaShah) | Amazon |
 | Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon |

From 4bccdbe9bdf474d79d3ff9e68e53267174b001ac Mon Sep 17 00:00:00 2001
From: Rabi Panda
Date: Tue, 30 Aug 2022 13:21:15 -0700
Subject: [PATCH 13/78] [CVE] Update snakeyaml dependency (#4341)

The package `org.yaml:snakeyaml` before version 1.31 is vulnerable to
Denial of Service (DoS) due to a missing nested depth limitation for
collections. Details at https://nvd.nist.gov/vuln/detail/CVE-2022-25857

Signed-off-by: Rabi Panda
---
 CHANGELOG.md                                    | 1 +
 buildSrc/version.properties                     | 2 +-
 libs/x-content/licenses/snakeyaml-1.26.jar.sha1 | 1 -
 libs/x-content/licenses/snakeyaml-1.31.jar.sha1 | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 libs/x-content/licenses/snakeyaml-1.26.jar.sha1
 create mode 100644 libs/x-content/licenses/snakeyaml-1.31.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c7f17dac5bf13..877e472ac66d4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314))
 
 ### Security
+- CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341))
 
 ## [2.x]
 ### Added
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 876910d5351d0..072dcc4578977 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -11,7 +11,7 @@ spatial4j        = 0.7
 jts              = 1.15.0
 jackson          = 2.13.3
 jackson_databind = 2.13.3
-snakeyaml        = 1.26
+snakeyaml        = 1.31
 icu4j            = 70.1
 supercsv         = 2.4.0
 log4j            = 2.17.1
diff --git a/libs/x-content/licenses/snakeyaml-1.26.jar.sha1 b/libs/x-content/licenses/snakeyaml-1.26.jar.sha1
deleted file mode 100644
index fde3aba8edad0..0000000000000
--- a/libs/x-content/licenses/snakeyaml-1.26.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a78a8747147d2c5807683e76ec2b633e95c14fe9
\ No newline at end of file
diff --git a/libs/x-content/licenses/snakeyaml-1.31.jar.sha1 b/libs/x-content/licenses/snakeyaml-1.31.jar.sha1
new file mode 100644
index
0000000000000..1ac9b78b88687 --- /dev/null +++ b/libs/x-content/licenses/snakeyaml-1.31.jar.sha1 @@ -0,0 +1 @@ +cf26b7b05fef01e7bec00cb88ab4feeeba743e12 \ No newline at end of file From 82bda895ccf039c48ea73c68c845908cd6289381 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Tue, 30 Aug 2022 15:33:07 -0700 Subject: [PATCH 14/78] Fixed commit workflow for dependabot PR helper (#4331) * Fixed label for dependabot PR helper Signed-off-by: Kunal Kotwani * Update autocommit workflow for dependabot changelog Signed-off-by: Kunal Kotwani * Add version config for dependabot changelog helper Signed-off-by: Kunal Kotwani Signed-off-by: Kunal Kotwani --- .github/workflows/changelog_verifier.yml | 6 ++++++ CHANGELOG.md | 2 ++ 2 files changed, 8 insertions(+) diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index ee9bf5e18d0d5..ac0c0ec4d7297 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -14,9 +14,15 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} - uses: dangoslen/dependabot-changelog-helper@v1 + with: + version: 'Unreleased' - uses: stefanzweifel/git-auto-commit-action@v4 with: commit_message: "Update changelog" + branch: ${{ github.head_ref }} + commit_user_name: dependabot[bot] + commit_user_email: support@github.com + commit_options: '--signoff' - uses: dangoslen/changelog-enforcer@v3 diff --git a/CHANGELOG.md b/CHANGELOG.md index 877e472ac66d4..9efafb1e69a50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/OpenSearch/pull/4307)) - Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133)) - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314)) +- Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) @@ -38,6 +39,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Fixed - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) +- Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) ### Security From 48d68699299346873d12bcfaf1013c148a0fe166 Mon Sep 17 00:00:00 2001 From: Rabi Panda Date: Tue, 30 Aug 2022 15:41:59 -0700 Subject: [PATCH 15/78] Add release notes for patch release 1.3.5 (#4343) Signed-off-by: Rabi Panda --- CHANGELOG.md | 1 + release-notes/opensearch.release-notes-1.3.5.md | 9 +++++++++ 2 files changed, 10 insertions(+) create mode 100644 release-notes/opensearch.release-notes-1.3.5.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 9efafb1e69a50..8c000c02e86ba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Added - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Added @dreamer-89 as an Opensearch maintainer 
([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342)) +- Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/release-notes/opensearch.release-notes-1.3.5.md b/release-notes/opensearch.release-notes-1.3.5.md new file mode 100644 index 0000000000000..fbf866bb6e112 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.5.md @@ -0,0 +1,9 @@ +## 2022-08-30 Version 1.3.5 Release Notes + +### Upgrades +* OpenJDK Update (July 2022 Patch releases) ([#4097](https://github.com/opensearch-project/OpenSearch/pull/4097)) +* Update Netty to 4.1.79.Final ([#3868](https://github.com/opensearch-project/OpenSearch/pull/3868)) + +### Bug Fixes +* OpenSearch crashes on closed client connection before search reply when total ops higher compared to expected ([#4143](https://github.com/opensearch-project/OpenSearch/pull/4143)) +* gradle check failing with java heap OutOfMemoryError ([#4150](https://github.com/opensearch-project/OpenSearch/pull/4150)) From f16ea9c8ec0a181c77b57388b2f3a322bcb47814 Mon Sep 17 00:00:00 2001 From: Rabi Panda Date: Tue, 30 Aug 2022 16:25:39 -0700 Subject: [PATCH 16/78] Add release notes for patch release 2.2.1 (#4344) Signed-off-by: Rabi Panda --- CHANGELOG.md | 1 + release-notes/opensearch.release-notes-2.2.1.md | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 release-notes/opensearch.release-notes-2.2.1.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c000c02e86ba..3b3d54a802e67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Added @dreamer-89 as an Opensearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342)) - Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343)) +- Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/release-notes/opensearch.release-notes-2.2.1.md b/release-notes/opensearch.release-notes-2.2.1.md new file mode 100644 index 0000000000000..974ff8e09a426 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.2.1.md @@ -0,0 +1,7 @@ +## 2022-08-30 Version 2.2.1 Release Notes + +### Upgrades +* Update Gradle to 7.5.1 ([#4211](https://github.com/opensearch-project/OpenSearch/pull/4211)) + +### Bug Fixes +* gradle check failing with java heap OutOfMemoryError ([#4150](https://github.com/opensearch-project/OpenSearch/pull/4150)) From 4f65ef58ef7e4836b93cbe12afcafdd07fea12a8 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Tue, 30 Aug 2022 17:34:56 -0700 Subject: [PATCH 17/78] Add label configuration for dependabot PRs (#4348) --- .github/dependabot.yml | 522 +++++++++++++++++++++++++++++++++++++++++ CHANGELOG.md | 2 + 2 files changed, 524 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9682461d9e110..07755ef69c6a3 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,869 +4,1391 @@ updates: package-ecosystem: gradle schedule: interval: weekly + labels: + - 
"dependabot" + - "dependencies" - directory: /benchmarks/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/reaper/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/oss-darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/minor/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/opensearch-build-resources/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/opensearch.build/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/reaper/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/symbolic-link-preserving-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/testingConventions/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/thirdPartyAudit/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/thirdPartyAudit/sample_jars/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/benchmark/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/client-benchmark-noop-api-plugin/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: 
+ - "dependabot" + - "dependencies" - directory: /client/rest/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/rest-high-level/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/sniffer/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/test/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/integ-test-zip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/linux-arm64-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/linux-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/no-jdk-darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/no-jdk-linux-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/no-jdk-windows-zip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/windows-zip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/bugfix/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/maintenance/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/minor/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/staged/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/docker-arm64-export/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - 
"dependabot" + - "dependencies" - directory: /distribution/docker/docker-build-context/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/docker-export/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/arm64-deb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/arm64-rpm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/deb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/no-jdk-deb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/no-jdk-rpm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/rpm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/java-version-checker/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/keystore-cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/launchers/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/plugin-cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/upgrade-cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /doc-tools/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /doc-tools/missing-doclet/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/core/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/dissect/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/geo/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: 
/libs/grok/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/nio/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/plugin-classloader/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/secure-sm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/ssl-config/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/x-content/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/aggs-matrix-stats/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/analysis-common/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/geo/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ingest-common/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ingest-geoip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ingest-user-agent/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-expression/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-mustache/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-painless/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-painless/spi/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/mapper-extras/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/opensearch-dashboards/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/parent-join/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/percolator/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/rank-eval/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: 
/modules/reindex/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/repository-url/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/systemd/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/transport-netty4/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-icu/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-kuromoji/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-nori/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-phonetic/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-smartcn/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-stempel/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-ukrainian/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-azure-classic/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-ec2/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-ec2/qa/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-ec2/qa/amazon-ec2/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-gce/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-gce/qa/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-gce/qa/gce/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/custom-settings/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/custom-significance-heuristic/ 
open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/custom-suggester/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/painless-allowlist/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/rescore/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/rest-handler/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/script-expert-scoring/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/ingest-attachment/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/mapper-annotated-text/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/mapper-murmur3/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/mapper-size/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-azure/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-gcs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-hdfs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-s3/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/store-smb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/transport-nio/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/ccs-unavailable-clusters/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/die-with-dignity/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/evil-tests/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/full-cluster-restart/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/logging-config/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: 
interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/mixed-cluster/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/multi-cluster-search/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/no-bootstrap-tests/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/centos-6/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/centos-7/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/debian-8/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/debian-9/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/fedora-28/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/fedora-29/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/oel-6/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/oel-7/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/sles-12/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/ubuntu-1604/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/ubuntu-1804/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/windows-2012r2/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/windows-2016/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/remote-clusters/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/repository-multi-version/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/rolling-upgrade/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-http/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-ingest-disabled/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - 
"dependabot" + - "dependencies" - directory: /qa/smoke-test-ingest-with-all-dependencies/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-multinode/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/translog-policy/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/unconfigured-node-name/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/verify-version-constants/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/wildfly/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /rest-api-spec/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/libs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/modules/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /server/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/external-modules/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/external-modules/delayed-aggs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/azure-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/gcs-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/hdfs-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/krb5kdc-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/minio-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: 
interval: weekly + labels: + - "dependabot" + - "dependencies"
- directory: /test/fixtures/old-elasticsearch/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies"
- directory: /test/fixtures/s3-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies"
- directory: /test/framework/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies"
- directory: /test/logger-usage/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies"
 version: 2

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3b3d54a802e67..1be3d3f53f2d6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Added @dreamer-89 as an Opensearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342))
 - Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343))
 - Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344))
+- Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348))
 ### Changed
 - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))
@@ -32,6 +33,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 ## [2.x]
 ### Added
 - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085))
+- Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348))
 ### Changed

From d72861f9de379d2a263948232947e9b95aefa962 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Wed, 31 Aug 2022 09:07:44 -0400
Subject: [PATCH 18/78] Support for HTTP/2 (server-side) (#3847)

* Support for HTTP/2 (server-side)

Signed-off-by: Andriy Redko

* Addressing code review comments

Signed-off-by: Andriy Redko

* Added HTTP/1.1 channel configuration

Signed-off-by: Andriy Redko

* Addressing code review comments

Signed-off-by: Andriy Redko

* Update pull request URL in CHANGELOG.md

Signed-off-by: Andriy Redko

Signed-off-by: Andriy Redko
---
 CHANGELOG.md                                  |   1 +
 modules/transport-netty4/build.gradle         |   1 +
 .../netty-codec-http2-4.1.79.Final.jar.sha1   |   1 +
 .../opensearch/http/netty4/Netty4Http2IT.java |  62 ++++++
 .../netty4/Netty4HttpRequestSizeLimitIT.java  |   4 +-
 .../http/netty4/Netty4PipeliningIT.java       |   2 +-
 .../http/netty4/Netty4HttpChannel.java        |  13 ++
 .../netty4/Netty4HttpServerTransport.java     | 160 ++++++++++++--
 .../http/netty4/Netty4BadRequestTests.java    |   2 +-
 .../http/netty4/Netty4HttpClient.java         | 197 +++++++++++++++++-
 .../Netty4HttpServerPipeliningTests.java      |   7 +-
 .../Netty4HttpServerTransportTests.java       |  10 +-
 12 files changed, 428 insertions(+), 32 deletions(-)
 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
 create mode 100644 modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java
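A short orientation before the diffs: cleartext HTTP/2 (h2c) lets one listening port serve both HTTP/1.1 and HTTP/2, either through an HTTP/1.1 request carrying "Upgrade: h2c" or through "prior knowledge" clients that open directly with the HTTP/2 connection preface. The sketch below condenses how such a Netty pipeline is typically wired. It is an illustration only, assuming a hypothetical requestHandler; the transport code in this patch additionally layers in OpenSearch's aggregator, compression, request/response creators, and pipelining handlers.

    import io.netty.channel.Channel;
    import io.netty.channel.ChannelHandler;
    import io.netty.channel.ChannelInitializer;
    import io.netty.handler.codec.http.HttpServerCodec;
    import io.netty.handler.codec.http.HttpServerUpgradeHandler;
    import io.netty.handler.codec.http2.CleartextHttp2ServerUpgradeHandler;
    import io.netty.handler.codec.http2.Http2CodecUtil;
    import io.netty.handler.codec.http2.Http2FrameCodecBuilder;
    import io.netty.handler.codec.http2.Http2MultiplexHandler;
    import io.netty.handler.codec.http2.Http2ServerUpgradeCodec;
    import io.netty.util.AsciiString;

    public class H2cServerInitializer extends ChannelInitializer<Channel> {
        private final ChannelHandler requestHandler; // hypothetical application handler

        public H2cServerInitializer(ChannelHandler requestHandler) {
            this.requestHandler = requestHandler;
        }

        @Override
        protected void initChannel(Channel ch) {
            HttpServerCodec sourceCodec = new HttpServerCodec();
            // Chosen when the client sends "Upgrade: h2c"; any other protocol token
            // returns null and the connection simply stays on HTTP/1.1.
            HttpServerUpgradeHandler.UpgradeCodecFactory upgradeFactory = protocol ->
                AsciiString.contentEquals(Http2CodecUtil.HTTP_UPGRADE_PROTOCOL_NAME, protocol)
                    ? new Http2ServerUpgradeCodec(
                        Http2FrameCodecBuilder.forServer().build(),
                        new Http2MultiplexHandler(requestHandler))
                    : null;
            // Covers both negotiation styles: the HTTP/1.1 Upgrade dance and clients
            // that open directly with the HTTP/2 connection preface.
            ch.pipeline().addLast(new CleartextHttp2ServerUpgradeHandler(
                sourceCodec,
                new HttpServerUpgradeHandler(sourceCodec, upgradeFactory),
                priorKnowledgeHandlers()));
        }

        private ChannelInitializer<Channel> priorKnowledgeHandlers() {
            return new ChannelInitializer<Channel>() {
                @Override
                protected void initChannel(Channel ch) {
                    // Prior-knowledge peers skip the upgrade, so the HTTP/2 codec is installed directly.
                    ch.pipeline().addLast(Http2FrameCodecBuilder.forServer().build(), new Http2MultiplexHandler(requestHandler));
                }
            };
        }
    }

Keeping the HTTP/1.1 codec as the default is the design choice that makes the change backward compatible: existing clients that never ask for an upgrade are untouched.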
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1be3d3f53f2d6..8e7fa8b5547f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343))
 - Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344))
 - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348))
+- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847))
 ### Changed
 - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))

diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle
index b72cb6d868d79..5d2047d7f18a2 100644
--- a/modules/transport-netty4/build.gradle
+++ b/modules/transport-netty4/build.gradle
@@ -58,6 +58,7 @@ dependencies {
   api "io.netty:netty-buffer:${versions.netty}"
   api "io.netty:netty-codec:${versions.netty}"
   api "io.netty:netty-codec-http:${versions.netty}"
+  api "io.netty:netty-codec-http2:${versions.netty}"
   api "io.netty:netty-common:${versions.netty}"
   api "io.netty:netty-handler:${versions.netty}"
   api "io.netty:netty-resolver:${versions.netty}"

diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
new file mode 100644
index 0000000000000..f2989024cfce1
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
@@ -0,0 +1 @@
+0eeffab0cd5efb699d5e4ab9b694d32fef6694b3
\ No newline at end of file
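The new integration test that follows leans on one HTTP/2 property worth calling out: streams are multiplexed, so responses can complete in any order, and the test therefore tags every request with an X-Opaque-ID header and checks the set of returned ids rather than their sequence. A minimal sketch of that order-insensitive check, using a hypothetical helper rather than the test's own assertion:

    import java.util.Collection;
    import java.util.Set;
    import java.util.stream.Collectors;
    import java.util.stream.IntStream;

    final class OpaqueIdAssertions {
        // With HTTP/2 there is no head-of-line blocking, so response i is not
        // guaranteed to arrive in position i; compare as sets, not as sequences.
        static void assertAllResponsesArrived(Collection<String> opaqueIds, int requestCount) {
            Set<String> expected = IntStream.range(0, requestCount)
                .mapToObj(Integer::toString)
                .collect(Collectors.toSet());
            if (!Set.copyOf(opaqueIds).equals(expected)) {
                throw new AssertionError("missing or unexpected responses: " + opaqueIds);
            }
        }
    }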
diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java
new file mode 100644
index 0000000000000..1424b392af8e7
--- /dev/null
+++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java
@@ -0,0 +1,62 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.netty4;
+
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.util.ReferenceCounted;
+import org.opensearch.OpenSearchNetty4IntegTestCase;
+import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope;
+import org.opensearch.test.OpenSearchIntegTestCase.Scope;
+
+import java.util.Collection;
+import java.util.Locale;
+import java.util.stream.IntStream;
+
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.hasSize;
+
+@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1)
+public class Netty4Http2IT extends OpenSearchNetty4IntegTestCase {
+
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
+    public void testThatNettyHttpServerSupportsHttp2() throws Exception {
+        String[] requests = new String[] { "/", "/_nodes/stats", "/", "/_cluster/state", "/" };
+
+        HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
+        TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses();
+        TransportAddress transportAddress = randomFrom(boundAddresses);
+
+        try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http2()) {
+            Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests);
+            try {
+                assertThat(responses, hasSize(5));
+
+                Collection<String> opaqueIds = Netty4HttpClient.returnOpaqueIds(responses);
+                assertOpaqueIdsInAnyOrder(opaqueIds);
+            } finally {
+                responses.forEach(ReferenceCounted::release);
+            }
+        }
+    }
+
+    private void assertOpaqueIdsInAnyOrder(Collection<String> opaqueIds) {
+        // check if opaque ids are present in any order, since for HTTP/2 we use streaming (no head of line blocking)
+        // and responses may come back at any order
+        int i = 0;
+        String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be in any order, got [%s]", opaqueIds);
+        assertThat(msg, opaqueIds, containsInAnyOrder(IntStream.range(0, 5).mapToObj(Integer::toString).toArray()));
+    }
+
+}

diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java
index 08df9259d475f..db76c0b145840 100644
--- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java
+++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java
@@ -100,7 +100,7 @@ public void testLimitsInFlightRequests() throws Exception {
         HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
         TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses());
-        try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) {
+        try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) {
             Collection<FullHttpResponse> singleResponse = nettyHttpClient.post(transportAddress.address(), requests.subList(0, 1));
             try {
                 assertThat(singleResponse, hasSize(1));
@@ -130,7 +130,7 @@ public void testDoesNotLimitExcludedRequests() throws Exception {
         HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
         TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses());
-        try
(Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection responses = nettyHttpClient.put(transportAddress.address(), requestUris); try { assertThat(responses, hasSize(requestUris.size())); diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java index 2bd1fa07f8afc..96193b0ecb954 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java @@ -61,7 +61,7 @@ public void testThatNettyHttpServerSupportsPipelining() throws Exception { TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); TransportAddress transportAddress = randomFrom(boundAddresses); - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection responses = nettyHttpClient.get(transportAddress.address(), requests); try { assertThat(responses, hasSize(5)); diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java index 66d60032d11a8..2dd7aaf41986f 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java @@ -33,7 +33,10 @@ package org.opensearch.http.netty4; import io.netty.channel.Channel; +import io.netty.channel.ChannelPipeline; + import org.opensearch.action.ActionListener; +import org.opensearch.common.Nullable; import org.opensearch.common.concurrent.CompletableContext; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpResponse; @@ -45,9 +48,15 @@ public class Netty4HttpChannel implements HttpChannel { private final Channel channel; private final CompletableContext closeContext = new CompletableContext<>(); + private final ChannelPipeline inboundPipeline; Netty4HttpChannel(Channel channel) { + this(channel, null); + } + + Netty4HttpChannel(Channel channel, ChannelPipeline inboundPipeline) { this.channel = channel; + this.inboundPipeline = inboundPipeline; Netty4TcpChannel.addListener(this.channel.closeFuture(), closeContext); } @@ -81,6 +90,10 @@ public void close() { channel.close(); } + public @Nullable ChannelPipeline inboundPipeline() { + return inboundPipeline; + } + public Channel getNettyChannel() { return channel; } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index decab45ffca38..1e0a4d89f2fd5 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -40,18 +40,36 @@ import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; import io.netty.channel.FixedRecvByteBufAllocator; import io.netty.channel.RecvByteBufAllocator; +import 
io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.socket.nio.NioChannelOption; import io.netty.handler.codec.ByteToMessageDecoder; import io.netty.handler.codec.http.HttpContentCompressor; import io.netty.handler.codec.http.HttpContentDecompressor; +import io.netty.handler.codec.http.HttpMessage; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; +import io.netty.handler.codec.http.HttpServerCodec; +import io.netty.handler.codec.http.HttpServerUpgradeHandler; +import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodec; +import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodecFactory; +import io.netty.handler.codec.http2.CleartextHttp2ServerUpgradeHandler; +import io.netty.handler.codec.http2.Http2CodecUtil; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2MultiplexHandler; +import io.netty.handler.codec.http2.Http2ServerUpgradeCodec; +import io.netty.handler.codec.http2.Http2StreamFrameToHttpObjectCodec; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; import io.netty.handler.timeout.ReadTimeoutException; import io.netty.handler.timeout.ReadTimeoutHandler; +import io.netty.util.AsciiString; import io.netty.util.AttributeKey; +import io.netty.util.ReferenceCountUtil; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; @@ -335,38 +353,152 @@ protected HttpChannelHandler(final Netty4HttpServerTransport transport, final Ht this.responseCreator = new Netty4HttpResponseCreator(); } + public ChannelHandler getRequestHandler() { + return requestHandler; + } + @Override protected void initChannel(Channel ch) throws Exception { Netty4HttpChannel nettyHttpChannel = new Netty4HttpChannel(ch); ch.attr(HTTP_CHANNEL_KEY).set(nettyHttpChannel); ch.pipeline().addLast("byte_buf_sizer", byteBufSizer); ch.pipeline().addLast("read_timeout", new ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)); + + configurePipeline(ch); + transport.serverAcceptedChannel(nettyHttpChannel); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + ExceptionsHelper.maybeDieOnAnotherThread(cause); + super.exceptionCaught(ctx, cause); + } + + protected void configurePipeline(Channel ch) { + final UpgradeCodecFactory upgradeCodecFactory = new UpgradeCodecFactory() { + @Override + public UpgradeCodec newUpgradeCodec(CharSequence protocol) { + if (AsciiString.contentEquals(Http2CodecUtil.HTTP_UPGRADE_PROTOCOL_NAME, protocol)) { + return new Http2ServerUpgradeCodec( + Http2FrameCodecBuilder.forServer().build(), + new Http2MultiplexHandler(createHttp2ChannelInitializer(ch.pipeline())) + ); + } else { + return null; + } + } + }; + + final HttpServerCodec sourceCodec = new HttpServerCodec( + handlingSettings.getMaxInitialLineLength(), + handlingSettings.getMaxHeaderSize(), + handlingSettings.getMaxChunkSize() + ); + + final HttpServerUpgradeHandler upgradeHandler = new HttpServerUpgradeHandler(sourceCodec, upgradeCodecFactory); + final CleartextHttp2ServerUpgradeHandler cleartextUpgradeHandler = new CleartextHttp2ServerUpgradeHandler( + sourceCodec, + upgradeHandler, + createHttp2ChannelInitializerPriorKnowledge() + ); + + ch.pipeline().addLast(cleartextUpgradeHandler).addLast(new SimpleChannelInboundHandler() { + @Override + 
protected void channelRead0(ChannelHandlerContext ctx, HttpMessage msg) throws Exception { + final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); + aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); + + // If this handler is hit then no upgrade has been attempted and the client is just talking HTTP + final ChannelPipeline pipeline = ctx.pipeline(); + pipeline.addAfter(ctx.name(), "handler", getRequestHandler()); + pipeline.replace(this, "aggregator", aggregator); + + ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); + ch.pipeline().addLast("encoder", new HttpResponseEncoder()); + if (handlingSettings.isCompression()) { + ch.pipeline() + .addAfter("aggregator", "encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + } + ch.pipeline().addBefore("handler", "request_creator", requestCreator); + ch.pipeline().addBefore("handler", "response_creator", responseCreator); + ch.pipeline() + .addBefore("handler", "pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); + + ctx.fireChannelRead(ReferenceCountUtil.retain(msg)); + } + }); + } + + protected void configureDefaultHttpPipeline(ChannelPipeline pipeline) { final HttpRequestDecoder decoder = new HttpRequestDecoder( handlingSettings.getMaxInitialLineLength(), handlingSettings.getMaxHeaderSize(), handlingSettings.getMaxChunkSize() ); decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); - ch.pipeline().addLast("decoder", decoder); - ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); - ch.pipeline().addLast("encoder", new HttpResponseEncoder()); + pipeline.addLast("decoder", decoder); + pipeline.addLast("decoder_compress", new HttpContentDecompressor()); + pipeline.addLast("encoder", new HttpResponseEncoder()); final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); - ch.pipeline().addLast("aggregator", aggregator); + pipeline.addLast("aggregator", aggregator); if (handlingSettings.isCompression()) { - ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + pipeline.addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); } - ch.pipeline().addLast("request_creator", requestCreator); - ch.pipeline().addLast("response_creator", responseCreator); - ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); - ch.pipeline().addLast("handler", requestHandler); - transport.serverAcceptedChannel(nettyHttpChannel); + pipeline.addLast("request_creator", requestCreator); + pipeline.addLast("response_creator", responseCreator); + pipeline.addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); + pipeline.addLast("handler", requestHandler); } - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - super.exceptionCaught(ctx, cause); + protected void configureDefaultHttp2Pipeline(ChannelPipeline pipeline) { + pipeline.addLast(Http2FrameCodecBuilder.forServer().build()) + .addLast(new Http2MultiplexHandler(createHttp2ChannelInitializer(pipeline))); + } + + private ChannelInitializer createHttp2ChannelInitializerPriorKnowledge() { + return new 
ChannelInitializer() { + @Override + protected void initChannel(Channel childChannel) throws Exception { + configureDefaultHttp2Pipeline(childChannel.pipeline()); + } + }; + } + + /** + * Http2MultiplexHandler creates new pipeline, we are preserving the old one in case some handlers need to be + * access (like for example opensearch-security plugin which accesses SSL handlers). + */ + private ChannelInitializer createHttp2ChannelInitializer(ChannelPipeline inboundPipeline) { + return new ChannelInitializer() { + @Override + protected void initChannel(Channel childChannel) throws Exception { + final Netty4HttpChannel nettyHttpChannel = new Netty4HttpChannel(childChannel, inboundPipeline); + childChannel.attr(HTTP_CHANNEL_KEY).set(nettyHttpChannel); + + final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); + aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); + + childChannel.pipeline() + .addLast(new LoggingHandler(LogLevel.DEBUG)) + .addLast(new Http2StreamFrameToHttpObjectCodec(true)) + .addLast("byte_buf_sizer", byteBufSizer) + .addLast("read_timeout", new ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)) + .addLast("decoder_decompress", new HttpContentDecompressor()); + + if (handlingSettings.isCompression()) { + childChannel.pipeline() + .addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + } + + childChannel.pipeline() + .addLast("aggregator", aggregator) + .addLast("request_creator", requestCreator) + .addLast("response_creator", responseCreator) + .addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)) + .addLast("handler", getRequestHandler()); + } + }; } } diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java index a0100930c7dcb..c18fe6efc4736 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java @@ -117,7 +117,7 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, httpServerTransport.start(); final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { final Collection responses = nettyHttpClient.get( transportAddress.address(), "/_cluster/settings?pretty=%" diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java index 57f95a022a33f..6fdd698c117f2 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java @@ -37,14 +37,19 @@ import io.netty.buffer.Unpooled; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandler; +import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPromise; import io.netty.channel.SimpleChannelInboundHandler; import 
io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpClientCodec; +import io.netty.handler.codec.http.HttpClientUpgradeHandler; import io.netty.handler.codec.http.HttpContentDecompressor; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpMethod; @@ -55,6 +60,17 @@ import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseDecoder; import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.DefaultHttp2Connection; +import io.netty.handler.codec.http2.DelegatingDecompressorFrameListener; +import io.netty.handler.codec.http2.Http2ClientUpgradeCodec; +import io.netty.handler.codec.http2.Http2Connection; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.codec.http2.HttpConversionUtil; +import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandler; +import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandlerBuilder; +import io.netty.handler.codec.http2.InboundHttp2ToHttpAdapterBuilder; +import io.netty.util.AttributeKey; + import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; @@ -70,6 +86,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; import static io.netty.handler.codec.http.HttpHeaderNames.HOST; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; @@ -97,11 +114,32 @@ static Collection returnOpaqueIds(Collection responses } private final Bootstrap clientBootstrap; + private final BiFunction, AwaitableChannelInitializer> handlerFactory; + + Netty4HttpClient( + Bootstrap clientBootstrap, + BiFunction, AwaitableChannelInitializer> handlerFactory + ) { + this.clientBootstrap = clientBootstrap; + this.handlerFactory = handlerFactory; + } + + static Netty4HttpClient http() { + return new Netty4HttpClient( + new Bootstrap().channel(NettyAllocator.getChannelType()) + .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .group(new NioEventLoopGroup(1)), + CountDownLatchHandlerHttp::new + ); + } - Netty4HttpClient() { - clientBootstrap = new Bootstrap().channel(NettyAllocator.getChannelType()) - .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) - .group(new NioEventLoopGroup(1)); + static Netty4HttpClient http2() { + return new Netty4HttpClient( + new Bootstrap().channel(NettyAllocator.getChannelType()) + .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .group(new NioEventLoopGroup(1)), + CountDownLatchHandlerHttp2::new + ); } public List get(SocketAddress remoteAddress, String... uris) throws InterruptedException { @@ -110,6 +148,7 @@ public List get(SocketAddress remoteAddress, String... 
uris) t final HttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]); httpRequest.headers().add(HOST, "localhost"); httpRequest.headers().add("X-Opaque-ID", String.valueOf(i)); + httpRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); requests.add(httpRequest); } return sendRequests(remoteAddress, requests); @@ -143,6 +182,7 @@ private List processRequestsWithBody( request.headers().add(HttpHeaderNames.HOST, "localhost"); request.headers().add(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes()); request.headers().add(HttpHeaderNames.CONTENT_TYPE, "application/json"); + request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); requests.add(request); } return sendRequests(remoteAddress, requests); @@ -153,12 +193,14 @@ private synchronized List sendRequests(final SocketAddress rem final CountDownLatch latch = new CountDownLatch(requests.size()); final List content = Collections.synchronizedList(new ArrayList<>(requests.size())); - clientBootstrap.handler(new CountDownLatchHandler(latch, content)); + final AwaitableChannelInitializer handler = handlerFactory.apply(latch, content); + clientBootstrap.handler(handler); ChannelFuture channelFuture = null; try { channelFuture = clientBootstrap.connect(remoteAddress); channelFuture.sync(); + handler.await(); for (HttpRequest request : requests) { channelFuture.channel().writeAndFlush(request); @@ -184,12 +226,12 @@ public void close() { /** * helper factory which adds returned data to a list and uses a count down latch to decide when done */ - private static class CountDownLatchHandler extends ChannelInitializer { + private static class CountDownLatchHandlerHttp extends AwaitableChannelInitializer { private final CountDownLatch latch; private final Collection content; - CountDownLatchHandler(final CountDownLatch latch, final Collection content) { + CountDownLatchHandlerHttp(final CountDownLatch latch, final Collection content) { this.latch = latch; this.content = content; } @@ -222,4 +264,145 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E } + /** + * The channel initializer with the ability to await for initialization to be completed + * + */ + private static abstract class AwaitableChannelInitializer extends ChannelInitializer { + void await() { + // do nothing + } + } + + /** + * helper factory which adds returned data to a list and uses a count down latch to decide when done + */ + private static class CountDownLatchHandlerHttp2 extends AwaitableChannelInitializer { + + private final CountDownLatch latch; + private final Collection content; + private Http2SettingsHandler settingsHandler; + + CountDownLatchHandlerHttp2(final CountDownLatch latch, final Collection content) { + this.latch = latch; + this.content = content; + } + + @Override + protected void initChannel(SocketChannel ch) { + final int maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(); + final Http2Connection connection = new DefaultHttp2Connection(false); + settingsHandler = new Http2SettingsHandler(ch.newPromise()); + + final ChannelInboundHandler responseHandler = new SimpleChannelInboundHandler() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) { + final FullHttpResponse response = (FullHttpResponse) msg; + + // this is upgrade request, skipping it over + if (Boolean.TRUE.equals(ctx.channel().attr(AttributeKey.valueOf("upgrade")).getAndRemove())) { + return; + } + + // We copy the 
buffer manually to avoid a huge allocation on a pooled allocator. We have + // a test that tracks huge allocations, so we want to avoid them in this test code. + ByteBuf newContent = Unpooled.copiedBuffer(((FullHttpResponse) msg).content()); + content.add(response.replace(newContent)); + latch.countDown(); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + super.exceptionCaught(ctx, cause); + latch.countDown(); + } + }; + + final HttpToHttp2ConnectionHandler connectionHandler = new HttpToHttp2ConnectionHandlerBuilder().connection(connection) + .frameListener( + new DelegatingDecompressorFrameListener( + connection, + new InboundHttp2ToHttpAdapterBuilder(connection).maxContentLength(maxContentLength).propagateSettings(true).build() + ) + ) + .build(); + + final HttpClientCodec sourceCodec = new HttpClientCodec(); + final Http2ClientUpgradeCodec upgradeCodec = new Http2ClientUpgradeCodec(connectionHandler); + final HttpClientUpgradeHandler upgradeHandler = new HttpClientUpgradeHandler(sourceCodec, upgradeCodec, maxContentLength); + + ch.pipeline().addLast(sourceCodec); + ch.pipeline().addLast(upgradeHandler); + ch.pipeline().addLast(new HttpContentDecompressor()); + ch.pipeline().addLast(new UpgradeRequestHandler(settingsHandler, responseHandler)); + } + + @Override + void await() { + try { + // Await for HTTP/2 settings being sent over before moving on to sending the requests + settingsHandler.awaitSettings(5, TimeUnit.SECONDS); + } catch (final Exception ex) { + throw new RuntimeException(ex); + } + } + } + + /** + * A handler that triggers the cleartext upgrade to HTTP/2 (h2c) by sending an + * initial HTTP request. + */ + private static class UpgradeRequestHandler extends ChannelInboundHandlerAdapter { + private final ChannelInboundHandler settingsHandler; + private final ChannelInboundHandler responseHandler; + + UpgradeRequestHandler(final ChannelInboundHandler settingsHandler, final ChannelInboundHandler responseHandler) { + this.settingsHandler = settingsHandler; + this.responseHandler = responseHandler; + } + + @Override + public void channelActive(ChannelHandlerContext ctx) throws Exception { + // The first request is HTTP/2 protocol upgrade (since we support only h2c there) + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + request.headers().add(HttpHeaderNames.HOST, "localhost"); + request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); + + ctx.channel().attr(AttributeKey.newInstance("upgrade")).set(true); + ctx.writeAndFlush(request); + ctx.fireChannelActive(); + + ctx.pipeline().remove(this); + ctx.pipeline().addLast(settingsHandler); + ctx.pipeline().addLast(responseHandler); + } + } + + private static class Http2SettingsHandler extends SimpleChannelInboundHandler { + private ChannelPromise promise; + + Http2SettingsHandler(ChannelPromise promise) { + this.promise = promise; + } + + /** + * Wait for this handler to be added after the upgrade to HTTP/2, and for initial preface + * handshake to complete. 
+     */
+        void awaitSettings(long timeout, TimeUnit unit) throws Exception {
+            if (!promise.awaitUninterruptibly(timeout, unit)) {
+                throw new IllegalStateException("Timed out waiting for HTTP/2 settings");
+            }
+            if (!promise.isSuccess()) {
+                throw new RuntimeException(promise.cause());
+            }
+        }
+
+        @Override
+        protected void channelRead0(ChannelHandlerContext ctx, Http2Settings msg) throws Exception {
+            promise.setSuccess();
+            ctx.pipeline().remove(this);
+        }
+    }
+}
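For orientation, the essentials of the client-side h2c upgrade used by CountDownLatchHandlerHttp2 above, reduced to a standalone sketch: an HTTP/1.1 codec plus an HttpClientUpgradeHandler that rewires the pipeline to an HTTP/2 connection handler once the server answers 101 Switching Protocols. This is illustrative only; responseHandler is a hypothetical application handler, and the real test client additionally marks and skips the upgrade response and handles decompression.

    import io.netty.channel.ChannelHandler;
    import io.netty.channel.ChannelInitializer;
    import io.netty.channel.socket.SocketChannel;
    import io.netty.handler.codec.http.HttpClientCodec;
    import io.netty.handler.codec.http.HttpClientUpgradeHandler;
    import io.netty.handler.codec.http2.DefaultHttp2Connection;
    import io.netty.handler.codec.http2.DelegatingDecompressorFrameListener;
    import io.netty.handler.codec.http2.Http2ClientUpgradeCodec;
    import io.netty.handler.codec.http2.Http2Connection;
    import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandler;
    import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandlerBuilder;
    import io.netty.handler.codec.http2.InboundHttp2ToHttpAdapterBuilder;

    public class H2cClientInitializer extends ChannelInitializer<SocketChannel> {
        private static final int MAX_CONTENT_LENGTH = 64 * 1024 * 1024;
        private final ChannelHandler responseHandler; // hypothetical application handler

        public H2cClientInitializer(ChannelHandler responseHandler) {
            this.responseHandler = responseHandler;
        }

        @Override
        protected void initChannel(SocketChannel ch) {
            Http2Connection connection = new DefaultHttp2Connection(false); // false = client side
            // Translates outbound HTTP/1-style messages into HTTP/2 frames and folds
            // inbound frames back into full HTTP response objects.
            HttpToHttp2ConnectionHandler connectionHandler = new HttpToHttp2ConnectionHandlerBuilder()
                .connection(connection)
                .frameListener(new DelegatingDecompressorFrameListener(
                    connection,
                    new InboundHttp2ToHttpAdapterBuilder(connection)
                        .maxContentLength(MAX_CONTENT_LENGTH)
                        .propagateSettings(true)
                        .build()))
                .build();
            HttpClientCodec sourceCodec = new HttpClientCodec();
            // The first request goes out as HTTP/1.1 with an "Upgrade: h2c" header; on
            // "101 Switching Protocols" the upgrade handler swaps in connectionHandler.
            ch.pipeline().addLast(
                sourceCodec,
                new HttpClientUpgradeHandler(sourceCodec, new Http2ClientUpgradeCodec(connectionHandler), MAX_CONTENT_LENGTH),
                responseHandler);
        }
    }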
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java
index 029aed1f3cc89..cda66b8d828fa 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java
@@ -109,7 +109,7 @@ public void testThatHttpPipeliningWorks() throws Exception {
             }
         }
-        try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) {
+        try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) {
             Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[] {}));
             try {
                 Collection<String> responseBodies = Netty4HttpClient.returnHttpResponseBodies(responses);
@@ -163,9 +163,12 @@ private class CustomHttpChannelHandler extends Netty4HttpServerTransport.HttpCha
         @Override
         protected void initChannel(Channel ch) throws Exception {
             super.initChannel(ch);
-            ch.pipeline().replace("handler", "handler", new PossiblySlowUpstreamHandler(executorService));
         }
+        @Override
+        public ChannelHandler getRequestHandler() {
+            return new PossiblySlowUpstreamHandler(executorService);
+        }
     }
     class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler {

diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java
index ec879e538fe20..eb96f14f10c70 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java
@@ -202,7 +202,7 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext,
     ) {
         transport.start();
         final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
-        try (Netty4HttpClient client = new Netty4HttpClient()) {
+        try (Netty4HttpClient client = Netty4HttpClient.http()) {
             final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/");
             request.headers().set(HttpHeaderNames.EXPECT, expectation);
             HttpUtil.setContentLength(request, contentLength);
@@ -322,7 +322,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th
         transport.start();
         final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
-        try (Netty4HttpClient client = new Netty4HttpClient()) {
+        try (Netty4HttpClient client = Netty4HttpClient.http()) {
             final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8"));
             final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
@@ -384,7 +384,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th
         transport.start();
         final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
-        try (Netty4HttpClient client = new Netty4HttpClient()) {
+        try (Netty4HttpClient client = Netty4HttpClient.http()) {
             DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
             request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip"));
             long numOfHugeAllocations = getHugeAllocationCount();
@@ -454,7 +454,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th
         final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
         // Test pre-flight request
-        try (Netty4HttpClient client = new Netty4HttpClient()) {
+        try (Netty4HttpClient client = Netty4HttpClient.http()) {
             final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/");
             request.headers().add(CorsHandler.ORIGIN, "test-cors.org");
             request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST");
@@ -471,7 +471,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th
         }
         // Test short-circuited request
-        try (Netty4HttpClient client = new Netty4HttpClient()) {
+        try (Netty4HttpClient client = Netty4HttpClient.http()) {
             final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
             request.headers().add(CorsHandler.ORIGIN, "google.com");
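Closing out this patch, note the await() step that the refactored test client introduces before any request is written: an h2c client that sends requests before the upgrade and the server's initial SETTINGS exchange complete can have them rejected or answered on the wrong protocol. A hedged usage sketch of the factories above; the address and paths are placeholders:

    import java.net.InetSocketAddress;
    import java.util.Collection;

    import io.netty.handler.codec.http.FullHttpResponse;
    import io.netty.util.ReferenceCounted;

    public class Http2ClientExample {
        public static void main(String[] args) throws InterruptedException {
            // http2() upgrades the connection and waits for the server's SETTINGS
            // frame before get() writes any request; http() keeps plain HTTP/1.1.
            try (Netty4HttpClient client = Netty4HttpClient.http2()) {
                Collection<FullHttpResponse> responses =
                    client.get(new InetSocketAddress("127.0.0.1", 9200), "/", "/_nodes/stats");
                try {
                    responses.forEach(r -> System.out.println(r.status()));
                } finally {
                    // FullHttpResponse is reference-counted; release to avoid buffer leaks.
                    responses.forEach(ReferenceCounted::release);
                }
            }
        }
    }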
100120a440c63ac8ebe4cf4bad51de29ce54ebf9 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Wed, 31 Aug 2022 11:55:49 -0700 Subject: [PATCH 20/78] Revert "Fix token usage for changelog helper (#4351)" (#4361) This reverts commit c28221e0176ad3ce782c18a2b23ea2e59ed0e0a8. Signed-off-by: Kunal Kotwani Signed-off-by: Kunal Kotwani --- .github/workflows/changelog_verifier.yml | 13 +------------ CHANGELOG.md | 1 - 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index fb4f8ea3f8ecc..ac0c0ec4d7297 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -7,21 +7,10 @@ jobs: # Enforces the update of a changelog file on every pull request verify-changelog: runs-on: ubuntu-latest - permissions: - pull-requests: write - contents: write steps: - - name: GitHub App token - id: github_app_token - uses: tibdex/github-app-token@v1.5.0 - with: - app_id: ${{ secrets.APP_ID }} - private_key: ${{ secrets.APP_PRIVATE_KEY }} - installation_id: 22958780 - - uses: actions/checkout@v3 with: - token: ${{ steps.github_app_token.outputs.token }} + token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ github.event.pull_request.head.sha }} - uses: dangoslen/dependabot-changelog-helper@v1 diff --git a/CHANGELOG.md b/CHANGELOG.md index c9b8b1041bd9e..8e7fa8b5547f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,7 +27,6 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133)) - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) -- Token usage for dependabot changelog helper ([#4351](https://github.com/opensearch-project/OpenSearch/pull/4351)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) From 19d1a2b027fef8b981560969bf428476d700bd07 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Wed, 31 Aug 2022 13:12:24 -0700 Subject: [PATCH 21/78] Segment Replication - Implement segment replication event cancellation. (#4225) * Segment Replication. Fix cancellation of replication events. This PR updates segment replication paths to correctly cancel replication events on the primary and replica. In the source service, any ongoing event for a primary that is sending to a replica that shuts down or is promoted as a new primary is cancelled. In the target service, any ongoing event for a replica that is promoted as a new primary or is fetching from a primary that shuts down is cancelled. It wires up SegmentReplicationSourceService as an IndexEventListener so that it can respond to events and cancel any ongoing transfer state. This change also includes some test cleanup for segment replication to rely on actual components over mocks. Signed-off-by: Marc Handalian Fix to not start/stop SegmentReplicationSourceService as a lifecycle component with feature flag off. Signed-off-by: Marc Handalian Update logic to properly mark SegmentReplicationTarget as cancelled when the cancel is initiated by the primary. Signed-off-by: Marc Handalian Minor updates from self review. Signed-off-by: Marc Handalian * Add missing changelog entry.
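For reviewers, a minimal self-contained sketch of the cancel-callback pattern this change applies on both the source and target side. The class and method names here are toys, not the real OpenSearch API; the actual code uses org.opensearch.common.util.CancellableThreads with setOnCancel/checkForCancel/cancel as shown in the diff below.

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

// Toy stand-in for CancellableThreads: a one-shot cancel flag plus an onCancel
// callback, re-checked before each long-running replication step.
class ToyCancellableSteps {
    private final AtomicReference<String> cancelReason = new AtomicReference<>();
    private volatile Consumer<String> onCancel = reason -> {};

    void setOnCancel(Consumer<String> callback) {
        this.onCancel = callback;
    }

    void cancel(String reason) {
        // first caller wins; the callback notifies the replication listener
        if (cancelReason.compareAndSet(null, reason)) {
            onCancel.accept(reason);
        }
    }

    void checkForCancel() {
        final String reason = cancelReason.get();
        if (reason != null) {
            throw new IllegalStateException("replication was canceled reason [" + reason + "]");
        }
    }
}

public class CancellationSketch {
    public static void main(String[] args) {
        ToyCancellableSteps steps = new ToyCancellableSteps();
        // as in SegmentReplicationTarget#startReplication: a cancel resolves the
        // listener with a failure but does not fail the shard
        steps.setOnCancel(reason -> System.out.println("listener.onFailure: canceled [" + reason + "]"));
        steps.checkForCancel();       // step 1 (e.g. get checkpoint metadata) proceeds
        steps.cancel("shard closed"); // e.g. beforeIndexShardClosed fires mid-replication
        try {
            steps.checkForCancel();   // step 2 (e.g. get segment files) aborts promptly
        } catch (IllegalStateException e) {
            System.out.println("aborted: " + e.getMessage());
        }
    }
}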
Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- CHANGELOG.md | 1 + .../cluster/IndicesClusterStateService.java | 5 + .../OngoingSegmentReplications.java | 22 +- .../PrimaryShardReplicationSource.java | 6 + .../replication/SegmentReplicationSource.java | 6 + .../SegmentReplicationSourceHandler.java | 11 + .../SegmentReplicationSourceService.java | 44 ++- .../replication/SegmentReplicationState.java | 15 +- .../replication/SegmentReplicationTarget.java | 31 +- .../SegmentReplicationTargetService.java | 53 +++- .../main/java/org/opensearch/node/Node.java | 7 + .../SegmentReplicationIndexShardTests.java | 275 ++++++++++++++++++ ...ClusterStateServiceRandomUpdatesTests.java | 2 + .../OngoingSegmentReplicationsTests.java | 49 ++++ .../PrimaryShardReplicationSourceTests.java | 37 +++ .../SegmentReplicationSourceHandlerTests.java | 46 +++ .../SegmentReplicationTargetServiceTests.java | 200 +++++-------- .../snapshots/SnapshotResiliencyTests.java | 2 + .../index/shard/IndexShardTestCase.java | 118 +++++--- 19 files changed, 737 insertions(+), 193 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e7fa8b5547f0..4d07052d55ff0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133)) - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) +- Fixed cancellation of segment replication events ([#4225](https://github.com/opensearch-project/OpenSearch/pull/4225)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 8884ef2cddd0a..15a9bf9e4c492 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -81,6 +81,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationState; @@ -152,6 +153,7 @@ public IndicesClusterStateService( final ThreadPool threadPool, final PeerRecoveryTargetService recoveryTargetService, final SegmentReplicationTargetService segmentReplicationTargetService, + final SegmentReplicationSourceService segmentReplicationSourceService, final ShardStateAction shardStateAction, final NodeMappingRefreshAction nodeMappingRefreshAction, final RepositoriesService repositoriesService, @@ -170,6 +172,7 @@ public IndicesClusterStateService( threadPool, checkpointPublisher, segmentReplicationTargetService, + segmentReplicationSourceService, recoveryTargetService, shardStateAction, nodeMappingRefreshAction, @@ -191,6 +194,7 @@ public 
IndicesClusterStateService( final ThreadPool threadPool, final SegmentReplicationCheckpointPublisher checkpointPublisher, final SegmentReplicationTargetService segmentReplicationTargetService, + final SegmentReplicationSourceService segmentReplicationSourceService, final PeerRecoveryTargetService recoveryTargetService, final ShardStateAction shardStateAction, final NodeMappingRefreshAction nodeMappingRefreshAction, @@ -211,6 +215,7 @@ public IndicesClusterStateService( // if segrep feature flag is not enabled, don't wire the target serivce as an IndexEventListener. if (FeatureFlags.isEnabled(FeatureFlags.REPLICATION_TYPE)) { indexEventListeners.add(segmentReplicationTargetService); + indexEventListeners.add(segmentReplicationSourceService); } this.builtInIndexListener = Collections.unmodifiableList(indexEventListeners); this.indicesService = indicesService; diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java index dfebe5f7cabf2..828aa29192fe3 100644 --- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java +++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java @@ -37,7 +37,6 @@ * @opensearch.internal */ class OngoingSegmentReplications { - private final RecoverySettings recoverySettings; private final IndicesService indicesService; private final Map copyStateMap; @@ -161,6 +160,20 @@ synchronized void cancel(IndexShard shard, String reason) { cancelHandlers(handler -> handler.getCopyState().getShard().shardId().equals(shard.shardId()), reason); } + /** + * Cancel all Replication events for the given allocation ID, intended to be called when a primary is shutting down. + * + * @param allocationId {@link String} - Allocation ID. 
+ * @param reason {@link String} - Reason for the cancellation + */ + synchronized void cancel(String allocationId, String reason) { + final SegmentReplicationSourceHandler handler = allocationIdToHandlers.remove(allocationId); + if (handler != null) { + handler.cancel(reason); + removeCopyState(handler.getCopyState()); + } + } + /** * Cancel any ongoing replications for a given {@link DiscoveryNode} * @@ -168,7 +181,6 @@ synchronized void cancel(IndexShard shard, String reason) { */ void cancelReplication(DiscoveryNode node) { cancelHandlers(handler -> handler.getTargetNode().equals(node), "Node left"); - } /** @@ -243,11 +255,7 @@ private void cancelHandlers(Predicate p .map(SegmentReplicationSourceHandler::getAllocationId) .collect(Collectors.toList()); for (String allocationId : allocationIds) { - final SegmentReplicationSourceHandler handler = allocationIdToHandlers.remove(allocationId); - if (handler != null) { - handler.cancel(reason); - removeCopyState(handler.getCopyState()); - } + cancel(allocationId, reason); } } } diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java index 08dc0b97b31d5..aa0b5416dd0ff 100644 --- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -87,4 +87,10 @@ public void getSegmentFiles( ); transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, responseListener, reader); } + + @Override + public void cancel() { + transportClient.cancel(); + } + } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java index 8628a266ea7d0..b2e7487fff4b2 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java @@ -9,6 +9,7 @@ package org.opensearch.indices.replication; import org.opensearch.action.ActionListener; +import org.opensearch.common.util.CancellableThreads.ExecutionCancelledException; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; @@ -47,4 +48,9 @@ void getSegmentFiles( Store store, ActionListener listener ); + + /** + * Cancel any ongoing requests; this should resolve any ongoing listeners with onFailure with an {@link ExecutionCancelledException}. 
+ */ + default void cancel() {} } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index 2d21653c1924c..022d90b41d8ee 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -113,6 +113,16 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene final Closeable releaseResources = () -> IOUtils.close(resources); try { timer.start(); + cancellableThreads.setOnCancel((reason, beforeCancelEx) -> { + final RuntimeException e = new CancellableThreads.ExecutionCancelledException( + "replication was canceled reason [" + reason + "]" + ); + if (beforeCancelEx != null) { + e.addSuppressed(beforeCancelEx); + } + IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); + throw e; + }); final Consumer onFailure = e -> { assert Transports.assertNotTransportThread(SegmentReplicationSourceHandler.this + "[onFailure]"); IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); @@ -153,6 +163,7 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene final MultiChunkTransfer transfer = segmentFileTransferHandler .createTransfer(shard.store(), storeFileMetadata, () -> 0, sendFileStep); resources.add(transfer); + cancellableThreads.checkForCancel(); transfer.start(); sendFileStep.whenComplete(r -> { diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index 0cee731fde2cb..db3f87201b774 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -15,6 +15,7 @@ import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.component.AbstractLifecycleComponent; @@ -42,7 +43,25 @@ * * @opensearch.internal */ -public final class SegmentReplicationSourceService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { +public class SegmentReplicationSourceService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { + + // Empty Implementation, only required while Segment Replication is under feature flag. 
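+ // Each listener hook below is deliberately a no-op so the service can be bound and invoked safely while the flag is off.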
+ public static final SegmentReplicationSourceService NO_OP = new SegmentReplicationSourceService() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + // NoOp; + } + + @Override + public void beforeIndexShardClosed(ShardId shardId, IndexShard indexShard, Settings indexSettings) { + // NoOp; + } + + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + // NoOp; + } + }; private static final Logger logger = LogManager.getLogger(SegmentReplicationSourceService.class); private final RecoverySettings recoverySettings; @@ -62,6 +81,14 @@ public static class Actions { private final OngoingSegmentReplications ongoingSegmentReplications; + // Used only for empty implementation. + private SegmentReplicationSourceService() { + recoverySettings = null; + ongoingSegmentReplications = null; + transportService = null; + indicesService = null; + } + public SegmentReplicationSourceService( IndicesService indicesService, TransportService transportService, @@ -163,10 +190,25 @@ protected void doClose() throws IOException { } + /** + * + * Cancels any replications on this node to a replica shard that is about to be closed. + */ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null) { ongoingSegmentReplications.cancel(indexShard, "shard is closed"); } } + + /** + * Cancels any replications on this node to a replica that has been promoted as primary. + */ + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + if (indexShard != null && oldRouting.primary() == false && newRouting.primary()) { + ongoingSegmentReplications.cancel(indexShard.routingEntry().allocationId().getId(), "Relocating primary shard."); + } + } + } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java index f865ba1332186..2e2e6df007c5c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java @@ -35,7 +35,8 @@ public enum Stage { GET_CHECKPOINT_INFO((byte) 3), FILE_DIFF((byte) 4), GET_FILES((byte) 5), - FINALIZE_REPLICATION((byte) 6); + FINALIZE_REPLICATION((byte) 6), + CANCELLED((byte) 7); private static final Stage[] STAGES = new Stage[Stage.values().length]; @@ -118,6 +119,10 @@ protected void validateAndSetStage(Stage expected, Stage next) { "can't move replication to stage [" + next + "]. 
current stage: [" + stage + "] (expected [" + expected + "])" ); } + stopTimersAndSetStage(next); + } + + private void stopTimersAndSetStage(Stage next) { // save the timing data for the current step stageTimer.stop(); timingData.add(new Tuple<>(stage.name(), stageTimer.time())); @@ -155,6 +160,14 @@ public void setStage(Stage stage) { overallTimer.stop(); timingData.add(new Tuple<>("OVERALL", overallTimer.time())); break; + case CANCELLED: + if (this.stage == Stage.DONE) { + throw new IllegalStateException("can't move replication to Cancelled state from Done."); + } + stopTimersAndSetStage(Stage.CANCELLED); + overallTimer.stop(); + timingData.add(new Tuple<>("OVERALL", overallTimer.time())); + break; default: throw new IllegalArgumentException("unknown SegmentReplicationState.Stage [" + stage + "]"); } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index a658ffc09d590..d1d6104a416ca 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -17,6 +17,7 @@ import org.apache.lucene.store.ByteBuffersDataInput; import org.apache.lucene.store.ByteBuffersIndexInput; import org.apache.lucene.store.ChecksumIndexInput; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; @@ -103,7 +104,15 @@ public String description() { @Override public void notifyListener(OpenSearchException e, boolean sendShardFailure) { - listener.onFailure(state(), e, sendShardFailure); + // Cancellations are still passed to our SegmentReplicationListener as failures; if we failed because of cancellation, + // update the stage. + final Throwable cancelledException = ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class); + if (cancelledException != null) { + state.setStage(SegmentReplicationState.Stage.CANCELLED); + listener.onFailure(state(), (CancellableThreads.ExecutionCancelledException) cancelledException, sendShardFailure); + } else { + listener.onFailure(state(), e, sendShardFailure); + } } @Override @@ -134,6 +143,14 @@ public void writeFileChunk( * @param listener {@link ActionListener} listener. */ public void startReplication(ActionListener listener) { + cancellableThreads.setOnCancel((reason, beforeCancelEx) -> { + // This method only executes when cancellation is triggered by this node and caught by a call to checkForCancel; + // SegmentReplicationSource does not share CancellableThreads. + final CancellableThreads.ExecutionCancelledException executionCancelledException = + new CancellableThreads.ExecutionCancelledException("replication was canceled reason [" + reason + "]"); + notifyListener(executionCancelledException, false); + throw executionCancelledException; + }); state.setStage(SegmentReplicationState.Stage.REPLICATING); final StepListener checkpointInfoListener = new StepListener<>(); final StepListener getFilesListener = new StepListener<>(); @@ -141,6 +158,7 @@ public void startReplication(ActionListener listener) { logger.trace("[shardId {}] Replica starting replication [id {}]", shardId().getId(), getId()); // Get list of files to copy from this checkpoint. 
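+ // Bail out early if this target was cancelled while queued; each later step re-checks before its network call.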
+ cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO); source.getCheckpointMetadata(getId(), checkpoint, checkpointInfoListener); @@ -154,6 +172,7 @@ public void startReplication(ActionListener listener) { private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener getFilesListener) throws IOException { + cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.FILE_DIFF); final Store.MetadataSnapshot snapshot = checkpointInfo.getSnapshot(); Store.MetadataSnapshot localMetadata = getMetadataSnapshot(); @@ -188,12 +207,14 @@ private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener listener) { - state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); ActionListener.completeWith(listener, () -> { + cancellableThreads.checkForCancel(); + state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); multiFileWriter.renameAllTempFiles(); final Store store = store(); store.incRef(); @@ -261,4 +282,10 @@ Store.MetadataSnapshot getMetadataSnapshot() throws IOException { } return store.getMetadata(indexShard.getSegmentInfosSnapshot().get()); } + + @Override + protected void onCancel(String reason) { + cancellableThreads.cancel(reason); + source.cancel(); + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index a79ce195ad83b..9e6b66dc4d7d6 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -11,10 +11,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; @@ -64,6 +67,11 @@ public void beforeIndexShardClosed(ShardId shardId, IndexShard indexShard, Setti public synchronized void onNewCheckpoint(ReplicationCheckpoint receivedCheckpoint, IndexShard replicaShard) { // noOp; } + + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + // noOp; + } }; // Used only for empty implementation. @@ -74,6 +82,10 @@ private SegmentReplicationTargetService() { sourceFactory = null; } + public ReplicationRef get(long replicationId) { + return onGoingReplications.get(replicationId); + } + /** * The internal actions * @@ -102,6 +114,9 @@ public SegmentReplicationTargetService( ); } + /** + * Cancel any replications on this node for a replica that is about to be closed. + */ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null) { @@ -109,11 +124,22 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh } } + /** + * Cancel any replications on this node for a replica that has just been promoted as the new primary. 
+ */ + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + if (oldRouting != null && oldRouting.primary() == false && newRouting.primary()) { + onGoingReplications.cancelForShard(indexShard.shardId(), "shard has been promoted to primary"); + } + } + /** * Invoked when a new checkpoint is received from a primary shard. * It checks if a new checkpoint should be processed or not and starts replication if needed. - * @param receivedCheckpoint received checkpoint that is checked for processing - * @param replicaShard replica shard on which checkpoint is received + * + * @param receivedCheckpoint received checkpoint that is checked for processing + * @param replicaShard replica shard on which checkpoint is received */ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedCheckpoint, final IndexShard replicaShard) { logger.trace(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); @@ -180,12 +206,19 @@ public void onReplicationFailure(SegmentReplicationState state, OpenSearchExcept } } - public void startReplication( + public SegmentReplicationTarget startReplication( final ReplicationCheckpoint checkpoint, final IndexShard indexShard, final SegmentReplicationListener listener ) { - startReplication(new SegmentReplicationTarget(checkpoint, indexShard, sourceFactory.get(indexShard), listener)); + final SegmentReplicationTarget target = new SegmentReplicationTarget( + checkpoint, + indexShard, + sourceFactory.get(indexShard), + listener + ); + startReplication(target); + return target; } // pkg-private for integration tests @@ -248,7 +281,17 @@ public void onResponse(Void o) { @Override public void onFailure(Exception e) { - onGoingReplications.fail(replicationId, new OpenSearchException("Segment Replication failed", e), true); + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof CancellableThreads.ExecutionCancelledException) { + if (onGoingReplications.getTarget(replicationId) != null) { + // if the target still exists in our collection, the primary initiated the cancellation; fail the replication + // but do not fail the shard. Cancellations initiated by this node from Index events will be removed with + // onGoingReplications.cancel and not appear in the collection when this listener resolves. 
+ onGoingReplications.fail(replicationId, (CancellableThreads.ExecutionCancelledException) cause, false); + } + } else { + onGoingReplications.fail(replicationId, new OpenSearchException("Segment Replication failed", e), true); + } } }); } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 3f4eadc52fd2a..92e9815313fa0 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -969,6 +969,7 @@ protected Node( .toInstance(new SegmentReplicationSourceService(indicesService, transportService, recoverySettings)); } else { b.bind(SegmentReplicationTargetService.class).toInstance(SegmentReplicationTargetService.NO_OP); + b.bind(SegmentReplicationSourceService.class).toInstance(SegmentReplicationSourceService.NO_OP); } } b.bind(HttpServerTransport.class).toInstance(httpServerTransport); @@ -1112,6 +1113,9 @@ public Node start() throws NodeValidationException { assert transportService.getLocalNode().equals(localNodeFactory.getNode()) : "transportService has a different local node than the factory provided"; injector.getInstance(PeerRecoverySourceService.class).start(); + if (FeatureFlags.isEnabled(REPLICATION_TYPE)) { + injector.getInstance(SegmentReplicationSourceService.class).start(); + } // Load (and maybe upgrade) the metadata stored on disk final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class); @@ -1287,6 +1291,9 @@ public synchronized void close() throws IOException { // close filter/fielddata caches after indices toClose.add(injector.getInstance(IndicesStore.class)); toClose.add(injector.getInstance(PeerRecoverySourceService.class)); + if (FeatureFlags.isEnabled(REPLICATION_TYPE)) { + toClose.add(injector.getInstance(SegmentReplicationSourceService.class)); + } toClose.add(() -> stopWatch.stop().start("cluster")); toClose.add(injector.getInstance(ClusterService.class)); toClose.add(() -> stopWatch.stop().start("node_connections_service")); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 23371a39871c7..88a3bdad53d0c 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -8,11 +8,18 @@ package org.opensearch.index.shard; +import org.junit.Assert; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.DocIdSeqNoAndSource; @@ -21,12 +28,28 @@ import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; 
+import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.CheckpointInfoResponse; +import org.opensearch.indices.replication.GetSegmentFilesResponse; +import org.opensearch.indices.replication.SegmentReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.CopyState; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.Collections; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static java.util.Arrays.asList; import static org.hamcrest.Matchers.equalTo; @@ -34,6 +57,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { @@ -241,6 +265,213 @@ public void testNRTReplicaPromotedAsPrimary() throws Exception { } } + public void testReplicaPromotedWhileReplicating() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard oldPrimary = shards.getPrimary(); + final IndexShard nextPrimary = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + oldPrimary.refresh("Test"); + shards.syncGlobalCheckpoint(); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + resolveCheckpointInfoResponseListener(listener, oldPrimary); + ShardRouting oldRouting = nextPrimary.shardRouting; + try { + shards.promoteReplicaToPrimary(nextPrimary); + } catch (IOException e) { + Assert.fail("Promotion should not fail"); + } + targetService.shardRoutingChanged(nextPrimary, oldRouting, nextPrimary.shardRouting); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(nextPrimary, targetService); + // wait for replica to finish being promoted, and assert doc counts. 
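+ // a primary operation permit is only granted once the promotion has completed, so acquiring one acts as a barrier.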
+ final CountDownLatch latch = new CountDownLatch(1); + nextPrimary.acquirePrimaryOperationPermit(new ActionListener<>() { + @Override + public void onResponse(Releasable releasable) { + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }, ThreadPool.Names.GENERIC, ""); + latch.await(); + assertEquals(nextPrimary.getEngine().getClass(), InternalEngine.class); + nextPrimary.refresh("test"); + + oldPrimary.close("demoted", false); + oldPrimary.store().close(); + IndexShard newReplica = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); + shards.recoverReplica(newReplica); + + assertDocCount(nextPrimary, numDocs); + assertDocCount(newReplica, numDocs); + + nextPrimary.refresh("test"); + replicateSegments(nextPrimary, shards.getReplicas()); + final List docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); + for (IndexShard shard : shards.getReplicas()) { + assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); + } + } + } + + public void testReplicaClosesWhileReplicating_AfterGetCheckpoint() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + // trigger a cancellation by closing the replica. 
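+ // the close hook cancels the ongoing target before the checkpoint response below is delivered.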
+ targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + resolveCheckpointInfoResponseListener(listener, primary); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + Assert.fail("Should not be reached"); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testReplicaClosesWhileReplicating_AfterGetSegmentFiles() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + resolveCheckpointInfoResponseListener(listener, primary); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + // randomly resolve the listener, indicating the source has resolved. + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testPrimaryCancelsExecution() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + listener.onFailure(new CancellableThreads.ExecutionCancelledException("Cancelled")); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) {} + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + private SegmentReplicationTargetService newTargetService(SegmentReplicationSourceFactory sourceFactory) { + return new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + mock(TransportService.class), + 
sourceFactory + ); + } + /** * Assert persisted and searchable doc counts. This method should not be used while docs are concurrently indexed because * it asserts point in time seqNos are relative to the doc counts. @@ -253,4 +484,48 @@ private void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCoun // processed cp should be 1 less than our searchable doc count. assertEquals(expectedSearchableDocCount - 1, indexShard.getProcessedLocalCheckpoint()); } + + private void resolveCheckpointInfoResponseListener(ActionListener listener, IndexShard primary) { + try { + final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primary.shardId), primary); + listener.onResponse( + new CheckpointInfoResponse( + copyState.getCheckpoint(), + copyState.getMetadataSnapshot(), + copyState.getInfosBytes(), + copyState.getPendingDeleteFiles() + ) + ); + } catch (IOException e) { + logger.error("Unexpected error computing CopyState", e); + Assert.fail("Failed to compute copyState"); + } + } + + private void startReplicationAndAssertCancellation(IndexShard replica, SegmentReplicationTargetService targetService) + throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + final SegmentReplicationTarget target = targetService.startReplication( + ReplicationCheckpoint.empty(replica.shardId), + replica, + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + Assert.fail("Replication should not complete"); + } + + @Override + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + assertTrue(e instanceof CancellableThreads.ExecutionCancelledException); + assertFalse(sendShardFailure); + assertEquals(SegmentReplicationState.Stage.CANCELLED, state.getStage()); + latch.countDown(); + } + } + ); + + latch.await(2, TimeUnit.SECONDS); + assertEquals("Should have resolved listener with failure", 0, latch.getCount()); + assertNull(targetService.get(target.getId())); + } } diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 1f2360abde2ad..22481b5a7b99f 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -66,6 +66,7 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; @@ -572,6 +573,7 @@ private IndicesClusterStateService createIndicesClusterStateService( threadPool, SegmentReplicationCheckpointPublisher.EMPTY, SegmentReplicationTargetService.NO_OP, + SegmentReplicationSourceService.NO_OP, recoveryTargetService, shardStateAction, null, diff --git a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java index 38c55620e1223..f49ee0471b5e8 100644 --- 
a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java @@ -14,6 +14,8 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -31,6 +33,8 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; @@ -154,6 +158,51 @@ public void testCancelReplication() throws IOException { assertEquals(0, replications.cachedCopyStateSize()); } + public void testCancelReplication_AfterSendFilesStarts() throws IOException, InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + OngoingSegmentReplications replications = new OngoingSegmentReplications(mockIndicesService, recoverySettings); + // add a doc and refresh so primary has more than one segment. + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + primary.refresh("Test"); + final CheckpointInfoRequest request = new CheckpointInfoRequest( + 1L, + replica.routingEntry().allocationId().getId(), + primaryDiscoveryNode, + testCheckpoint + ); + final FileChunkWriter segmentSegmentFileChunkWriter = (fileMetadata, position, content, lastChunk, totalTranslogOps, listener) -> { + // cancel the replication as soon as the writer starts sending files. 
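+ // simulates a primary-side cancel racing with an in-flight segment copy.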
+ replications.cancel(replica.routingEntry().allocationId().getId(), "Test"); + }; + final CopyState copyState = replications.prepareForReplication(request, segmentSegmentFileChunkWriter); + assertEquals(1, replications.size()); + assertEquals(1, replications.cachedCopyStateSize()); + getSegmentFilesRequest = new GetSegmentFilesRequest( + 1L, + replica.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + new ArrayList<>(copyState.getMetadataSnapshot().asMap().values()), + testCheckpoint + ); + replications.startSegmentCopy(getSegmentFilesRequest, new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + Assert.fail("Expected onFailure to be invoked."); + } + + @Override + public void onFailure(Exception e) { + assertEquals(CancellableThreads.ExecutionCancelledException.class, e.getClass()); + assertEquals(0, copyState.refCount()); + assertEquals(0, replications.size()); + assertEquals(0, replications.cachedCopyStateSize()); + latch.countDown(); + } + }); + latch.await(2, TimeUnit.SECONDS); + assertEquals("listener should have resolved with failure", 0, latch.getCount()); + } + public void testMultipleReplicasUseSameCheckpoint() throws IOException { IndexShard secondReplica = newShard(primary.shardId(), false); recoverReplica(secondReplica, primary, true); diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index 6bce74be569c3..323445bee1274 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -9,12 +9,14 @@ package org.opensearch.indices.replication; import org.apache.lucene.util.Version; +import org.junit.Assert; import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.core.internal.io.IOUtils; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -28,6 +30,8 @@ import java.util.Arrays; import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; @@ -126,6 +130,39 @@ public void testGetSegmentFiles() { assertTrue(capturedRequest.request instanceof GetSegmentFilesRequest); } + public void testGetSegmentFiles_CancelWhileRequestOpen() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + indexShard.shardId(), + PRIMARY_TERM, + SEGMENTS_GEN, + SEQ_NO, + VERSION + ); + StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", Version.LATEST); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + Arrays.asList(testMetadata), + mock(Store.class), + new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + Assert.fail("onFailure response expected."); + } + + @Override + public void onFailure(Exception e) { + assertEquals(e.getClass(), 
CancellableThreads.ExecutionCancelledException.class); + latch.countDown(); + } + } + ); + replicationSource.cancel(); + latch.await(2, TimeUnit.SECONDS); + assertEquals("listener should have resolved in a failure", 0, latch.getCount()); + } + private DiscoveryNode newDiscoveryNode(String nodeName) { return new DiscoveryNode( nodeName, diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java index 2c52772649acc..a6e169dbc3d61 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java @@ -18,6 +18,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.StoreFileMetadata; @@ -28,6 +29,8 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; @@ -197,4 +200,47 @@ public void testReplicationAlreadyRunning() throws IOException { handler.sendFiles(getSegmentFilesRequest, mock(ActionListener.class)); Assert.assertThrows(OpenSearchException.class, () -> { handler.sendFiles(getSegmentFilesRequest, mock(ActionListener.class)); }); } + + public void testCancelReplication() throws IOException, InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + chunkWriter = mock(FileChunkWriter.class); + + final ReplicationCheckpoint latestReplicationCheckpoint = primary.getLatestReplicationCheckpoint(); + final CopyState copyState = new CopyState(latestReplicationCheckpoint, primary); + SegmentReplicationSourceHandler handler = new SegmentReplicationSourceHandler( + localNode, + chunkWriter, + threadPool, + copyState, + primary.routingEntry().allocationId().getId(), + 5000, + 1 + ); + + final GetSegmentFilesRequest getSegmentFilesRequest = new GetSegmentFilesRequest( + 1L, + replica.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + Collections.emptyList(), + latestReplicationCheckpoint + ); + + // cancel before the transfer starts. Cancellation during copy is covered in SegmentFileTransferHandlerTests, which uses the same + // cancellableThreads. 
+ handler.cancel("test"); + handler.sendFiles(getSegmentFilesRequest, new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + Assert.fail("Expected failure."); + } + + @Override + public void onFailure(Exception e) { + assertEquals(CancellableThreads.ExecutionCancelledException.class, e.getClass()); + latch.countDown(); + } + }); + latch.await(2, TimeUnit.SECONDS); + assertEquals("listener should have resolved with failure", 0, latch.getCount()); + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index de739f4ca834a..7d9b0f09f21cd 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -9,21 +9,22 @@ package org.opensearch.indices.replication; import org.junit.Assert; -import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; -import org.opensearch.indices.replication.common.ReplicationLuceneIndex; -import org.opensearch.transport.TransportService; +import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -35,12 +36,12 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.eq; public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { - private IndexShard indexShard; + private IndexShard replicaShard; + private IndexShard primaryShard; private ReplicationCheckpoint checkpoint; private SegmentReplicationSource replicationSource; private SegmentReplicationTargetService sut; @@ -52,20 +53,20 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { public void setUp() throws Exception { super.setUp(); final Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT") + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()) .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); - final TransportService transportService = mock(TransportService.class); - indexShard = newStartedShard(false, settings); - checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 0L, 0L, 0L, 0L); + primaryShard = newStartedShard(true); + replicaShard = newShard(false, settings, new 
NRTReplicationEngineFactory()); + recoverReplica(replicaShard, primaryShard, true); + checkpoint = new ReplicationCheckpoint(replicaShard.shardId(), 0L, 0L, 0L, 0L); SegmentReplicationSourceFactory replicationSourceFactory = mock(SegmentReplicationSourceFactory.class); replicationSource = mock(SegmentReplicationSource.class); - when(replicationSourceFactory.get(indexShard)).thenReturn(replicationSource); + when(replicationSourceFactory.get(replicaShard)).thenReturn(replicationSource); - sut = new SegmentReplicationTargetService(threadPool, recoverySettings, transportService, replicationSourceFactory); - initialCheckpoint = indexShard.getLatestReplicationCheckpoint(); + sut = prepareForReplication(primaryShard); + initialCheckpoint = replicaShard.getLatestReplicationCheckpoint(); aheadCheckpoint = new ReplicationCheckpoint( initialCheckpoint.getShardId(), initialCheckpoint.getPrimaryTerm(), @@ -77,44 +78,58 @@ public void setUp() throws Exception { @Override public void tearDown() throws Exception { - closeShards(indexShard); + closeShards(primaryShard, replicaShard); super.tearDown(); } - public void testTargetReturnsSuccess_listenerCompletes() { - final SegmentReplicationTarget target = new SegmentReplicationTarget( - checkpoint, - indexShard, - replicationSource, - new SegmentReplicationTargetService.SegmentReplicationListener() { - @Override - public void onReplicationDone(SegmentReplicationState state) { - assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); - } + public void testsSuccessfulReplication_listenerCompletes() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + sut.startReplication(checkpoint, replicaShard, new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); + latch.countDown(); + } - @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { - Assert.fail(); - } + @Override + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + logger.error("Unexpected error", e); + Assert.fail("Test should succeed"); } - ); - final SegmentReplicationTarget spy = Mockito.spy(target); - doAnswer(invocation -> { - // set up stage correctly so the transition in markAsDone succeeds on listener completion - moveTargetToFinalStage(target); - final ActionListener listener = invocation.getArgument(0); - listener.onResponse(null); - return null; - }).when(spy).startReplication(any()); - sut.startReplication(spy); + }); + latch.await(2, TimeUnit.SECONDS); + assertEquals(0, latch.getCount()); } - public void testTargetThrowsException() { + public void testReplicationFails() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); final OpenSearchException expectedError = new OpenSearchException("Fail"); + SegmentReplicationSource source = new SegmentReplicationSource() { + + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + listener.onFailure(expectedError); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + Assert.fail("Should not be called"); + } + }; final SegmentReplicationTarget target = new SegmentReplicationTarget( checkpoint, - 
indexShard, - replicationSource, + replicaShard, + source, new SegmentReplicationTargetService.SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { @@ -123,24 +138,21 @@ public void onReplicationDone(SegmentReplicationState state) { @Override public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { - assertEquals(SegmentReplicationState.Stage.INIT, state.getStage()); + // failures leave state object in last entered stage. + assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); assertEquals(expectedError, e.getCause()); - assertTrue(sendShardFailure); + latch.countDown(); } } ); - final SegmentReplicationTarget spy = Mockito.spy(target); - doAnswer(invocation -> { - final ActionListener listener = invocation.getArgument(0); - listener.onFailure(expectedError); - return null; - }).when(spy).startReplication(any()); - sut.startReplication(spy); + sut.startReplication(target); + latch.await(2, TimeUnit.SECONDS); + assertEquals(0, latch.getCount()); } public void testAlreadyOnNewCheckpoint() { SegmentReplicationTargetService spy = spy(sut); - spy.onNewCheckpoint(indexShard.getLatestReplicationCheckpoint(), indexShard); + spy.onNewCheckpoint(replicaShard.getLatestReplicationCheckpoint(), replicaShard); verify(spy, times(0)).startReplication(any(), any(), any()); } @@ -149,7 +161,7 @@ public void testShardAlreadyReplicating() throws InterruptedException { SegmentReplicationTargetService serviceSpy = spy(sut); final SegmentReplicationTarget target = new SegmentReplicationTarget( checkpoint, - indexShard, + replicaShard, replicationSource, mock(SegmentReplicationTargetService.SegmentReplicationListener.class) ); @@ -161,7 +173,7 @@ public void testShardAlreadyReplicating() throws InterruptedException { doAnswer(invocation -> { final ActionListener listener = invocation.getArgument(0); // a new checkpoint arrives before we've completed. - serviceSpy.onNewCheckpoint(aheadCheckpoint, indexShard); + serviceSpy.onNewCheckpoint(aheadCheckpoint, replicaShard); listener.onResponse(null); latch.countDown(); return null; @@ -173,12 +185,12 @@ public void testShardAlreadyReplicating() throws InterruptedException { // wait for the new checkpoint to arrive, before the listener completes. 
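// The 30s await below only bounds how long the stubbed answer above gets to run on a slow
// CI host; the real assertion is the times(0) verification after it, which checks that the
// checkpoint delivered mid-flight did not kick off a second replication.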
latch.await(30, TimeUnit.SECONDS);
-        verify(serviceSpy, times(0)).startReplication(eq(aheadCheckpoint), eq(indexShard), any());
+        verify(serviceSpy, times(0)).startReplication(eq(aheadCheckpoint), eq(replicaShard), any());
     }

     public void testNewCheckpointBehindCurrentCheckpoint() {
         SegmentReplicationTargetService spy = spy(sut);
-        spy.onNewCheckpoint(checkpoint, indexShard);
+        spy.onNewCheckpoint(checkpoint, replicaShard);
         verify(spy, times(0)).startReplication(any(), any(), any());
     }

@@ -190,22 +202,6 @@ public void testShardNotStarted() throws IOException {
         closeShards(shard);
     }

-    public void testNewCheckpoint_validationPassesAndReplicationFails() throws IOException {
-        allowShardFailures();
-        SegmentReplicationTargetService spy = spy(sut);
-        IndexShard spyShard = spy(indexShard);
-        ArgumentCaptor<SegmentReplicationTargetService.SegmentReplicationListener> captor = ArgumentCaptor.forClass(
-            SegmentReplicationTargetService.SegmentReplicationListener.class
-        );
-        doNothing().when(spy).startReplication(any(), any(), any());
-        spy.onNewCheckpoint(aheadCheckpoint, spyShard);
-        verify(spy, times(1)).startReplication(any(), any(), captor.capture());
-        SegmentReplicationTargetService.SegmentReplicationListener listener = captor.getValue();
-        listener.onFailure(new SegmentReplicationState(new ReplicationLuceneIndex()), new OpenSearchException("testing"), true);
-        verify(spyShard).failShard(any(), any());
-        closeShard(indexShard, false);
-    }
-
     /**
      * here we are starting a new shard in PrimaryMode and testing that we don't process a checkpoint on shard when it is in PrimaryMode.
      */
@@ -215,70 +211,10 @@ public void testRejectCheckpointOnShardPrimaryMode() throws IOException {
         // Starting a new shard in PrimaryMode.
         IndexShard primaryShard = newStartedShard(true);
         IndexShard spyShard = spy(primaryShard);
-        doNothing().when(spy).startReplication(any(), any(), any());
         spy.onNewCheckpoint(aheadCheckpoint, spyShard);
         // Verify that checkpoint is not processed as shard is in PrimaryMode.
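// times(0) asserts the negative case: startReplication must never be invoked for a shard
// that is tracking checkpoints as a primary.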
verify(spy, times(0)).startReplication(any(), any(), any()); closeShards(primaryShard); } - - public void testReplicationOnDone() throws IOException { - SegmentReplicationTargetService spy = spy(sut); - IndexShard spyShard = spy(indexShard); - ReplicationCheckpoint cp = indexShard.getLatestReplicationCheckpoint(); - ReplicationCheckpoint newCheckpoint = new ReplicationCheckpoint( - cp.getShardId(), - cp.getPrimaryTerm(), - cp.getSegmentsGen(), - cp.getSeqNo(), - cp.getSegmentInfosVersion() + 1 - ); - ReplicationCheckpoint anotherNewCheckpoint = new ReplicationCheckpoint( - cp.getShardId(), - cp.getPrimaryTerm(), - cp.getSegmentsGen(), - cp.getSeqNo(), - cp.getSegmentInfosVersion() + 2 - ); - ArgumentCaptor captor = ArgumentCaptor.forClass( - SegmentReplicationTargetService.SegmentReplicationListener.class - ); - doNothing().when(spy).startReplication(any(), any(), any()); - spy.onNewCheckpoint(newCheckpoint, spyShard); - spy.onNewCheckpoint(anotherNewCheckpoint, spyShard); - verify(spy, times(1)).startReplication(eq(newCheckpoint), any(), captor.capture()); - verify(spy, times(1)).onNewCheckpoint(eq(anotherNewCheckpoint), any()); - SegmentReplicationTargetService.SegmentReplicationListener listener = captor.getValue(); - listener.onDone(new SegmentReplicationState(new ReplicationLuceneIndex())); - doNothing().when(spy).onNewCheckpoint(any(), any()); - verify(spy, timeout(100).times(2)).onNewCheckpoint(eq(anotherNewCheckpoint), any()); - closeShard(indexShard, false); - } - - public void testBeforeIndexShardClosed_CancelsOngoingReplications() { - final SegmentReplicationTarget target = new SegmentReplicationTarget( - checkpoint, - indexShard, - replicationSource, - mock(SegmentReplicationTargetService.SegmentReplicationListener.class) - ); - final SegmentReplicationTarget spy = Mockito.spy(target); - sut.startReplication(spy); - sut.beforeIndexShardClosed(indexShard.shardId(), indexShard, Settings.EMPTY); - verify(spy, times(1)).cancel(any()); - } - - /** - * Move the {@link SegmentReplicationTarget} object through its {@link SegmentReplicationState.Stage} values in order - * until the final, non-terminal stage. 
- */ - private void moveTargetToFinalStage(SegmentReplicationTarget target) { - SegmentReplicationState.Stage[] stageValues = SegmentReplicationState.Stage.values(); - assertEquals(target.state().getStage(), SegmentReplicationState.Stage.INIT); - // Skip the first two stages (DONE and INIT) and iterate until the last value - for (int i = 2; i < stageValues.length; i++) { - target.state().setStage(stageValues[i]); - } - } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 4d3b841e203de..ff4005d9bcedf 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -185,6 +185,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.ingest.IngestService; @@ -1857,6 +1858,7 @@ public void onFailure(final Exception e) { transportService, new SegmentReplicationSourceFactory(transportService, recoverySettings, clusterService) ), + SegmentReplicationSourceService.NO_OP, shardStateAction, new NodeMappingRefreshAction(transportService, metadataMappingService), repositoriesService, diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 08004b7e42fea..1b40cb4f2dfa3 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -68,7 +68,6 @@ import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.internal.io.IOUtils; @@ -112,7 +111,10 @@ import org.opensearch.indices.replication.CheckpointInfoResponse; import org.opensearch.indices.replication.GetSegmentFilesResponse; import org.opensearch.indices.replication.SegmentReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.CopyState; @@ -127,8 +129,10 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; @@ -146,7 +150,9 @@ import static org.hamcrest.Matchers.contains; import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; /** @@ -1171,35 +1177,40 @@ public static Engine.Warmer createTestWarmer(IndexSettings indexSettings) { } /** - * Segment Replication specific test method - Replicate segments to a list of replicas from a given primary. - * This test will use a real {@link SegmentReplicationTarget} for each replica with a mock {@link SegmentReplicationSource} that - * writes all segments directly to the target. + * Segment Replication specific test method - Creates a {@link SegmentReplicationTargetService} to perform replications that has + * been configured to return the given primaryShard's current segments. + * + * @param primaryShard {@link IndexShard} - The primary shard to replicate from. */ - public final void replicateSegments(IndexShard primaryShard, List replicaShards) throws IOException, InterruptedException { - final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); - Store.MetadataSnapshot primaryMetadata; - try (final GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { - final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); - primaryMetadata = primaryShard.store().getMetadata(primarySegmentInfos); - } - final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); - - final ReplicationCollection replicationCollection = new ReplicationCollection<>(logger, threadPool); - final SegmentReplicationSource source = new SegmentReplicationSource() { + public final SegmentReplicationTargetService prepareForReplication(IndexShard primaryShard) { + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + mock(TransportService.class), + sourceFactory + ); + final SegmentReplicationSource replicationSource = new SegmentReplicationSource() { @Override public void getCheckpointMetadata( long replicationId, ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse( - new CheckpointInfoResponse( - copyState.getCheckpoint(), - copyState.getMetadataSnapshot(), - copyState.getInfosBytes(), - copyState.getPendingDeleteFiles() - ) - ); + try { + final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); + listener.onResponse( + new CheckpointInfoResponse( + copyState.getCheckpoint(), + copyState.getMetadataSnapshot(), + copyState.getInfosBytes(), + copyState.getPendingDeleteFiles() + ) + ); + } catch (IOException e) { + logger.error("Unexpected error computing CopyState", e); + Assert.fail("Failed to compute copyState"); + } } @Override @@ -1211,9 +1222,7 @@ public void getSegmentFiles( ActionListener listener ) { try ( - final ReplicationCollection.ReplicationRef replicationRef = replicationCollection.get( - replicationId - ) + final ReplicationCollection.ReplicationRef replicationRef = targetService.get(replicationId) ) { writeFileChunks(replicationRef.get(), primaryShard, filesToFetch.toArray(new StoreFileMetadata[] {})); } catch (IOException e) { @@ -1222,15 +1231,43 @@ public void 
getSegmentFiles( listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } }; + when(sourceFactory.get(any())).thenReturn(replicationSource); + return targetService; + } + /** + * Segment Replication specific test method - Replicate segments to a list of replicas from a given primary. + * This test will use a real {@link SegmentReplicationTarget} for each replica with a mock {@link SegmentReplicationSource} that + * writes all segments directly to the target. + * @param primaryShard - {@link IndexShard} The current primary shard. + * @param replicaShards - Replicas that will be updated. + * @return {@link List} List of target components orchestrating replication. + */ + public final List replicateSegments(IndexShard primaryShard, List replicaShards) + throws IOException, InterruptedException { + final SegmentReplicationTargetService targetService = prepareForReplication(primaryShard); + return replicateSegments(targetService, primaryShard, replicaShards); + } + + public final List replicateSegments( + SegmentReplicationTargetService targetService, + IndexShard primaryShard, + List replicaShards + ) throws IOException, InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); + Store.MetadataSnapshot primaryMetadata; + try (final GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { + final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); + primaryMetadata = primaryShard.store().getMetadata(primarySegmentInfos); + } + List ids = new ArrayList<>(); for (IndexShard replica : replicaShards) { - final SegmentReplicationTarget target = new SegmentReplicationTarget( + final SegmentReplicationTarget target = targetService.startReplication( ReplicationCheckpoint.empty(replica.shardId), replica, - source, - new ReplicationListener() { + new SegmentReplicationTargetService.SegmentReplicationListener() { @Override - public void onDone(ReplicationState state) { + public void onReplicationDone(SegmentReplicationState state) { try (final GatedCloseable snapshot = replica.getSegmentInfosSnapshot()) { final SegmentInfos replicaInfos = snapshot.get(); final Store.MetadataSnapshot replicaMetadata = replica.store().getMetadata(replicaInfos); @@ -1241,31 +1278,22 @@ public void onDone(ReplicationState state) { assertEquals(primaryMetadata.getCommitUserData(), replicaMetadata.getCommitUserData()); } catch (Exception e) { throw ExceptionsHelper.convertToRuntime(e); + } finally { + countDownLatch.countDown(); } - countDownLatch.countDown(); } @Override - public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { logger.error("Unexpected replication failure in test", e); Assert.fail("test replication should not fail: " + e); } } ); - replicationCollection.start(target, TimeValue.timeValueMillis(5000)); - target.startReplication(new ActionListener<>() { - @Override - public void onResponse(Void o) { - replicationCollection.markAsDone(target.getId()); - } - - @Override - public void onFailure(Exception e) { - replicationCollection.fail(target.getId(), new OpenSearchException("Segment Replication failed", e), true); - } - }); + ids.add(target); + countDownLatch.await(1, TimeUnit.SECONDS); } - countDownLatch.await(3, TimeUnit.SECONDS); + return ids; } private void writeFileChunks(SegmentReplicationTarget target, IndexShard primary, StoreFileMetadata[] files) throws 
IOException { From 4a6e937b3ba120f38fa991c9c481a01daec18e94 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Wed, 31 Aug 2022 18:49:19 -0700 Subject: [PATCH 22/78] Bug fixes for dependabot changelog verifier (#4364) * Fix token usage for changelog helper Signed-off-by: Kunal Kotwani * Add conditional check for dependabot steps Signed-off-by: Kunal Kotwani * Add dependency section Signed-off-by: Kunal Kotwani * Bug fixes for dependabot changelog verifier Signed-off-by: Kunal Kotwani * Update the changelog Signed-off-by: Kunal Kotwani Signed-off-by: Kunal Kotwani --- .github/pull_request_template.md | 2 +- .github/workflows/changelog_verifier.yml | 12 ------------ .github/workflows/dependabot_pr.yml | 14 ++++++++++++++ .linelint.yml | 1 + CHANGELOG.md | 1 + 5 files changed, 17 insertions(+), 13 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index c76e27d6dfc7d..4537cadf71074 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -10,7 +10,7 @@ - [ ] New functionality has been documented. - [ ] New functionality has javadoc added - [ ] Commits are signed per the DCO using --signoff -- [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../CONTRIBUTING.md#changelog)) +- [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index ac0c0ec4d7297..cda5dde462068 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -13,16 +13,4 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ github.event.pull_request.head.sha }} - - uses: dangoslen/dependabot-changelog-helper@v1 - with: - version: 'Unreleased' - - - uses: stefanzweifel/git-auto-commit-action@v4 - with: - commit_message: "Update changelog" - branch: ${{ github.head_ref }} - commit_user_name: dependabot[bot] - commit_user_email: support@github.com - commit_options: '--signoff' - - uses: dangoslen/changelog-enforcer@v3 diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index 2ac904bf4ccf7..ed98bae8978ed 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -47,3 +47,17 @@ jobs: commit_user_name: dependabot[bot] commit_user_email: support@github.com commit_options: '--signoff' + + - name: Update the changelog + uses: dangoslen/dependabot-changelog-helper@v1 + with: + version: 'Unreleased' + + - name: Commit the changes + uses: stefanzweifel/git-auto-commit-action@v4 + with: + commit_message: "Update changelog" + branch: ${{ github.head_ref }} + commit_user_name: dependabot[bot] + commit_user_email: support@github.com + commit_options: '--signoff' diff --git a/.linelint.yml b/.linelint.yml index 6240c8b3d7a96..ec947019f8ab6 100644 --- a/.linelint.yml +++ b/.linelint.yml @@ -7,6 +7,7 @@ ignore: - .idea/ - '*.sha1' - '*.txt' + - 'CHANGELOG.md' - '.github/CODEOWNERS' - 'buildSrc/src/testKit/opensearch.build/LICENSE' - 'buildSrc/src/testKit/opensearch.build/NOTICE' diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d07052d55ff0..07b08d853cf45 
100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) - Fixed cancellation of segment replication events ([#4225](https://github.com/opensearch-project/OpenSearch/pull/4225)) +- Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) From 689a2c44eee85b025c95516c4949d3a1bc3ec284 Mon Sep 17 00:00:00 2001 From: Bharathwaj G <58062316+bharath-techie@users.noreply.github.com> Date: Thu, 1 Sep 2022 10:58:45 +0530 Subject: [PATCH 23/78] Add changes for Create PIT and Delete PIT rest layer and rest high level client (#4064) * Create and delete PIT search rest layer changes Signed-off-by: Bharathwaj G --- CHANGELOG.md | 1 + .../opensearch/client/RequestConverters.java | 38 ++++- .../client/RestHighLevelClient.java | 118 ++++++++++++++++ .../java/org/opensearch/client/PitIT.java | 102 ++++++++++++++ .../client/RequestConvertersTests.java | 44 ++++++ .../client/RestHighLevelClientTests.java | 1 + .../java/org/opensearch/client/SearchIT.java | 47 +++++++ .../rest-api-spec/api/create_pit.json | 44 ++++++ .../rest-api-spec/api/delete_all_pits.json | 19 +++ .../rest-api-spec/api/delete_pit.json | 23 +++ .../rest-api-spec/test/pit/10_basic.yml | 130 +++++++++++++++++ .../org/opensearch/action/ActionModule.java | 10 ++ .../action/search/DeletePitInfo.java | 4 +- .../action/search/DeletePitRequest.java | 5 + .../action/search/GetAllPitNodesRequest.java | 11 ++ .../action/search/GetAllPitNodesResponse.java | 8 ++ .../action/search/NodesGetAllPitsAction.java | 23 +++ .../opensearch/action/search/PitService.java | 23 ++- .../search/TransportDeletePitAction.java | 6 +- .../search/TransportGetAllPitsAction.java | 80 +++-------- .../TransportNodesGetAllPitsAction.java | 86 +++++++++++ .../action/search/RestCreatePitAction.java | 57 ++++++++ .../action/search/RestDeletePitAction.java | 60 ++++++++ .../org/opensearch/search/SearchService.java | 4 +- .../search/CreatePitControllerTests.java | 11 +- .../search/TransportDeletePitActionTests.java | 18 +-- .../search/CreatePitSingleNodeTests.java | 52 +++++++ .../search/pit/RestCreatePitActionTests.java | 78 ++++++++++ .../search/pit/RestDeletePitActionTests.java | 133 ++++++++++++++++++ 29 files changed, 1152 insertions(+), 84 deletions(-) create mode 100644 client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml create mode 100644 server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java create mode 100644 server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java create mode 100644 
server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java create mode 100644 server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java create mode 100644 server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 07b08d853cf45..f89e7eba0698c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] ### Added - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) +- Point in time rest layer changes for create and delete PIT API ([#4064](https://github.com/opensearch-project/OpenSearch/pull/4064)) - Added @dreamer-89 as an Opensearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342)) - Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343)) - Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344)) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 6fa57295f48e4..eedc27d1d2ea7 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -54,6 +54,8 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -92,6 +94,7 @@ import org.opensearch.index.reindex.ReindexRequest; import org.opensearch.index.reindex.UpdateByQueryRequest; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.rest.action.search.RestCreatePitAction; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.script.mustache.MultiSearchTemplateRequest; import org.opensearch.script.mustache.SearchTemplateRequest; @@ -433,9 +436,19 @@ static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); params.withRouting(searchRequest.routing()); params.withPreference(searchRequest.preference()); - params.withIndicesOptions(searchRequest.indicesOptions()); + if (searchRequest.pointInTimeBuilder() == null) { + params.withIndicesOptions(searchRequest.indicesOptions()); + } params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + /** + * Merging search responses as part of CCS flow to reduce roundtrips is not supported for point in time - + * refer to org.opensearch.action.search.SearchResponseMerger + */ + if (searchRequest.pointInTimeBuilder() != null) { + params.putParam("ccs_minimize_roundtrips", "false"); + } else { + params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + } if (searchRequest.getPreFilterShardSize() != null) { params.putParam("pre_filter_shard_size", Integer.toString(searchRequest.getPreFilterShardSize())); } @@ -464,6 +477,27 
@@ static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOExcep return request; } + static Request createPit(CreatePitRequest createPitRequest) throws IOException { + Params params = new Params(); + params.putParam(RestCreatePitAction.ALLOW_PARTIAL_PIT_CREATION, Boolean.toString(createPitRequest.shouldAllowPartialPitCreation())); + params.putParam(RestCreatePitAction.KEEP_ALIVE, createPitRequest.getKeepAlive()); + params.withIndicesOptions(createPitRequest.indicesOptions()); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(createPitRequest.indices(), "_search/point_in_time")); + request.addParameters(params.asMap()); + request.setEntity(createEntity(createPitRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deletePit(DeletePitRequest deletePitRequest) throws IOException { + Request request = new Request(HttpDelete.METHOD_NAME, "/_search/point_in_time"); + request.setEntity(createEntity(deletePitRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteAllPits() { + return new Request(HttpDelete.METHOD_NAME, "/_search/point_in_time/_all"); + } + static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_msearch"); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 28a441bdf7f7f..0c73c65f6175f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -59,6 +59,10 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -1250,6 +1254,120 @@ public final Cancellable scrollAsync( ); } + /** + * Create PIT context using create PIT API + * + * @param createPitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final CreatePitResponse createPit(CreatePitRequest createPitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + createPitRequest, + RequestConverters::createPit, + options, + CreatePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously Create PIT context using create PIT API + * + * @param createPitRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return the response + */ + public final Cancellable createPitAsync( + CreatePitRequest createPitRequest, + RequestOptions options, + ActionListener listener + ) { + return performRequestAsyncAndParseEntity( + createPitRequest, + RequestConverters::createPit, + options, + CreatePitResponse::fromXContent, + listener, + emptySet() + ); + } + + /** + * Delete point in time searches using delete PIT API + * + * @param deletePitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final DeletePitResponse deletePit(DeletePitRequest deletePitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + deletePitRequest, + RequestConverters::deletePit, + options, + DeletePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously Delete point in time searches using delete PIT API + * + * @param deletePitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return the response + */ + public final Cancellable deletePitAsync( + DeletePitRequest deletePitRequest, + RequestOptions options, + ActionListener listener + ) { + return performRequestAsyncAndParseEntity( + deletePitRequest, + RequestConverters::deletePit, + options, + DeletePitResponse::fromXContent, + listener, + emptySet() + ); + } + + /** + * Delete all point in time searches using delete all PITs API + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final DeletePitResponse deleteAllPits(RequestOptions options) throws IOException { + return performRequestAndParseEntity( + new MainRequest(), + (request) -> RequestConverters.deleteAllPits(), + options, + DeletePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously Delete all point in time searches using delete all PITs API + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return the response + */ + public final Cancellable deleteAllPitsAsync(RequestOptions options, ActionListener listener) { + return performRequestAsyncAndParseEntity( + new MainRequest(), + (request) -> RequestConverters.deleteAllPits(), + options, + DeletePitResponse::fromXContent, + listener, + emptySet() + ); + } + /** * Clears one or more scroll ids using the Clear Scroll API. * diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java new file mode 100644 index 0000000000000..395ec6e46a7b3 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.client; + +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.junit.Before; +import org.opensearch.OpenSearchStatusException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Tests point in time API with rest high level client + */ +public class PitIT extends OpenSearchRestHighLevelClientTestCase { + + @Before + public void indexDocuments() throws IOException { + Request doc1 = new Request(HttpPut.METHOD_NAME, "/index/_doc/1"); + doc1.setJsonEntity("{\"type\":\"type1\", \"id\":1, \"num\":10, \"num2\":50}"); + client().performRequest(doc1); + Request doc2 = new Request(HttpPut.METHOD_NAME, "/index/_doc/2"); + doc2.setJsonEntity("{\"type\":\"type1\", \"id\":2, \"num\":20, \"num2\":40}"); + client().performRequest(doc2); + Request doc3 = new Request(HttpPut.METHOD_NAME, "/index/_doc/3"); + doc3.setJsonEntity("{\"type\":\"type1\", \"id\":3, \"num\":50, \"num2\":35}"); + client().performRequest(doc3); + Request doc4 = new Request(HttpPut.METHOD_NAME, "/index/_doc/4"); + doc4.setJsonEntity("{\"type\":\"type2\", \"id\":4, \"num\":100, \"num2\":10}"); + client().performRequest(doc4); + Request doc5 = new Request(HttpPut.METHOD_NAME, "/index/_doc/5"); + doc5.setJsonEntity("{\"type\":\"type2\", \"id\":5, \"num\":100, \"num2\":10}"); + client().performRequest(doc5); + client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh")); + } + + public void testCreateAndDeletePit() throws IOException { + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertEquals(1, pitResponse.getTotalShards()); + assertEquals(1, pitResponse.getSuccessfulShards()); + assertEquals(0, pitResponse.getFailedShards()); + assertEquals(0, pitResponse.getSkippedShards()); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute(deletePitRequest, highLevelClient()::deletePit, highLevelClient()::deletePitAsync); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(pitResponse.getId())); + } + + public void testDeleteAllPits() throws IOException { + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + CreatePitResponse pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertTrue(pitResponse1.getId() != null); + DeletePitResponse deletePitResponse = highLevelClient().deleteAllPits(RequestOptions.DEFAULT); + for (DeletePitInfo deletePitInfo : deletePitResponse.getDeletePitResults()) { + 
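// the delete-all response carries one DeletePitInfo per deleted context;
// both PITs created above must report success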
assertTrue(deletePitInfo.isSuccessful()); + } + pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertTrue(pitResponse1.getId() != null); + ActionListener deletePitListener = new ActionListener<>() { + @Override + public void onResponse(DeletePitResponse response) { + for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { + assertTrue(deletePitInfo.isSuccessful()); + } + } + + @Override + public void onFailure(Exception e) { + if (!(e instanceof OpenSearchStatusException)) { + throw new AssertionError("Delete all failed"); + } + } + }; + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + // validate no pits case + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + } +} diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index 97c0f2f475826..ee5795deb165d 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -53,6 +53,8 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -131,6 +133,7 @@ import java.util.Locale; import java.util.Map; import java.util.StringJoiner; +import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -1303,6 +1306,47 @@ public void testClearScroll() throws IOException { assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } + public void testCreatePit() throws IOException { + String[] indices = randomIndicesNames(0, 5); + Map expectedParams = new HashMap<>(); + expectedParams.put("keep_alive", "1d"); + expectedParams.put("allow_partial_pit_creation", "true"); + CreatePitRequest createPitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, indices); + setRandomIndicesOptions(createPitRequest::indicesOptions, createPitRequest::indicesOptions, expectedParams); + Request request = RequestConverters.createPit(createPitRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_search/point_in_time"); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(endpoint.toString(), request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(createPitRequest, request.getEntity()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + } + + public void testDeletePit() throws IOException { + List pitIdsList = new ArrayList<>(); + pitIdsList.add("pitId1"); + pitIdsList.add("pitId2"); + DeletePitRequest deletePitRequest = new 
DeletePitRequest(pitIdsList); + Request request = RequestConverters.deletePit(deletePitRequest); + String endpoint = "/_search/point_in_time"; + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + assertToXContentBody(deletePitRequest, request.getEntity()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + } + + public void testDeleteAllPits() { + Request request = RequestConverters.deleteAllPits(); + String endpoint = "/_search/point_in_time/_all"; + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + } + public void testSearchTemplate() throws Exception { // Create a random request. String[] indices = randomIndicesNames(0, 5); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 3da0f81023f72..cdd63743f2644 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -134,6 +134,7 @@ public class RestHighLevelClientTests extends OpenSearchTestCase { // core "ping", "info", + "delete_all_pits", // security "security.get_ssl_certificates", "security.authenticate", diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index 19e287fb91be5..8b509e5d19e92 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -43,6 +43,10 @@ import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -89,6 +93,7 @@ import org.opensearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import org.opensearch.search.aggregations.support.MultiValuesSourceFieldConfig; import org.opensearch.search.aggregations.support.ValueType; +import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.subphase.FetchSourceContext; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; @@ -100,11 +105,13 @@ import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -762,6 +769,46 @@ public void testSearchScroll() throws Exception { } } + public void testSearchWithPit() throws Exception { + for (int i = 0; i < 100; i++) { + XContentBuilder builder = 
jsonBuilder().startObject().field("field", i).endObject(); + Request doc = new Request(HttpPut.METHOD_NAME, "/test/_doc/" + Integer.toString(i)); + doc.setJsonEntity(Strings.toString(builder)); + client().performRequest(doc); + } + client().performRequest(new Request(HttpPost.METHOD_NAME, "/test/_refresh")); + + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "test"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(35) + .sort("field", SortOrder.ASC) + .pointInTimeBuilder(new PointInTimeBuilder(pitResponse.getId())); + SearchRequest searchRequest = new SearchRequest().source(searchSourceBuilder); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + + try { + long counter = 0; + assertSearchHeader(searchResponse); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(35)); + for (SearchHit hit : searchResponse.getHits()) { + assertThat(((Number) hit.getSortValues()[0]).longValue(), equalTo(counter++)); + } + } finally { + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute( + deletePitRequest, + highLevelClient()::deletePit, + highLevelClient()::deletePitAsync + ); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(pitResponse.getId())); + } + } + public void testMultiSearch() throws Exception { MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); SearchRequest searchRequest1 = new SearchRequest("index1"); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json new file mode 100644 index 0000000000000..d3a2104c01bc0 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json @@ -0,0 +1,44 @@ + +{ + "create_pit":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Creates point in time context." 
+ }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/{index}/_search/point_in_time", + "methods":[ + "POST" + ], + "parts":{ + "index":{ + "type":"list", + "description":"A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices" + } + } + } + ] + }, + "params":{ + "allow_partial_pit_creation":{ + "type":"boolean", + "description":"Allow if point in time can be created with partial failures" + }, + "keep_alive":{ + "type":"string", + "description":"Specify the keep alive for point in time" + }, + "preference":{ + "type":"string", + "description":"Specify the node or shard the operation should be performed on (default: random)" + }, + "routing":{ + "type":"list", + "description":"A comma-separated list of specific routing values" + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json new file mode 100644 index 0000000000000..5ff01aa746df9 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json @@ -0,0 +1,19 @@ +{ + "delete_all_pits":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Deletes all active point in time searches." + }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_search/point_in_time/_all", + "methods":[ + "DELETE" + ] + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json new file mode 100644 index 0000000000000..b54d9f76204f4 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json @@ -0,0 +1,23 @@ +{ + "delete_pit":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Deletes one or more point in time searches based on the IDs passed." 
+ }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_search/point_in_time", + "methods":[ + "DELETE" + ] + } + ] + }, + "body":{ + "description":"A comma-separated list of pit IDs to clear", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml new file mode 100644 index 0000000000000..2023bcc8f5c87 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml @@ -0,0 +1,130 @@ +"Create PIT, Search with PIT ID and Delete": + - skip: + version: " - 2.9.99" + reason: "mode to be introduced later than 3.0" + - do: + indices.create: + index: test_pit + - do: + index: + index: test_pit + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_pit + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + create_pit: + allow_partial_pit_creation: true + index: test_pit + keep_alive: 23h + + - set: {pit_id: pit_id} + - match: { _shards.failed: 0} + - do: + search: + rest_total_hits_as_int: true + size: 1 + sort: foo + body: + query: + match_all: {} + pit: {"id": "$pit_id"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + - do: + index: + index: test_pit + id: 44 + body: { foo: 3 } + + - do: + indices.refresh: {} + + - do: + search: + rest_total_hits_as_int: true + size: 1 + sort: foo + body: + query: + match_all: {} + pit: {"id": "$pit_id", "keep_alive":"10m"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + + - do: + search: + rest_total_hits_as_int: true + index: test_pit + size: 1 + sort: foo + body: + query: + match_all: {} + + - match: {hits.total: 3 } + - length: {hits.hits: 1 } + + - do: + delete_pit: + body: + "pit_id": [$pit_id] + + - match: {pits.0.pit_id: $pit_id} + - match: {pits.0.successful: true } + +--- +"Delete all": + - skip: + version: " - 2.9.99" + reason: "mode to be introduced later than 3.0" + - do: + indices.create: + index: test_pit + - do: + index: + index: test_pit + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_pit + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + create_pit: + allow_partial_pit_creation: true + index: test_pit + keep_alive: 23h + + - set: {pit_id: pit_id} + - match: { _shards.failed: 0} + + - do: + delete_all_pits: {} + + - match: {pits.0.pit_id: $pit_id} + - match: {pits.0.successful: true } + + - do: + catch: missing + delete_all_pits: { } diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 797c5c38fada6..74be544123d9f 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -240,12 +240,14 @@ import org.opensearch.action.search.DeletePitAction; import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.MultiSearchAction; +import org.opensearch.action.search.NodesGetAllPitsAction; import org.opensearch.action.search.SearchAction; import org.opensearch.action.search.SearchScrollAction; import org.opensearch.action.search.TransportClearScrollAction; import org.opensearch.action.search.TransportCreatePitAction; import org.opensearch.action.search.TransportDeletePitAction; import org.opensearch.action.search.TransportGetAllPitsAction; +import org.opensearch.action.search.TransportNodesGetAllPitsAction; import 
org.opensearch.action.search.TransportMultiSearchAction; import org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.search.TransportSearchScrollAction; @@ -408,6 +410,8 @@ import org.opensearch.rest.action.ingest.RestSimulatePipelineAction; import org.opensearch.rest.action.search.RestClearScrollAction; import org.opensearch.rest.action.search.RestCountAction; +import org.opensearch.rest.action.search.RestCreatePitAction; +import org.opensearch.rest.action.search.RestDeletePitAction; import org.opensearch.rest.action.search.RestExplainAction; import org.opensearch.rest.action.search.RestMultiSearchAction; import org.opensearch.rest.action.search.RestSearchAction; @@ -674,6 +678,7 @@ public void reg actions.register(GetAllPitsAction.INSTANCE, TransportGetAllPitsAction.class); actions.register(DeletePitAction.INSTANCE, TransportDeletePitAction.class); actions.register(PitSegmentsAction.INSTANCE, TransportPitSegmentsAction.class); + actions.register(NodesGetAllPitsAction.INSTANCE, TransportNodesGetAllPitsAction.class); // Remote Store actions.register(RestoreRemoteStoreAction.INSTANCE, TransportRestoreRemoteStoreAction.class); @@ -849,6 +854,11 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestRepositoriesAction()); registerHandler.accept(new RestSnapshotAction()); registerHandler.accept(new RestTemplatesAction()); + + // Point in time API + registerHandler.accept(new RestCreatePitAction()); + registerHandler.accept(new RestDeletePitAction()); + for (ActionPlugin plugin : actionPlugins) { for (RestHandler handler : plugin.getRestHandlers( settings, diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java index 943199812771a..5a167c5a6f160 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java @@ -65,11 +65,11 @@ public void writeTo(StreamOutput out) throws IOException { static { PARSER.declareBoolean(constructorArg(), new ParseField("successful")); - PARSER.declareString(constructorArg(), new ParseField("pitId")); + PARSER.declareString(constructorArg(), new ParseField("pit_id")); } private static final ParseField SUCCESSFUL = new ParseField("successful"); - private static final ParseField PIT_ID = new ParseField("pitId"); + private static final ParseField PIT_ID = new ParseField("pit_id"); @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java index 945fcfd17eb6c..926e9c19a33f5 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java @@ -48,6 +48,11 @@ public DeletePitRequest(List pitIds) { this.pitIds.addAll(pitIds); } + public void clearAndSetPitIds(List pitIds) { + this.pitIds.clear(); + this.pitIds.addAll(pitIds); + } + public DeletePitRequest() {} public List getPitIds() { diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java index b4ad2f6641087..340f9b842adbf 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java +++ 
b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java @@ -21,11 +21,22 @@ */ public class GetAllPitNodesRequest extends BaseNodesRequest { + // Security plugin intercepts and sets the response with permitted PIT contexts + private GetAllPitNodesResponse getAllPitNodesResponse; + @Inject public GetAllPitNodesRequest(DiscoveryNode... concreteNodes) { super(concreteNodes); } + public void setGetAllPitNodesResponse(GetAllPitNodesResponse getAllPitNodesResponse) { + this.getAllPitNodesResponse = getAllPitNodesResponse; + } + + public GetAllPitNodesResponse getGetAllPitNodesResponse() { + return getAllPitNodesResponse; + } + public GetAllPitNodesRequest(StreamInput in) throws IOException { super(in); } diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java index 4a454e7145eff..091447798cf5f 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java @@ -52,6 +52,14 @@ public GetAllPitNodesResponse( ); } + /** + * Copy constructor that explicitly sets the list pit infos + */ + public GetAllPitNodesResponse(List listPitInfos, GetAllPitNodesResponse response) { + super(response.getClusterName(), response.getNodes(), response.failures()); + pitInfos.addAll(listPitInfos); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java new file mode 100644 index 0000000000000..af41f7d49551c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.action.search;
+
+import org.opensearch.action.ActionType;
+
+/**
+ * Action type for retrieving all PIT reader contexts from nodes
+ */
+public class NodesGetAllPitsAction extends ActionType<GetAllPitNodesResponse> {
+    public static final NodesGetAllPitsAction INSTANCE = new NodesGetAllPitsAction();
+    public static final String NAME = "cluster:admin/point_in_time/read_from_nodes";
+
+    private NodesGetAllPitsAction() {
+        super(NAME, GetAllPitNodesResponse::new);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/search/PitService.java b/server/src/main/java/org/opensearch/action/search/PitService.java
index 0b79b77fd6014..ff068397ad94e 100644
--- a/server/src/main/java/org/opensearch/action/search/PitService.java
+++ b/server/src/main/java/org/opensearch/action/search/PitService.java
@@ -15,6 +15,7 @@ import org.opensearch.action.ActionListener;
 import org.opensearch.action.StepListener;
 import org.opensearch.action.support.GroupedActionListener;
+import org.opensearch.client.node.NodeClient;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Strings;
@@ -47,12 +48,19 @@ public class PitService {
     private final ClusterService clusterService;
     private final SearchTransportService searchTransportService;
     private final TransportService transportService;
+    private final NodeClient nodeClient;
 
     @Inject
-    public PitService(ClusterService clusterService, SearchTransportService searchTransportService, TransportService transportService) {
+    public PitService(
+        ClusterService clusterService,
+        SearchTransportService searchTransportService,
+        TransportService transportService,
+        NodeClient nodeClient
+    ) {
         this.clusterService = clusterService;
         this.searchTransportService = searchTransportService;
         this.transportService = transportService;
+        this.nodeClient = nodeClient;
     }
 
     /**
@@ -144,6 +152,17 @@ public void onFailure(final Exception e) {
         }, size);
     }
 
+    /**
+     * Returns the indices associated with each PIT in the given list of PIT IDs
+     */
+    public Map<String, String[]> getIndicesForPits(List<String> pitIds) {
+        Map<String, String[]> pitToIndicesMap = new HashMap<>();
+        for (String pitId : pitIds) {
+            pitToIndicesMap.put(pitId, SearchContextId.decode(nodeClient.getNamedWriteableRegistry(), pitId).getActualIndices());
+        }
+        return pitToIndicesMap;
+    }
+
     /**
      * Get all active point in time contexts
      */
@@ -156,7 +175,7 @@ public void getAllPits(ActionListener<GetAllPitNodesResponse> getAllPitsListener
         DiscoveryNode[] disNodesArr = nodes.toArray(new DiscoveryNode[nodes.size()]);
         transportService.sendRequest(
             transportService.getLocalNode(),
-            GetAllPitsAction.NAME,
+            NodesGetAllPitsAction.NAME,
             new GetAllPitNodesRequest(disNodesArr),
             new TransportResponseHandler<GetAllPitNodesResponse>() {
diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java
index f9e36c479dd54..19abe2361290d 100644
--- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java
+++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java
@@ -57,7 +57,11 @@ public TransportDeletePitAction(
     @Override
     protected void doExecute(Task task, DeletePitRequest request, ActionListener<DeletePitResponse> listener) {
         List<String> pitIds = request.getPitIds();
-        if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) {
+        // When the security plugin intercepts the request and no PITs exist in the cluster, the PIT IDs
+        // in the request will be empty; in that case return an empty response.
+        if (pitIds.isEmpty()) {
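+            // Nothing to delete: short-circuit with an empty, successful response rather than failing,
+            // since there are no reader contexts to free on any node.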
+            listener.onResponse(new DeletePitResponse(new ArrayList<>()));
+        } else if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) {
             deleteAllPits(listener);
         } else {
             deletePits(listener, request);
diff --git a/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java
index 21a64e388fa7b..c8529c5b02bd4 100644
--- a/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java
+++ b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java
@@ -8,79 +8,31 @@
 package org.opensearch.action.search;
 
-import org.opensearch.action.FailedNodeException;
+import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.nodes.TransportNodesAction;
-import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.action.support.HandledTransportAction;
 import org.opensearch.common.inject.Inject;
-import org.opensearch.common.io.stream.StreamInput;
-import org.opensearch.search.SearchService;
-import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.tasks.Task;
 import org.opensearch.transport.TransportService;
 
-import java.io.IOException;
-import java.util.List;
-
 /**
- * Transport action to get all active PIT contexts across all nodes
+ * Transport action to get all active PIT contexts across the cluster
  */
-public class TransportGetAllPitsAction extends TransportNodesAction<
-    GetAllPitNodesRequest,
-    GetAllPitNodesResponse,
-    GetAllPitNodeRequest,
-    GetAllPitNodeResponse> {
-    private final SearchService searchService;
+public class TransportGetAllPitsAction extends HandledTransportAction<GetAllPitNodesRequest, GetAllPitNodesResponse> {
+    private final PitService pitService;
 
     @Inject
-    public TransportGetAllPitsAction(
-        ThreadPool threadPool,
-        ClusterService clusterService,
-        TransportService transportService,
-        ActionFilters actionFilters,
-        SearchService searchService
-    ) {
-        super(
-            GetAllPitsAction.NAME,
-            threadPool,
-            clusterService,
-            transportService,
-            actionFilters,
-            GetAllPitNodesRequest::new,
-            GetAllPitNodeRequest::new,
-            ThreadPool.Names.SAME,
-            GetAllPitNodeResponse.class
-        );
-        this.searchService = searchService;
-    }
-
-    @Override
-    protected GetAllPitNodesResponse newResponse(
-        GetAllPitNodesRequest request,
-        List<GetAllPitNodeResponse> getAllPitNodeRespons,
-        List<FailedNodeException> failures
-    ) {
-        return new GetAllPitNodesResponse(clusterService.getClusterName(), getAllPitNodeRespons, failures);
-    }
-
-    @Override
-    protected GetAllPitNodeRequest newNodeRequest(GetAllPitNodesRequest request) {
-        return new GetAllPitNodeRequest();
-    }
-
-    @Override
-    protected GetAllPitNodeResponse newNodeResponse(StreamInput in) throws IOException {
-        return new GetAllPitNodeResponse(in);
+    public TransportGetAllPitsAction(ActionFilters actionFilters, TransportService transportService, PitService pitService) {
+        super(GetAllPitsAction.NAME, transportService, actionFilters, in -> new GetAllPitNodesRequest(in));
+        this.pitService = pitService;
     }
 
-    /**
-     * This retrieves all active PITs in the node
-     */
-    @Override
-    protected GetAllPitNodeResponse nodeOperation(GetAllPitNodeRequest request) {
-        GetAllPitNodeResponse nodeResponse = new GetAllPitNodeResponse(
-            transportService.getLocalNode(),
-            searchService.getAllPITReaderContexts()
-        );
-        return nodeResponse;
+    protected void doExecute(Task task, GetAllPitNodesRequest request, ActionListener<GetAllPitNodesResponse> listener) {
+        // If the security plugin intercepts the request, it replaces the PIT IDs with only the permitted ones
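+        // (the pre-built response is attached to the request via setGetAllPitNodesResponse; see
+        // GetAllPitNodesRequest earlier in this patch). Otherwise delegate to PitService, which fans
+        // out to every node through the internal NodesGetAllPitsAction.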
if (request.getGetAllPitNodesResponse() != null) { + listener.onResponse(request.getGetAllPitNodesResponse()); + } else { + pitService.getAllPits(listener); + } } } diff --git a/server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java new file mode 100644 index 0000000000000..520830cd293f0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.search.SearchService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +/** + * Transport action to get all active PIT contexts across all nodes + */ +public class TransportNodesGetAllPitsAction extends TransportNodesAction< + GetAllPitNodesRequest, + GetAllPitNodesResponse, + GetAllPitNodeRequest, + GetAllPitNodeResponse> { + private final SearchService searchService; + + @Inject + public TransportNodesGetAllPitsAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + SearchService searchService + ) { + super( + NodesGetAllPitsAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + GetAllPitNodesRequest::new, + GetAllPitNodeRequest::new, + ThreadPool.Names.SAME, + GetAllPitNodeResponse.class + ); + this.searchService = searchService; + } + + @Override + protected GetAllPitNodesResponse newResponse( + GetAllPitNodesRequest request, + List getAllPitNodeRespons, + List failures + ) { + return new GetAllPitNodesResponse(clusterService.getClusterName(), getAllPitNodeRespons, failures); + } + + @Override + protected GetAllPitNodeRequest newNodeRequest(GetAllPitNodesRequest request) { + return new GetAllPitNodeRequest(); + } + + @Override + protected GetAllPitNodeResponse newNodeResponse(StreamInput in) throws IOException { + return new GetAllPitNodeResponse(in); + } + + /** + * This retrieves all active PITs in the node + */ + @Override + protected GetAllPitNodeResponse nodeOperation(GetAllPitNodeRequest request) { + GetAllPitNodeResponse nodeResponse = new GetAllPitNodeResponse( + transportService.getLocalNode(), + searchService.getAllPITReaderContexts() + ); + return nodeResponse; + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java new file mode 100644 index 0000000000000..9439670880015 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
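The new TransportNodesGetAllPitsAction keeps the fan-out behavior that TransportGetAllPitsAction used to own. It follows the usual TransportNodesAction template: newNodeRequest derives a per-node request, nodeOperation runs on each node (here answering with SearchService.getAllPITReaderContexts()), and newResponse reassembles the per-node answers plus any per-node failures. A condensed, self-contained sketch of that contract, with simplified stand-in types rather than OpenSearch source:

import java.util.ArrayList;
import java.util.List;

// Simplified stand-in for the TransportNodesAction contract used above.
abstract class NodesFanOut<NodesReq, NodesResp, NodeReq, NodeResp> {

    // Coordinator side: fan out, collect per-node results and failures, then reassemble.
    NodesResp execute(NodesReq request, List<String> nodeIds) {
        List<NodeResp> nodeResponses = new ArrayList<>();
        List<Exception> failures = new ArrayList<>();
        for (String nodeId : nodeIds) {
            try {
                // In OpenSearch this hop goes over the transport layer; here it is a direct call.
                nodeResponses.add(nodeOperation(newNodeRequest(request)));
            } catch (Exception e) {
                failures.add(e); // one failed node does not fail the whole request
            }
        }
        return newResponse(request, nodeResponses, failures);
    }

    abstract NodeReq newNodeRequest(NodesReq request);

    abstract NodeResp nodeOperation(NodeReq request);

    abstract NodesResp newResponse(NodesReq request, List<NodeResp> nodeResponses, List<Exception> failures);
}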
+ */ + +package org.opensearch.rest.action.search; + +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.Strings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestStatusToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.POST; + +/** + * Rest action for creating PIT context + */ +public class RestCreatePitAction extends BaseRestHandler { + public static String ALLOW_PARTIAL_PIT_CREATION = "allow_partial_pit_creation"; + public static String KEEP_ALIVE = "keep_alive"; + + @Override + public String getName() { + return "create_pit_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + boolean allowPartialPitCreation = request.paramAsBoolean(ALLOW_PARTIAL_PIT_CREATION, true); + String[] indices = Strings.splitStringByCommaToArray(request.param("index")); + TimeValue keepAlive = request.paramAsTime(KEEP_ALIVE, null); + CreatePitRequest createPitRequest = new CreatePitRequest(keepAlive, allowPartialPitCreation, indices); + createPitRequest.setIndicesOptions(IndicesOptions.fromRequest(request, createPitRequest.indicesOptions())); + createPitRequest.setPreference(request.param("preference")); + createPitRequest.setRouting(request.param("routing")); + + return channel -> client.createPit(createPitRequest, new RestStatusToXContentListener<>(channel)); + } + + @Override + public List routes() { + return unmodifiableList(asList(new Route(POST, "/{index}/_search/point_in_time"))); + } + +} diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java new file mode 100644 index 0000000000000..452e66f8f5018 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
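With the route above, a PIT is created by POSTing to /{index}/_search/point_in_time, optionally passing keep_alive and allow_partial_pit_creation as query parameters. A client-side sketch of the request the handler assembles follows; the index name and listener bodies are illustrative, and the getId() accessor on CreatePitResponse is assumed:

import org.opensearch.action.ActionListener;
import org.opensearch.action.search.CreatePitRequest;
import org.opensearch.action.search.CreatePitResponse;
import org.opensearch.client.node.NodeClient;
import org.opensearch.common.unit.TimeValue;

// Mirrors POST /my-index/_search/point_in_time?keep_alive=1m&allow_partial_pit_creation=false
class CreatePitSketch {
    static void createPit(NodeClient client) {
        CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueMinutes(1), false, "my-index");
        client.createPit(request, ActionListener.wrap(
            (CreatePitResponse response) -> System.out.println("created PIT " + response.getId()),
            e -> { throw new RuntimeException("PIT creation failed", e); }
        ));
    }
}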
+ */ + +package org.opensearch.rest.action.search; + +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestStatusToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Rest action for deleting PIT contexts + */ +public class RestDeletePitAction extends BaseRestHandler { + + @Override + public String getName() { + return "delete_pit_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + String allPitIdsQualifier = "_all"; + final DeletePitRequest deletePITRequest; + if (request.path().contains(allPitIdsQualifier)) { + deletePITRequest = new DeletePitRequest(asList(allPitIdsQualifier)); + } else { + deletePITRequest = new DeletePitRequest(); + request.withContentOrSourceParamParserOrNull((xContentParser -> { + if (xContentParser != null) { + try { + deletePITRequest.fromXContent(xContentParser); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to parse request body", e); + } + } + })); + } + return channel -> client.deletePits(deletePITRequest, new RestStatusToXContentListener(channel)); + } + + @Override + public List routes() { + return unmodifiableList(asList(new Route(DELETE, "/_search/point_in_time"), new Route(DELETE, "/_search/point_in_time/_all"))); + } +} diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 4bd95da193668..04fab85c163a9 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -881,6 +881,7 @@ public void createPitReaderContext(ShardId shardId, TimeValue keepAlive, ActionL shard.awaitShardSearchActive(ignored -> { Engine.SearcherSupplier searcherSupplier = null; ReaderContext readerContext = null; + Releasable decreasePitContexts = openPitContexts::decrementAndGet; try { if (openPitContexts.incrementAndGet() > maxOpenPitContext) { throw new OpenSearchRejectedExecutionException( @@ -902,15 +903,16 @@ public void createPitReaderContext(ShardId shardId, TimeValue keepAlive, ActionL searchOperationListener.onNewPitContext(finalReaderContext); readerContext.addOnClose(() -> { - openPitContexts.decrementAndGet(); searchOperationListener.onFreeReaderContext(finalReaderContext); searchOperationListener.onFreePitContext(finalReaderContext); }); + readerContext.addOnClose(decreasePitContexts); // add the newly created pit reader context to active readers putReaderContext(readerContext); readerContext = null; listener.onResponse(finalReaderContext.id()); } catch (Exception exc) { + Releasables.closeWhileHandlingException(decreasePitContexts); Releasables.closeWhileHandlingException(searcherSupplier, readerContext); listener.onFailure(exc); } diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index a5c6e1c12b79c..c03c27f7d7e4d 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ 
b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -14,6 +14,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.StepListener; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -70,6 +71,8 @@ public class CreatePitControllerTests extends OpenSearchTestCase { ClusterService clusterServiceMock = null; private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + Settings settings = Settings.builder().put("node.name", CreatePitControllerTests.class.getSimpleName()).build(); + NodeClient client = new NodeClient(settings, threadPool); @Override public void tearDown() throws Exception { @@ -219,7 +222,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -308,7 +311,7 @@ public void sendFreePITContexts( CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -406,7 +409,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -494,7 +497,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod }; CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java index 7a1d9a6fe963c..bdc0440a89f69 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -13,6 +13,7 @@ import org.opensearch.action.support.ActionFilter; import org.opensearch.action.support.ActionFilters; import 
org.opensearch.action.support.PlainActionFuture; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; @@ -62,6 +63,7 @@ public class TransportDeletePitActionTests extends OpenSearchTestCase { ClusterService clusterServiceMock = null; Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); private ThreadPool threadPool = new ThreadPool(settings); + NodeClient client = new NodeClient(settings, threadPool); @Override public void tearDown() throws Exception { @@ -165,7 +167,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -229,7 +231,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); @@ -312,7 +314,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -371,7 +373,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -439,7 +441,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -505,7 +507,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new 
ListPitInfo(getPitId(), 0, 0); @@ -581,7 +583,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); @@ -661,7 +663,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index a10f004b2ee97..9a28f1800847e 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -14,6 +14,10 @@ import org.opensearch.action.search.CreatePitController; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.PitTestsUtil; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; @@ -33,6 +37,8 @@ import java.util.concurrent.ExecutionException; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.not; import static org.opensearch.action.search.PitTestsUtil.assertSegments; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -282,6 +288,52 @@ public void testMaxOpenPitContexts() throws Exception { validatePitStats("index", 0, maxPitContexts, 0); } + public void testCreatePitMoreThanMaxOpenPitContexts() throws Exception { + createIndex("index"); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + SearchService service = getInstanceFromNode(SearchService.class); + + try { + for (int i = 0; i < 1000; i++) { + client().execute(CreatePitAction.INSTANCE, request).get(); + } + } catch (Exception ex) { + assertTrue( + ex.getMessage() + .contains( + "Trying to create too many Point In Time contexts. " + + "Must be less than or equal to: [" + + SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY) + + "]. " + + "This limit can be set by changing the [search.max_open_pit_context] setting." 
+ ) + ); + } + final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); + validatePitStats("index", maxPitContexts, 0, 0); + // deleteall + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + + /** + * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context + * not found exceptions don't result in failures ( as deletion in one node is successful ) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + assertTrue(deletePitInfo.isSuccessful()); + } + validatePitStats("index", 0, maxPitContexts, 0); + client().execute(CreatePitAction.INSTANCE, request).get(); + validatePitStats("index", 1, maxPitContexts, 0); + service.doClose(); + validatePitStats("index", 0, maxPitContexts + 1, 0); + } + public void testOpenPitContextsConcurrently() throws Exception { createIndex("index"); final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java new file mode 100644 index 0000000000000..5ca384daedbff --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pit; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestCreatePitAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests to verify behavior of create pit rest action + */ +public class RestCreatePitActionTests extends OpenSearchTestCase { + public void testRestCreatePit() throws Exception { + SetOnce createPitCalled = new SetOnce<>(); + RestCreatePitAction action = new RestCreatePitAction(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void createPit(CreatePitRequest request, ActionListener listener) { + createPitCalled.set(true); + assertThat(request.getKeepAlive().getStringRep(), equalTo("1m")); + assertFalse(request.shouldAllowPartialPitCreation()); + } + }) { + Map params = new HashMap<>(); + params.put("keep_alive", "1m"); + params.put("allow_partial_pit_creation", "false"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(createPitCalled.get(), equalTo(true)); + } + } + + public void 
testRestCreatePitDefaultPartialCreation() throws Exception { + SetOnce createPitCalled = new SetOnce<>(); + RestCreatePitAction action = new RestCreatePitAction(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void createPit(CreatePitRequest request, ActionListener listener) { + createPitCalled.set(true); + assertThat(request.getKeepAlive().getStringRep(), equalTo("1m")); + assertTrue(request.shouldAllowPartialPitCreation()); + } + }) { + Map params = new HashMap<>(); + params.put("keep_alive", "1m"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(createPitCalled.get(), equalTo(true)); + } + } +} diff --git a/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java new file mode 100644 index 0000000000000..0bfa16aafe1e3 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pit; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestDeletePitAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +/** + * Tests to verify the behavior of rest delete pit action for list delete and delete all PIT endpoints + */ +public class RestDeletePitActionTests extends OpenSearchTestCase { + public void testParseDeletePitRequestWithInvalidJsonThrowsException() throws Exception { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{invalid_json}"), + XContentType.JSON + ).build(); + Exception e = expectThrows(IllegalArgumentException.class, () -> action.prepareRequest(request, null)); + assertThat(e.getMessage(), equalTo("Failed to parse request body")); + } + + public void testDeletePitWithBody() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("BODY")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{\"pit_id\": [\"BODY\"]}"), + XContentType.JSON + ).build(); + 
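+            // The Fake* harness drives the handler entirely in-process: handleRequest parses the body and
+            // invokes the NoOpNodeClient override above, which captures the DeletePitRequest for assertion.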
FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(pitCalled.get(), equalTo(true)); + } + } + + public void testDeleteAllPit() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("_all")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_all").build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(pitCalled.get(), equalTo(true)); + } + } + + public void testDeleteAllPitWithBody() { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("_all")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{\"pit_id\": [\"BODY\"]}"), + XContentType.JSON + ).withPath("/_all").build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.handleRequest(request, channel, nodeClient) + ); + assertTrue(ex.getMessage().contains("request [GET /_all] does not support having a body")); + } + } + + public void testDeletePitQueryStringParamsShouldThrowException() { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(2)); + assertThat(request.getPitIds().get(0), equalTo("QUERY_STRING")); + assertThat(request.getPitIds().get(1), equalTo("QUERY_STRING_1")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams( + Collections.singletonMap("pit_id", "QUERY_STRING,QUERY_STRING_1") + ).build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.handleRequest(request, channel, nodeClient) + ); + assertTrue(ex.getMessage().contains("unrecognized param")); + } + } +} From bd11c6958a78f57ce4de33ef5d004044bf838622 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Sep 2022 08:24:08 -0700 Subject: [PATCH 24/78] Bump com.diffplug.spotless from 6.9.1 to 6.10.0 (#4319) --- CHANGELOG.md | 4 +++- build.gradle | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f89e7eba0698c..19b5c8e85cfeb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344)) - Label configuration for dependabot 
PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348)) - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) +### Dependencies +- Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0 ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) @@ -53,4 +55,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/build.gradle b/build.gradle index ce5ea6cdd7e11..a1f4f2d04883a 100644 --- a/build.gradle +++ b/build.gradle @@ -55,7 +55,7 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.9.1" apply false + id "com.diffplug.spotless" version "6.10.0" apply false id "org.gradle.test-retry" version "1.4.0" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' From 236f2f611051bd50aaa9fcb2ccbaa4bd6211fea0 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 1 Sep 2022 11:28:35 -0400 Subject: [PATCH 25/78] Update to Netty 4.1.80.Final (#4359) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- modules/transport-netty4/build.gradle | 8 ++++++++ .../licenses/netty-buffer-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.80.Final.jar.sha1 | 1 + ...tty-transport-native-unix-common-4.1.79.Final.jar.sha1 | 1 - ...tty-transport-native-unix-common-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 | 1 + ...tty-transport-native-unix-common-4.1.79.Final.jar.sha1 | 1 - ...tty-transport-native-unix-common-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-all-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-all-4.1.80.Final.jar.sha1 | 1 + plugins/transport-nio/build.gradle | 6 ++++++ .../licenses/netty-buffer-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.80.Final.jar.sha1 | 1 + 
.../licenses/netty-codec-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.80.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.79.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.80.Final.jar.sha1 | 1 + 50 files changed, 39 insertions(+), 24 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 delete mode 100644 
plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 19b5c8e85cfeb..6434041b038ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) +- Update to Netty 4.1.80.Final ([#4359](https://github.com/opensearch-project/OpenSearch/pull/4359)) ### Deprecated diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 072dcc4578977..6cc24a3f09244 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -21,7 +21,7 @@ asm = 9.3 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.5.0 -netty = 4.1.79.Final +netty = 4.1.80.Final joda = 2.10.13 # client dependencies diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 5d2047d7f18a2..8bbe0bf2ef65f 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -144,6 +144,14 @@ thirdPartyAudit { 'org.apache.log4j.Level', 'org.apache.log4j.Logger', + // from io.netty.handler.ssl.OpenSslEngine (netty) + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', + // from io.netty.handler.ssl.OpenSslEngine (netty) 'io.netty.internal.tcnative.Buffer', 'io.netty.internal.tcnative.CertificateCompressionAlgo', diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 deleted file mode 100644 index 8e9e4d0b7f754..0000000000000 --- 
a/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c014412b599489b1db27c6bc08d8a46da94e397 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..471fe8b211df2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +a087321a63d9991e25f7b7d24ef53edcbcb954ff \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 deleted file mode 100644 index c0920231d79a8..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..0f8e3bebe1532 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +4941821a158d16311665d8606aefa610ecf0f64c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 deleted file mode 100644 index a3f650da5abbd..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -882c70bc0a30a98bf3ce477f043e967ac026044c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..d18720d164335 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +efb23f9d5187d2f733595ef7930137f0cb2cec48 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 deleted file mode 100644 index f2989024cfce1..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0eeffab0cd5efb699d5e4ab9b694d32fef6694b3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..d96a286b98493 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 @@ -0,0 +1 @@ +bf7b66834188ef1a6f6095291c6b81a1880798ba \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 deleted file mode 100644 index faa7b099406a3..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2814bd465731355323aba0fdd22163bfce638a75 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1 new file mode 100644 index 0000000000000..d256e77b7024c --- /dev/null +++ 
b/modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+3d43ce22863bc590e4e33fbdabbb58dc05f4c43d
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1
deleted file mode 100644
index 8e314f164da69..0000000000000
--- a/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2dc22423c8ed19906615fb936a5fcb7db14a4e6c
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..022ad6bc93dba
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+cf7029d2f9bc4eeae8ff15af7a528d06b518a017
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1
deleted file mode 100644
index af550935bb911..0000000000000
--- a/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-55ecb1ff4464b56564a90824a741c3911264aaa4
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..ad0f71b569377
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+3bbb0d4bfbbab867e5b757b97a6e5e0d1348d94c
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1
deleted file mode 100644
index c6e18efb3ad3d..0000000000000
--- a/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6cc2b49749b4fbcc39c687027e04e65e857552a9
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..2bfb4f377d89b
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+57fcace7a1b8567aa39921c915d1b1ba78fd4d2d
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1
deleted file mode 100644
index 7f984663dfa85..0000000000000
--- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-731937caec938b77b39df932a8da8aaca8d5ec05
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..998e6e8560724
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+da3d7da1a8d317ae2c82b400fd255fe610c43ebe
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1
deleted file mode 100644
index a1753b194ea31..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6c19c46f9529791964f636c93cfaca0556f0d5d0
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..2dab7f40b02b7
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+6926d2ea779f41071ecb1948d880dfbb3a6ee126
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
deleted file mode 100644
index f2989024cfce1..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0eeffab0cd5efb699d5e4ab9b694d32fef6694b3
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..d96a286b98493
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+bf7b66834188ef1a6f6095291c6b81a1880798ba
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1
deleted file mode 100644
index 913f0e7685c86..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-794a5937cdb1871c4ae350610752dec2929dc1d6
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..625344e6cfb0a
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+00025b767be3425f3b31a34ee095c85619169f17
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1
deleted file mode 100644
index dbb072f3f665f..0000000000000
--- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-054aace8683de7893cf28d4aab72cd60f49b5700
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..c3184ec5ff7d3
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+9b3b42ff805723fb98120f5ab2019c53e71da91b
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1
deleted file mode 100644
index a5d1be00d9c29..0000000000000
--- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8eb9be9b6a66a03f5f4df67fe559cb676493d167
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..bb6a3502a729f
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+6b1602f80b6235b0b7d53bc5e9c1a6cd11c1b804
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1
deleted file mode 100644
index 7f984663dfa85..0000000000000
--- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-731937caec938b77b39df932a8da8aaca8d5ec05
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..998e6e8560724
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+da3d7da1a8d317ae2c82b400fd255fe610c43ebe
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1
deleted file mode 100644
index 724950db96f09..0000000000000
--- a/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1c53cffaa14d61de523b167377843e35807292a7
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..ae6eb1d85f1ea
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+39e73b76a3ec65df731b371179e15f2c3e4e7575
\ No newline at end of file
diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle
index a7e8c42a4e2d3..c5b401de60c8c 100644
--- a/plugins/transport-nio/build.gradle
+++ b/plugins/transport-nio/build.gradle
@@ -83,6 +83,12 @@ thirdPartyAudit {
     'org.bouncycastle.cert.X509v3CertificateBuilder',
     'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter',
     'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder',
+    'org.bouncycastle.openssl.PEMEncryptedKeyPair',
+    'org.bouncycastle.openssl.PEMParser',
+    'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter',
+    'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder',
+    'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder',
+    'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo',
 
     // from io.netty.handler.ssl.JettyNpnSslEngine (netty)
     'org.eclipse.jetty.npn.NextProtoNego$ClientProvider',
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1
deleted file mode 100644
index 8e9e4d0b7f754..0000000000000
--- a/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6c014412b599489b1db27c6bc08d8a46da94e397
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..471fe8b211df2
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+a087321a63d9991e25f7b7d24ef53edcbcb954ff
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1
deleted file mode 100644
index c0920231d79a8..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..0f8e3bebe1532
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+4941821a158d16311665d8606aefa610ecf0f64c
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1
deleted file mode 100644
index a3f650da5abbd..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-882c70bc0a30a98bf3ce477f043e967ac026044c
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..d18720d164335
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+efb23f9d5187d2f733595ef7930137f0cb2cec48
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1
deleted file mode 100644
index faa7b099406a3..0000000000000
--- a/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2814bd465731355323aba0fdd22163bfce638a75
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..d256e77b7024c
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+3d43ce22863bc590e4e33fbdabbb58dc05f4c43d
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1
deleted file mode 100644
index 8e314f164da69..0000000000000
--- a/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2dc22423c8ed19906615fb936a5fcb7db14a4e6c
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..022ad6bc93dba
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+cf7029d2f9bc4eeae8ff15af7a528d06b518a017
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1
deleted file mode 100644
index af550935bb911..0000000000000
--- a/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-55ecb1ff4464b56564a90824a741c3911264aaa4
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..ad0f71b569377
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+3bbb0d4bfbbab867e5b757b97a6e5e0d1348d94c
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1
deleted file mode 100644
index c6e18efb3ad3d..0000000000000
--- a/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6cc2b49749b4fbcc39c687027e04e65e857552a9
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..2bfb4f377d89b
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+57fcace7a1b8567aa39921c915d1b1ba78fd4d2d
\ No newline at end of file

From 5c3cc935c79b0b1eb48826a3182805d6c51bafea Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 1 Sep 2022 09:15:10 -0700
Subject: [PATCH 26/78] Bump xmlbeans from 5.1.0 to 5.1.1 in
 /plugins/ingest-attachment (#4354)

* Bump xmlbeans from 5.1.0 to 5.1.1 in /plugins/ingest-attachment

Bumps xmlbeans from 5.1.0 to 5.1.1.

---
updated-dependencies:
- dependency-name: org.apache.xmlbeans:xmlbeans
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]

* Updating SHAs

Signed-off-by: dependabot[bot]

* Update changelog

Signed-off-by: dependabot[bot]

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 CHANGELOG.md                                               | 1 +
 plugins/ingest-attachment/build.gradle                     | 2 +-
 plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 | 1 -
 plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1
 create mode 100644 plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6434041b038ee..93aef24a46674 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847))
 ### Dependencies
 - Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0
+- Bumps `xmlbeans` from 5.1.0 to 5.1.1
 ### Changed
 - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))
diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle
index 86694b9bc9da7..af9485c991f0c 100644
--- a/plugins/ingest-attachment/build.gradle
+++ b/plugins/ingest-attachment/build.gradle
@@ -79,7 +79,7 @@ dependencies {
   api "org.apache.poi:poi:${versions.poi}"
   api "org.apache.poi:poi-ooxml-lite:${versions.poi}"
   api "commons-codec:commons-codec:${versions.commonscodec}"
-  api 'org.apache.xmlbeans:xmlbeans:5.1.0'
+  api 'org.apache.xmlbeans:xmlbeans:5.1.1'
  api 'org.apache.commons:commons-collections4:4.4'
   // MS Office
   api "org.apache.poi:poi-scratchpad:${versions.poi}"
diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1
deleted file mode 100644
index 85f757b61048c..0000000000000
--- a/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3534ab896663e6f6d8a2cf46882d7407641d7a31
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1
new file mode 100644
index 0000000000000..4d1d2ad0807e7
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1
@@ -0,0 +1 @@
+48a369df0eccb509d46203104e4df9cb00f0f68b
\ No newline at end of file

From 715da849e0bf910c84a7c7615ab2faccef95b4a7 Mon Sep 17 00:00:00 2001
From: Rabi Panda
Date: Thu, 1 Sep 2022 10:06:19 -0700
Subject: [PATCH 27/78] Fix randomized test failure
 NRTReplicationEngineTests.testUpdateSegments (#4352)

Overload `generateHistoryOnReplica` so that it can generate operations of
only a specific `Engine.Operation.TYPE`, as required by the
`testUpdateSegments` test.

Signed-off-by: Rabi Panda
Signed-off-by: Rabi Panda
---
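Note: the overload lets a test ask for a single operation type directly instead
of generating a randomly typed history and filtering it afterwards (which could
leave few or even zero INDEX operations). A minimal sketch of the new call
shape, assuming the surrounding EngineTestCase scaffolding (`between` and
`randomBoolean` come from the randomized-testing framework):

    // Generate a history made up of INDEX operations only.
    List<Engine.Operation> operations = generateHistoryOnReplica(
        between(1, 500),   // numOps
        randomBoolean(),   // allowGapInSeqNo
        randomBoolean(),   // allowDuplicate
        randomBoolean(),   // includeNestedDocs
        Engine.Operation.TYPE.INDEX
    );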
 CHANGELOG.md                                        |  3 ++-
 .../index/engine/NRTReplicationEngineTests.java     | 12 ++++++++----
 .../opensearch/index/engine/EngineTestCase.java     | 17 ++++++++++++++++-
 3 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 93aef24a46674..376c8f37c8063 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -33,7 +33,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314))
 - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331))
 - Fixed cancellation of segment replication events ([#4225](https://github.com/opensearch-project/OpenSearch/pull/4225))
-- Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364))
+- Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364))
+- Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352))

 ### Security
 - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341))
diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java
index 1fe1a37dedae0..0008afcc901c7 100644
--- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java
@@ -112,10 +112,14 @@ public void testUpdateSegments() throws Exception {
             final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore)
         ) {
             // add docs to the primary engine.
-            List<Engine.Operation> operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean(), randomBoolean())
-                .stream()
-                .filter(op -> op.operationType().equals(Engine.Operation.TYPE.INDEX))
-                .collect(Collectors.toList());
+            List<Engine.Operation> operations = generateHistoryOnReplica(
+                between(1, 500),
+                randomBoolean(),
+                randomBoolean(),
+                randomBoolean(),
+                Engine.Operation.TYPE.INDEX
+            );
+
             for (Engine.Operation op : operations) {
                 applyOperation(engine, op);
                 applyOperation(nrtEngine, op);
diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java
index 174747d306ff5..af754d77560cc 100644
--- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java
@@ -1070,6 +1070,22 @@ public List<Engine.Operation> generateHistoryOnReplica(
         boolean allowGapInSeqNo,
         boolean allowDuplicate,
         boolean includeNestedDocs
+    ) throws Exception {
+        return generateHistoryOnReplica(
+            numOps,
+            allowGapInSeqNo,
+            allowDuplicate,
+            includeNestedDocs,
+            randomFrom(Engine.Operation.TYPE.values())
+        );
+    }
+
+    public List<Engine.Operation> generateHistoryOnReplica(
+        int numOps,
+        boolean allowGapInSeqNo,
+        boolean allowDuplicate,
+        boolean includeNestedDocs,
+        Engine.Operation.TYPE opType
     ) throws Exception {
         long seqNo = 0;
         final int maxIdValue = randomInt(numOps * 2);
@@ -1077,7 +1093,6 @@ public List<Engine.Operation> generateHistoryOnReplica(
         CheckedBiFunction<String, Integer, ParsedDocument, IOException> nestedParsedDocFactory = nestedParsedDocFactory();
         for (int i = 0; i < numOps; i++) {
             final String id = Integer.toString(randomInt(maxIdValue));
-            final Engine.Operation.TYPE opType = randomFrom(Engine.Operation.TYPE.values());
             final boolean isNestedDoc = includeNestedDocs && opType == Engine.Operation.TYPE.INDEX && randomBoolean();
             final int nestedValues = between(0, 3);
             final long startTime = threadPool.relativeTimeInNanos();

From 70d911cad98e843f4702673015abce508190d389 Mon Sep 17 00:00:00 2001
From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com>
Date: Thu, 1 Sep 2022 21:26:36 -0700
Subject: [PATCH 28/78] [AUTO] [main] Added bwc version 2.2.2. (#4383)

* Added bwc version 2.2.2

* Add changelog

Signed-off-by: Kunal Kotwani

Signed-off-by: Kunal Kotwani
Co-authored-by: opensearch-ci-bot
Co-authored-by: Kunal Kotwani
---
 .ci/bwcVersions                                  | 1 +
 CHANGELOG.md                                     | 4 +++-
 server/src/main/java/org/opensearch/Version.java | 1 +
 3 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 1ba3ee562317a..914426eebe35e 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -49,4 +49,5 @@ BWC_VERSION:
   - "2.1.1"
   - "2.2.0"
   - "2.2.1"
+  - "2.2.2"
   - "2.3.0"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 376c8f37c8063..76f134f10c29e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344))
 - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348))
 - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847))
+- BWC version 2.2.2 ([#4383](https://github.com/opensearch-project/OpenSearch/pull/4383))
+
 ### Dependencies
 - Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0
 - Bumps `xmlbeans` from 5.1.0 to 5.1.1
@@ -58,4 +60,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)

 [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD
-[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x
\ No newline at end of file
+[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x
diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java
index ba512d3fbcdd9..10e5f16419a7a 100644
--- a/server/src/main/java/org/opensearch/Version.java
+++ b/server/src/main/java/org/opensearch/Version.java
@@ -96,6 +96,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_2_1_1 = new Version(2010199, org.apache.lucene.util.Version.LUCENE_9_2_0);
     public static final Version V_2_2_0 = new Version(2020099, org.apache.lucene.util.Version.LUCENE_9_3_0);
     public static final Version V_2_2_1 = new Version(2020199, org.apache.lucene.util.Version.LUCENE_9_3_0);
+    public static final Version V_2_2_2 = new Version(2020299, org.apache.lucene.util.Version.LUCENE_9_3_0);
     public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0);
     public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0);
     public static final Version CURRENT = V_3_0_0;

From c885686b0fb12d9b0397d38c37518c65cbb466c5 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Fri, 2 Sep 2022 11:52:02 -0700
Subject: [PATCH 29/78] [Segment Replication] Bump segment infos counter
 before commit during replica promotion (#4365)

* [Segment Replication] Bump segment infos counter before commit during
  replica promotion

Signed-off-by: Suraj Singh

* Add changelog entry

Signed-off-by: Suraj Singh

Signed-off-by: Suraj Singh
---
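Note: the counter matters because Lucene derives new segment names from it. A
short sketch of the effect, assuming Lucene's usual base-36 naming scheme (the
values are purely illustrative):

    // Illustration: a new segment name is roughly "_" + base-36(counter).
    long counter = 11;
    String next = "_" + Long.toString(counter, Character.MAX_RADIX);       // "_b"
    counter += 10; // SI_COUNTER_INCREMENT, applied in commitSegmentInfos()
    String afterBump = "_" + Long.toString(counter, Character.MAX_RADIX);  // "_l"

Skipping ahead makes it unlikely that a newly promoted replica writes a segment
file with the same name as one copied from the old primary.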
 CHANGELOG.md                                          | 1 +
 .../opensearch/index/engine/NRTReplicationEngine.java | 9 +++++++++
 .../index/engine/NRTReplicationEngineTests.java       | 2 ++
 3 files changed, 12 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 76f134f10c29e..48d320dd5bce6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,6 +35,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314))
 - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331))
 - Fixed cancellation of segment replication events ([#4225](https://github.com/opensearch-project/OpenSearch/pull/4225))
+- [Segment Replication] Bump segment infos counter before commit during replica promotion ([#4365](https://github.com/opensearch-project/OpenSearch/pull/4365))
 - Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364))
 - Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352))
diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java
index 6f5b7030ed65f..cf753e3360c39 100644
--- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java
+++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java
@@ -54,6 +54,8 @@ public class NRTReplicationEngine extends Engine {
     private final LocalCheckpointTracker localCheckpointTracker;
     private final WriteOnlyTranslogManager translogManager;

+    private static final int SI_COUNTER_INCREMENT = 10;
+
     public NRTReplicationEngine(EngineConfig engineConfig) {
         super(engineConfig);
         store.incRef();
@@ -142,6 +144,13 @@ public synchronized void updateSegments(final SegmentInfos infos, long seqNo) th
     public void commitSegmentInfos() throws IOException {
         // TODO: This method should wait for replication events to finalize.
         final SegmentInfos latestSegmentInfos = getLatestSegmentInfos();
+        /*
+        This is a workaround solution which decreases the chances of conflict on replica nodes when same file is copied
+        from two different primaries during failover. Increasing counter helps in avoiding this conflict as counter is
+        used to generate new segment file names. The ideal solution is to identify the counter from previous primary.
+        */
+        latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT;
+        latestSegmentInfos.changed();
         store.commitSegmentInfos(latestSegmentInfos, localCheckpointTracker.getMaxSeqNo(), localCheckpointTracker.getProcessedCheckpoint());
         translogManager.syncTranslog();
     }
diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java
index 0008afcc901c7..540054782133a 100644
--- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java
@@ -252,6 +252,8 @@ public void testCommitSegmentInfos() throws Exception {
         // ensure getLatestSegmentInfos returns an updated infos ref with correct userdata.
         final SegmentInfos latestSegmentInfos = nrtEngine.getLatestSegmentInfos();
         assertEquals(previousInfos.getGeneration(), latestSegmentInfos.getLastGeneration());
+        assertEquals(previousInfos.getVersion(), latestSegmentInfos.getVersion());
+        assertEquals(previousInfos.counter, latestSegmentInfos.counter);
         Map<String, String> userData = latestSegmentInfos.getUserData();
         assertEquals(processedCheckpoint, localCheckpointTracker.getProcessedCheckpoint());
         assertEquals(maxSeqNo, Long.parseLong(userData.get(MAX_SEQ_NO)));

From b206e98acb69c9d839b7eef74edff8f904eb4b88 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Fri, 2 Sep 2022 11:52:35 -0700
Subject: [PATCH 30/78] [Segment Replication] Add check to cancel ongoing
 replication with old primary on onNewCheckpoint on replica (#4363)

* [Segment Replication] Add check to cancel ongoing replication with old
  primary on onNewCheckpoint on replica

Signed-off-by: Suraj Singh

* Add changelog entry

Signed-off-by: Suraj Singh

* Address review comments

Signed-off-by: Suraj Singh

* Address review comments 2

Signed-off-by: Suraj Singh

* Test failures

Signed-off-by: Suraj Singh

Signed-off-by: Suraj Singh
---
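Note: the new behavior reduces to a primary-term comparison between the
in-flight replication target's checkpoint and the checkpoint that just
arrived. Condensed, the rule implemented in the diff below is:

    // Condensed sketch of the onNewCheckpoint decision (see the diff for the real code).
    SegmentReplicationTarget ongoing = onGoingReplications.getOngoingReplicationTarget(shardId);
    if (ongoing != null) {
        if (ongoing.getCheckpoint().getPrimaryTerm() < receivedCheckpoint.getPrimaryTerm()) {
            // In-flight copy is from a deposed primary: cancel it so the shard
            // can start replicating from the new primary instead.
            onGoingReplications.cancel(ongoing.getId(), "Cancelling stuck target after new primary");
        } else {
            return; // same primary term: already replicating, ignore the checkpoint
        }
    }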
 CHANGELOG.md                                    |  1 +
 .../replication/SegmentReplicationTarget.java   |  4 ++
 .../SegmentReplicationTargetService.java        | 29 +++++++----
 .../common/ReplicationCollection.java           | 16 ++++--
 .../SegmentReplicationTargetServiceTests.java   | 49 ++++++++++++++++++-
 .../recovery/ReplicationCollectionTests.java    | 18 +++++++
 6 files changed, 101 insertions(+), 16 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 48d320dd5bce6..182a6b36fca48 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - [Segment Replication] Bump segment infos counter before commit during replica promotion ([#4365](https://github.com/opensearch-project/OpenSearch/pull/4365))
 - Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364))
 - Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352))
+- [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica ([#4363](https://github.com/opensearch-project/OpenSearch/pull/4363))

 ### Security
 - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341))
diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java
index d1d6104a416ca..7c28406036ddd 100644
--- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java
+++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java
@@ -56,6 +56,10 @@ public class SegmentReplicationTarget extends ReplicationTarget {
     private final SegmentReplicationState state;
     protected final MultiFileWriter multiFileWriter;

+    public ReplicationCheckpoint getCheckpoint() {
+        return this.checkpoint;
+    }
+
     public SegmentReplicationTarget(
         ReplicationCheckpoint checkpoint,
         IndexShard indexShard,
diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java
index 9e6b66dc4d7d6..8fc53ccd3bc08 100644
--- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java
+++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java
@@ -18,6 +18,7 @@
 import org.opensearch.common.Nullable;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.CancellableThreads;
+import org.opensearch.common.util.concurrent.ConcurrentCollections;
 import org.opensearch.index.shard.IndexEventListener;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.ShardId;
@@ -34,7 +35,6 @@
 import org.opensearch.transport.TransportRequestHandler;
 import org.opensearch.transport.TransportService;

-import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
@@ -54,7 +54,7 @@ public class SegmentReplicationTargetService implements IndexEventListener {

     private final SegmentReplicationSourceFactory sourceFactory;

-    private final Map<ShardId, ReplicationCheckpoint> latestReceivedCheckpoint = new HashMap<>();
+    private final Map<ShardId, ReplicationCheckpoint> latestReceivedCheckpoint = ConcurrentCollections.newConcurrentMap();

     // Empty Implementation, only required while Segment Replication is under feature flag.
     public static final SegmentReplicationTargetService NO_OP = new SegmentReplicationTargetService() {
@@ -151,14 +151,23 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe
         } else {
             latestReceivedCheckpoint.put(replicaShard.shardId(), receivedCheckpoint);
         }
-        if (onGoingReplications.isShardReplicating(replicaShard.shardId())) {
-            logger.trace(
-                () -> new ParameterizedMessage(
-                    "Ignoring new replication checkpoint - shard is currently replicating to checkpoint {}",
-                    replicaShard.getLatestReplicationCheckpoint()
-                )
-            );
-            return;
+        SegmentReplicationTarget ongoingReplicationTarget = onGoingReplications.getOngoingReplicationTarget(replicaShard.shardId());
+        if (ongoingReplicationTarget != null) {
+            if (ongoingReplicationTarget.getCheckpoint().getPrimaryTerm() < receivedCheckpoint.getPrimaryTerm()) {
+                logger.trace(
+                    "Cancelling ongoing replication from old primary with primary term {}",
+                    ongoingReplicationTarget.getCheckpoint().getPrimaryTerm()
+                );
+                onGoingReplications.cancel(ongoingReplicationTarget.getId(), "Cancelling stuck target after new primary");
+            } else {
+                logger.trace(
+                    () -> new ParameterizedMessage(
+                        "Ignoring new replication checkpoint - shard is currently replicating to checkpoint {}",
+                        replicaShard.getLatestReplicationCheckpoint()
+                    )
+                );
+                return;
+            }
         }
         final Thread thread = Thread.currentThread();
         if (replicaShard.shouldProcessCheckpoint(receivedCheckpoint)) {
diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java
index d648ca6041ff8..20600856c9444 100644
--- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java
+++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java
@@ -49,6 +49,7 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.ConcurrentMap;
+import java.util.stream.Collectors;

 /**
  * This class holds a collection of all on going replication events on the current node (i.e., the node is the target node
@@ -236,13 +237,18 @@ public boolean cancelForShard(ShardId shardId, String reason) {
     }

     /**
-     * check if a shard is currently replicating
+     * Get target for shard
      *
-     * @param shardId shardId for which to check if replicating
-     * @return true if shard is currently replicating
+     * @param shardId shardId
+     * @return ReplicationTarget for input shardId
      */
-    public boolean isShardReplicating(ShardId shardId) {
-        return onGoingTargetEvents.values().stream().anyMatch(t -> t.indexShard.shardId().equals(shardId));
+    public T getOngoingReplicationTarget(ShardId shardId) {
+        final List<T> replicationTargetList = onGoingTargetEvents.values()
+            .stream()
+            .filter(t -> t.indexShard.shardId().equals(shardId))
+            .collect(Collectors.toList());
+        assert replicationTargetList.size() <= 1 : "More than one on-going replication targets";
+        return replicationTargetList.size() > 0 ? replicationTargetList.get(0) : null;
     }

     /**
diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
index 7d9b0f09f21cd..1d253b0a9a300 100644
--- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
+++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
@@ -49,6 +49,8 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase {
     private ReplicationCheckpoint initialCheckpoint;
     private ReplicationCheckpoint aheadCheckpoint;

+    private ReplicationCheckpoint newPrimaryCheckpoint;
+
     @Override
     public void setUp() throws Exception {
         super.setUp();
@@ -74,6 +76,13 @@ public void setUp() throws Exception {
             initialCheckpoint.getSeqNo(),
             initialCheckpoint.getSegmentInfosVersion() + 1
         );
+        newPrimaryCheckpoint = new ReplicationCheckpoint(
+            initialCheckpoint.getShardId(),
+            initialCheckpoint.getPrimaryTerm() + 1,
+            initialCheckpoint.getSegmentsGen(),
+            initialCheckpoint.getSeqNo(),
+            initialCheckpoint.getSegmentInfosVersion() + 1
+        );
     }

     @Override
@@ -160,7 +169,7 @@ public void testShardAlreadyReplicating() throws InterruptedException {
         // Create a spy of Target Service so that we can verify invocation of startReplication call with specific checkpoint on it.
         SegmentReplicationTargetService serviceSpy = spy(sut);
         final SegmentReplicationTarget target = new SegmentReplicationTarget(
-            checkpoint,
+            initialCheckpoint,
             replicaShard,
             replicationSource,
             mock(SegmentReplicationTargetService.SegmentReplicationListener.class)
         );
@@ -185,9 +194,47 @@ public void testShardAlreadyReplicating() throws InterruptedException {

         // wait for the new checkpoint to arrive, before the listener completes.
         latch.await(30, TimeUnit.SECONDS);
+        verify(targetSpy, times(0)).cancel(any());
         verify(serviceSpy, times(0)).startReplication(eq(aheadCheckpoint), eq(replicaShard), any());
     }

+    public void testOnNewCheckpointFromNewPrimaryCancelOngoingReplication() throws IOException, InterruptedException {
+        // Create a spy of Target Service so that we can verify invocation of startReplication call with specific checkpoint on it.
+        SegmentReplicationTargetService serviceSpy = spy(sut);
+        // Create a Mockito spy of target to stub response of few method calls.
+        final SegmentReplicationTarget targetSpy = spy(
+            new SegmentReplicationTarget(
+                initialCheckpoint,
+                replicaShard,
+                replicationSource,
+                mock(SegmentReplicationTargetService.SegmentReplicationListener.class)
+            )
+        );
+
+        CountDownLatch latch = new CountDownLatch(1);
+        // Mocking response when startReplication is called on targetSpy we send a new checkpoint to serviceSpy and later reduce countdown
+        // of latch.
+        doAnswer(invocation -> {
+            final ActionListener<Void> listener = invocation.getArgument(0);
+            // a new checkpoint arrives before we've completed.
+            serviceSpy.onNewCheckpoint(newPrimaryCheckpoint, replicaShard);
+            listener.onResponse(null);
+            latch.countDown();
+            return null;
+        }).when(targetSpy).startReplication(any());
+        doNothing().when(targetSpy).onDone();
+
+        // start replication. This adds the target to on-ongoing replication collection
+        serviceSpy.startReplication(targetSpy);
+
+        // wait for the new checkpoint to arrive, before the listener completes.
+        latch.await(5, TimeUnit.SECONDS);
+        doNothing().when(targetSpy).startReplication(any());
+        verify(targetSpy, times(1)).cancel("Cancelling stuck target after new primary");
+        verify(serviceSpy, times(1)).startReplication(eq(newPrimaryCheckpoint), eq(replicaShard), any());
+        closeShards(replicaShard);
+    }
+
     public void testNewCheckpointBehindCurrentCheckpoint() {
         SegmentReplicationTargetService spy = spy(sut);
         spy.onNewCheckpoint(checkpoint, replicaShard);
diff --git a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java
index 7587f48503625..1789dd3b2a288 100644
--- a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java
+++ b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java
@@ -105,7 +105,25 @@ public void onFailure(ReplicationState state, OpenSearchException e, boolean sen
                 collection.cancel(recoveryId, "meh");
             }
         }
+    }

+    public void testMultiReplicationsForSingleShard() throws Exception {
+        try (ReplicationGroup shards = createGroup(0)) {
+            final ReplicationCollection<RecoveryTarget> collection = new ReplicationCollection<>(logger, threadPool);
+            final IndexShard shard1 = shards.addReplica();
+            final IndexShard shard2 = shards.addReplica();
+            final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shard1);
+            final long recoveryId2 = startRecovery(collection, shards.getPrimaryNode(), shard2);
+            try {
+                collection.getOngoingReplicationTarget(shard1.shardId());
+            } catch (AssertionError e) {
+                assertEquals(e.getMessage(), "More than one on-going replication targets");
+            } finally {
+                collection.cancel(recoveryId, "meh");
+                collection.cancel(recoveryId2, "meh");
+            }
+            closeShards(shard1, shard2);
+        }
     }

     public void testRecoveryCancellation() throws Exception {

From 0c10674924d66547d009237f8dd333243aac6ac4 Mon Sep 17 00:00:00 2001
From: Kunal Kotwani
Date: Fri, 2 Sep 2022 13:44:38 -0700
Subject: [PATCH 31/78] Adding support for labels on version bump PRs, skip
 label support for changelog verifier (#4391)

* Adding support for labels on version bump PRs, skip label support for
  changelog verifier

Signed-off-by: Kunal Kotwani

* Add changelog

Signed-off-by: Kunal Kotwani

Signed-off-by: Kunal Kotwani
---
 .github/workflows/changelog_verifier.yml | 2 ++
 .github/workflows/version.yml            | 8 +++++++-
 CHANGELOG.md                             | 1 +
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml
index cda5dde462068..96f99f17b016e 100644
--- a/.github/workflows/changelog_verifier.yml
+++ b/.github/workflows/changelog_verifier.yml
@@ -14,3 +14,5 @@ jobs:
           ref: ${{ github.event.pull_request.head.sha }}

       - uses: dangoslen/changelog-enforcer@v3
+        with:
+          skipLabels: "autocut"
diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml
index 030689642677a..42c2d21d106ce 100644
--- a/.github/workflows/version.yml
+++ b/.github/workflows/version.yml
@@ -5,7 +5,7 @@ on:
     tags:
       - '*.*.*'

-jobs: 
+jobs:
   build:
     runs-on: ubuntu-latest
     steps:
@@ -61,6 +61,8 @@ jobs:
           commit-message: Incremented version to ${{ env.NEXT_VERSION }}
          signoff: true
           delete-branch: true
+          labels: |
+            autocut
           title: '[AUTO] Incremented version to ${{ env.NEXT_VERSION }}.'
           body: |
             I've noticed that a new tag ${{ env.TAG }} was pushed, and incremented the version from ${{ env.CURRENT_VERSION }} to ${{ env.NEXT_VERSION }}.
@@ -86,6 +88,8 @@ jobs:
           commit-message: Added bwc version ${{ env.NEXT_VERSION }}
           signoff: true
           delete-branch: true
+          labels: |
+            autocut
           title: '[AUTO] [${{ env.BASE_X }}] Added bwc version ${{ env.NEXT_VERSION }}.'
           body: |
             I've noticed that a new tag ${{ env.TAG }} was pushed, and added a bwc version ${{ env.NEXT_VERSION }}.
@@ -111,6 +115,8 @@ jobs:
           commit-message: Added bwc version ${{ env.NEXT_VERSION }}
           signoff: true
           delete-branch: true
+          labels: |
+            autocut
           title: '[AUTO] [main] Added bwc version ${{ env.NEXT_VERSION }}.'
           body: |
             I've noticed that a new tag ${{ env.TAG }} was pushed, and added a bwc version ${{ env.NEXT_VERSION }}.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 182a6b36fca48..dc6c290253dbe 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348))
 - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847))
 - BWC version 2.2.2 ([#4383](https://github.com/opensearch-project/OpenSearch/pull/4383))
+- Support for labels on version bump PRs, skip label support for changelog verifier ([#4391](https://github.com/opensearch-project/OpenSearch/pull/4391))

 ### Dependencies
 - Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0

From fab2a122cbb5ce535066d3c3b9c4ae9012d2e331 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Fri, 2 Sep 2022 18:57:39 -0700
Subject: [PATCH 32/78] [Segment Replication] Extend FileChunkWriter to allow
 cancel on transport client (#4386)

* [Segment Replication] Extend FileChunkWriter to allow cancel on retryable
  transport client

Signed-off-by: Suraj Singh

* Add changelog entry

Signed-off-by: Suraj Singh

* Address review comments

Signed-off-by: Suraj Singh

* Integration test

Signed-off-by: Suraj Singh

Signed-off-by: Suraj Singh
---
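Note: because `cancel()` is added as a no-op `default` method, existing
`FileChunkWriter` implementations keep compiling, and only writers that hold a
cancellable resource need to override it. A hedged sketch of such an
implementation; the `writeFileChunk` parameters before `totalTranslogOps` and
the `RetryableTransportClient` collaborator are assumptions, not the exact
code from this change:

    import org.opensearch.action.ActionListener;
    import org.opensearch.common.bytes.BytesReference;
    import org.opensearch.index.store.StoreFileMetadata;

    // Sketch only: a writer that forwards cancel() to a cancellable transport client.
    class CancellableChunkWriter implements FileChunkWriter {
        private final RetryableTransportClient retryableTransportClient; // assumed collaborator

        CancellableChunkWriter(RetryableTransportClient client) {
            this.retryableTransportClient = client;
        }

        @Override
        public void writeFileChunk(
            StoreFileMetadata fileMetadata,
            long position,
            BytesReference content,
            boolean lastChunk,
            int totalTranslogOps,
            ActionListener<Void> listener
        ) {
            // send the chunk over the transport layer (elided in this sketch)
        }

        @Override
        public void cancel() {
            retryableTransportClient.cancel(); // abort in-flight retries for this target
        }
    }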
 CHANGELOG.md                                        |  1 +
 .../replication/SegmentReplicationIT.java           | 72 ++++++++++++++++++-
 .../indices/recovery/FileChunkWriter.java           |  2 +
 .../OngoingSegmentReplications.java                 |  2 +-
 .../RemoteSegmentFileChunkWriter.java               |  5 ++
 .../SegmentReplicationSourceHandler.java            |  6 +-
 .../SegmentReplicationSourceHandlerTests.java       |  3 +
 7 files changed, 88 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index dc6c290253dbe..0930923805d96 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -39,6 +39,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - [Segment Replication] Bump segment infos counter before commit during replica promotion ([#4365](https://github.com/opensearch-project/OpenSearch/pull/4365))
 - Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364))
 - Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352))
+- [Segment Replication] Extend FileChunkWriter to allow cancel on transport client ([#4386](https://github.com/opensearch-project/OpenSearch/pull/4386))
 - [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica ([#4363](https://github.com/opensearch-project/OpenSearch/pull/4363))

 ### Security
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
index a9b6787d87bdf..16e9d78b17826 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
@@ -33,17 +33,23 @@
 import org.opensearch.index.engine.Segment;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.indices.IndicesService;
+import org.opensearch.indices.recovery.FileChunkRequest;
 import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.plugins.Plugin;
 import org.opensearch.test.BackgroundIndexer;
 import org.opensearch.test.InternalTestCluster;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.transport.MockTransportService;
+import org.opensearch.transport.TransportService;

 import java.io.IOException;
+import java.util.Collection;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.util.Optional;
 import java.util.Set;
+import java.util.Optional;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.stream.Collectors;
@@ -65,6 +71,11 @@ public static void assumeFeatureFlag() {
         assumeTrue("Segment replication Feature flag is enabled", Boolean.parseBoolean(System.getProperty(FeatureFlags.REPLICATION_TYPE)));
     }

+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Arrays.asList(MockTransportService.TestPlugin.class);
+    }
+
     @Override
     public Settings indexSettings() {
         return Settings.builder()
@@ -318,6 +329,65 @@ public void testReplicationAfterForceMerge() throws Exception {
         }
     }

+    public void testCancellation() throws Exception {
+        final String primaryNode = internalCluster().startNode();
+        createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build());
+        ensureYellow(INDEX_NAME);
+
+        final String replicaNode = internalCluster().startNode();
+
+        final SegmentReplicationSourceService segmentReplicationSourceService = internalCluster().getInstance(
+            SegmentReplicationSourceService.class,
+            primaryNode
+        );
+        final IndexShard primaryShard = getIndexShard(primaryNode);
+
+        CountDownLatch latch = new CountDownLatch(1);
+
+        MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(
+            TransportService.class,
+            primaryNode
+        ));
+        mockTransportService.addSendBehavior(
+            internalCluster().getInstance(TransportService.class, replicaNode),
+            (connection, requestId, action, request, options) -> {
+                if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK)) {
+                    FileChunkRequest req = (FileChunkRequest) request;
+                    logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk());
+                    if (req.name().endsWith("cfs") && req.lastChunk()) {
+                        try {
+                            latch.await();
+                        } catch (InterruptedException e) {
+                            throw new RuntimeException(e);
+                        }
+                    }
+                }
+                connection.sendRequest(requestId, action, request, options);
+            }
+        );
+
+        final int docCount = scaledRandomIntBetween(0, 200);
+        try (
+            BackgroundIndexer indexer = new BackgroundIndexer(
+                INDEX_NAME,
+                "_doc",
+                client(),
+                -1,
+                RandomizedTest.scaledRandomIntBetween(2, 5),
+                false,
+                random()
+            )
+        ) {
+            indexer.start(docCount);
+            waitForDocs(docCount, indexer);
+
+            flush(INDEX_NAME);
+        }
+        segmentReplicationSourceService.beforeIndexShardClosed(primaryShard.shardId(), primaryShard, indexSettings());
+        latch.countDown();
+        assertDocCounts(docCount, primaryNode);
+    }
+
     public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception {
         final String primaryNode = internalCluster().startNode();
         createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build());
diff --git a/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java b/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java
index cb43af3b82e09..f1cc7b8dd1d89 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java
@@ -28,4 +28,6 @@ void writeFileChunk(
         int totalTranslogOps,
         ActionListener listener
     );
+
+    default void cancel() {}
 }
diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java
index 828aa29192fe3..1a97d334df58f 100644
--- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java
+++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java
@@ -126,7 +126,7 @@ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener<GetSegmentFilesResponse>

From: Movva Ajaykumar
Date: Sun, 4 Sep 2022 21:55:47 +0530
Subject: [PATCH 33/78] Added RestLayer Changes for PIT stats (#4217)

Signed-off-by: Ajay Kumar Movva
---
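Note: the new columns are registered with `default:false`, so they stay hidden
unless requested explicitly, for example with
`GET _cat/shards?v&h=index,shard,search.point_in_time_current,search.point_in_time_total`,
or via the short aliases added below (`spc`, `spti`, `spto` on the shards
endpoint; `scc`, `scti`, `scto` on the indices and nodes endpoints).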
b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index a8cdff5775478..f04d0ab712b39 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -597,6 +597,24 @@ protected Table getTableWithHeader(final RestRequest request) { ); table.addCell("pri.search.scroll_total", "default:false;text-align:right;desc:completed scroll contexts"); + table.addCell( + "search.point_in_time_current", + "sibling:pri;alias:scc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts" + ); + table.addCell("pri.search.point_in_time_current", "default:false;text-align:right;desc:open point in time contexts"); + + table.addCell( + "search.point_in_time_time", + "sibling:pri;alias:scti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open" + ); + table.addCell("pri.search.point_in_time_time", "default:false;text-align:right;desc:time point in time contexts held open"); + + table.addCell( + "search.point_in_time_total", + "sibling:pri;alias:scto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" + ); + table.addCell("pri.search.point_in_time_total", "default:false;text-align:right;desc:completed point in time contexts"); + table.addCell("segments.count", "sibling:pri;alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("pri.segments.count", "default:false;text-align:right;desc:number of segments"); @@ -878,6 +896,15 @@ Table buildTable( table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getScrollCount()); table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getScrollCount()); + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getPitCurrent()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getPitCurrent()); + + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getPitTime()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getPitTime()); + + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getPitCount()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getPitCount()); + table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getCount()); table.addCell(primaryStats.getSegments() == null ? 
null : primaryStats.getSegments().getCount()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index 8d3081bec48e9..6346e5d23cd34 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -310,6 +310,19 @@ protected Table getTableWithHeader(final RestRequest request) { ); table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"); + table.addCell( + "search.point_in_time_current", + "alias:scc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts" + ); + table.addCell( + "search.point_in_time_time", + "alias:scti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open" + ); + table.addCell( + "search.point_in_time_total", + "alias:scto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" + ); + table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); table.addCell( @@ -519,6 +532,9 @@ Table buildTable( table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCount()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getPitCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getPitTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getPitCount()); SegmentsStats segmentsStats = indicesStats == null ? null : indicesStats.getSegments(); table.addCell(segmentsStats == null ? 
null : segmentsStats.getCount()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index 6bf24951fe6c9..5cb5a7876669e 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -225,6 +225,18 @@ protected Table getTableWithHeader(final RestRequest request) { "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open" ); table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"); + table.addCell( + "search.point_in_time_current", + "alias:spc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts" + ); + table.addCell( + "search.point_in_time_time", + "alias:spti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open" + ); + table.addCell( + "search.point_in_time_total", + "alias:spto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" + ); table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); @@ -390,6 +402,9 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCurrent())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitTime())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCount())); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount)); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getZeroMemory)); diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java index ed3aa19afa146..a8679a087216d 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java @@ -134,8 +134,8 @@ public void testBuildTable() { assertThat(row.get(3).value, equalTo(shardRouting.state())); assertThat(row.get(6).value, equalTo(localNode.getHostAddress())); assertThat(row.get(7).value, equalTo(localNode.getId())); - assertThat(row.get(69).value, equalTo(shardStats.getDataPath())); - assertThat(row.get(70).value, equalTo(shardStats.getStatePath())); + assertThat(row.get(72).value, equalTo(shardStats.getDataPath())); + assertThat(row.get(73).value, equalTo(shardStats.getStatePath())); } } } From 4ed09955e7013405f21a959a044f131d3bf224f1 Mon Sep 17 00:00:00 2001 From: Movva Ajaykumar Date: Tue, 6 Sep 2022 05:56:39 +0530 Subject: [PATCH 34/78] Modified cat shards test for pit stats (#4408) Signed-off-by: Ajay Kumar Movva --- CHANGELOG.md | 1 + .../test/cat.shards/10_basic.yml | 93 ++++++++++++++++++- 2 files changed, 92 
insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c5af055dca8a6..3c7757c7bd070 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352)) - [Segment Replication] Extend FileChunkWriter to allow cancel on transport client ([#4386](https://github.com/opensearch-project/OpenSearch/pull/4386)) - [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica ([#4363](https://github.com/opensearch-project/OpenSearch/pull/4363)) +- Fixed the `_cat/shards/10_basic.yml` test cases fix. ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index f07a06aba4388..6ebe273d552cc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,11 +1,14 @@ --- "Help": - skip: - version: " - 7.99.99" - reason: shard path stats were added in 8.0.0 + version: " - 2.9.99" + reason: point in time stats were added in 3.0.0 + features: node_selector - do: cat.shards: help: true + node_selector: + version: "3.0.0 - " - match: $body: | @@ -85,6 +88,92 @@ path.state .+ \n $/ --- +"Help before - 3.0.0": + - skip: + version: "3.0.0 - " + reason: point in time stats were added in 3.0.0 + features: node_selector + - do: + cat.shards: + help: true + node_selector: + version: " - 2.9.99" + + - match: + $body: | + /^ index .+ \n + shard .+ \n + prirep .+ \n + state .+ \n + docs .+ \n + store .+ \n + ip .+ \n + id .+ \n + node .+ \n + sync_id .+ \n + unassigned.reason .+ \n + unassigned.at .+ \n + unassigned.for .+ \n + unassigned.details .+ \n + recoverysource.type .+ \n + completion.size .+ \n + fielddata.memory_size .+ \n + fielddata.evictions .+ \n + query_cache.memory_size .+ \n + query_cache.evictions .+ \n + flush.total .+ \n + flush.total_time .+ \n + get.current .+ \n + get.time .+ \n + get.total .+ \n + get.exists_time .+ \n + get.exists_total .+ \n + get.missing_time .+ \n + get.missing_total .+ \n + indexing.delete_current .+ \n + indexing.delete_time .+ \n + indexing.delete_total .+ \n + indexing.index_current .+ \n + indexing.index_time .+ \n + indexing.index_total .+ \n + indexing.index_failed .+ \n + merges.current .+ \n + merges.current_docs .+ \n + merges.current_size .+ \n + merges.total .+ \n + merges.total_docs .+ \n + merges.total_size .+ \n + merges.total_time .+ \n + refresh.total .+ \n + refresh.time .+ \n + refresh.external_total .+ \n + refresh.external_time .+ \n + refresh.listeners .+ \n + search.fetch_current .+ \n + search.fetch_time .+ \n + search.fetch_total .+ \n + search.open_contexts .+ \n + search.query_current .+ \n + search.query_time .+ \n + search.query_total .+ \n + search.scroll_current .+ \n + search.scroll_time .+ \n + search.scroll_total .+ \n + segments.count .+ \n + segments.memory .+ \n + segments.index_writer_memory .+ \n + segments.version_map_memory .+ \n + segments.fixed_bitset_memory .+ \n + seq_no.max .+ \n + seq_no.local_checkpoint .+ \n + seq_no.global_checkpoint .+ \n + warmer.current .+ \n + warmer.total .+ \n + 
warmer.total_time .+ \n + path.data .+ \n + path.state .+ \n + $/ +--- "Test cat shards output": - do: From ff2e4bf86bc00ff62b45a0acc21b9790946b0dc7 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Tue, 6 Sep 2022 17:27:58 +0530 Subject: [PATCH 35/78] [Remote Store] Add index specific setting for remote repository (#4253) * Add index specific setting for remote repository * Fix for failover incremental uploads Signed-off-by: Sachin Kale --- CHANGELOG.md | 1 + .../cluster/metadata/IndexMetadata.java | 58 +++++++++++++--- .../common/settings/IndexScopedSettings.java | 6 +- .../org/opensearch/index/IndexService.java | 2 +- .../org/opensearch/index/IndexSettings.java | 9 +++ .../shard/RemoteStoreRefreshListener.java | 7 ++ .../opensearch/index/IndexSettingsTests.java | 69 ++++++++++++++++++- 7 files changed, 139 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c7757c7bd070..4e033e1ffb2bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) - Update to Netty 4.1.80.Final ([#4359](https://github.com/opensearch-project/OpenSearch/pull/4359)) +- Add index specific setting for remote repository ([#4253](https://github.com/opensearch-project/OpenSearch/pull/4253)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 759891e88039b..cd1c92a8b109f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -285,6 +285,8 @@ public Iterator<Setting<?>> settings() { public static final String SETTING_REMOTE_STORE_ENABLED = "index.remote_store.enabled"; + public static final String SETTING_REMOTE_STORE_REPOSITORY = "index.remote_store.repository"; + public static final String SETTING_REMOTE_TRANSLOG_STORE_ENABLED = "index.remote_store.translog.enabled"; /** * Used to specify if the index data should be persisted in the remote store. @@ -322,6 +324,50 @@ public Iterator<Setting<?>> settings() { Property.Final ); + /** + * Used to specify the remote store repository to use for this index.
+ */ + public static final Setting<String> INDEX_REMOTE_STORE_REPOSITORY_SETTING = Setting.simpleString( + SETTING_REMOTE_STORE_REPOSITORY, + new Setting.Validator<>() { + + @Override + public void validate(final String value) {} + + @Override + public void validate(final String value, final Map<Setting<?>, Object> settings) { + if (value == null || value.isEmpty()) { + throw new IllegalArgumentException( + "Setting " + INDEX_REMOTE_STORE_REPOSITORY_SETTING.getKey() + " should be provided with non-empty repository ID" + ); + } else { + validateRemoteStoreSettingEnabled(settings, INDEX_REMOTE_STORE_REPOSITORY_SETTING); + } + } + + @Override + public Iterator<Setting<?>> settings() { + final List<Setting<?>> settings = Collections.singletonList(INDEX_REMOTE_STORE_ENABLED_SETTING); + return settings.iterator(); + } + }, + Property.IndexScope, + Property.Final + ); + + private static void validateRemoteStoreSettingEnabled(final Map<Setting<?>, Object> settings, Setting<?> setting) { + final Boolean isRemoteSegmentStoreEnabled = (Boolean) settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING); + if (isRemoteSegmentStoreEnabled == false) { + throw new IllegalArgumentException( + "Settings " + + setting.getKey() + + " can only be set/enabled when " + + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() + + " is set to true" + ); + } + } + /** * Used to specify if the index translog operations should be persisted in the remote store. */ @@ -335,16 +381,8 @@ public void validate(final Boolean value) {} @Override public void validate(final Boolean value, final Map<Setting<?>, Object> settings) { - final Boolean isRemoteSegmentStoreEnabled = (Boolean) settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING); - if (isRemoteSegmentStoreEnabled == false && value == true) { - throw new IllegalArgumentException( - "Settings " - + INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.getKey() - + " cannot be enabled when " - + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() - + " is set to " - + settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING) - ); + if (value == true) { + validateRemoteStoreSettingEnabled(settings, INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING); } } diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index a3fa2c7ee3112..7be9adc786f24 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -223,7 +223,11 @@ public final class IndexScopedSettings extends AbstractScopedSettings { FeatureFlags.REPLICATION_TYPE, Collections.singletonList(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING), FeatureFlags.REMOTE_STORE, - Arrays.asList(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING) + Arrays.asList( + IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, + IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING, + IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING + ) ); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index e1427df1c34ab..92f957633db84 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -511,7 +511,7 @@ public synchronized IndexShard createShard( Store remoteStore = null; if
(this.indexSettings.isRemoteStoreEnabled()) { Directory remoteDirectory = remoteDirectoryFactory.newDirectory( - clusterService.state().metadata().clusterUUID(), + this.indexSettings.getRemoteStoreRepository(), this.indexSettings, path ); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 657cb1ee55cb9..9c7f4804755d4 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -560,6 +560,7 @@ public final class IndexSettings { private final ReplicationType replicationType; private final boolean isRemoteStoreEnabled; private final boolean isRemoteTranslogStoreEnabled; + private final String remoteStoreRepository; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock private volatile Settings settings; private volatile IndexMetadata indexMetadata; @@ -721,6 +722,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti replicationType = ReplicationType.parseString(settings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); isRemoteTranslogStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, false); + remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY); this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings); @@ -979,6 +981,13 @@ public boolean isRemoteTranslogStoreEnabled() { return isRemoteTranslogStoreEnabled; } + /** + * Returns the remote store repository configured for this index. + */ + public String getRemoteStoreRepository() { + return remoteStoreRepository; + } + /** * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings.
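For illustration, a minimal sketch of how the index-scoped settings introduced by this patch fit together at index creation time. This is not itself part of the change; the builder style and the repository name "repo1" are taken from the unit tests later in this patch:

    import org.opensearch.common.settings.Settings;
    import org.opensearch.indices.replication.common.ReplicationType;

    // The repository setting is only accepted when index.remote_store.enabled is true,
    // and it must be a non-empty repository ID (enforced by the validator above);
    // remote store in turn requires segment replication.
    Settings indexSettings = Settings.builder()
        .put("index.replication.type", ReplicationType.SEGMENT)
        .put("index.remote_store.enabled", true)
        .put("index.remote_store.repository", "repo1")
        .build();

Because the setting is registered with Property.Final, updating it on an existing index fails, which the tests below verify.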
diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 0d32e8d56e4d2..a8ca9891d9743 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -59,6 +59,13 @@ public RemoteStoreRefreshListener(IndexShard indexShard) { .getDelegate()).getDelegate(); this.primaryTerm = indexShard.getOperationPrimaryTerm(); localSegmentChecksumMap = new HashMap<>(); + if (indexShard.shardRouting.primary()) { + try { + this.remoteDirectory.init(); + } catch (IOException e) { + logger.error("Exception while initialising RemoteSegmentStoreDirectory", e); + } + } } @Override diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index e02eac85beafb..de5ef8851ae80 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -851,7 +851,7 @@ public void testEnablingRemoteTranslogStoreFailsWhenRemoteSegmentDisabled() { () -> IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.get(indexSettings) ); assertEquals( - "Settings index.remote_store.translog.enabled cannot be enabled when index.remote_store.enabled is set to false", + "Settings index.remote_store.translog.enabled can only be set/enabled when index.remote_store.enabled is set to true", iae.getMessage() ); } @@ -876,4 +876,71 @@ public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDefault() { ); assertEquals("To enable index.remote_store.enabled, index.replication.type should be set to SEGMENT", iae.getMessage()); } + + public void testRemoteRepositoryDefaultSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertNull(settings.getRemoteStoreRepository()); + } + + public void testRemoteRepositoryExplicitSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, "repo1") + .build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertEquals("repo1", settings.getRemoteStoreRepository()); + } + + public void testUpdateRemoteRepositoryFails() { + Set<Setting<?>> remoteStoreSettingSet = new HashSet<>(); + remoteStoreSettingSet.add(IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING); + IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet); + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> settings.updateSettings( + Settings.builder().put("index.remote_store.repository", randomUnicodeOfLength(10)).build(), + Settings.builder(), + Settings.builder(), + "index" + ) + ); + assertEquals(error.getMessage(), "final index setting [index.remote_store.repository], not updateable"); + } + + public void testSetRemoteRepositoryFailsWhenRemoteStoreIsNotEnabled() { + Settings indexSettings = Settings.builder() + .put("index.replication.type", ReplicationType.SEGMENT) + .put("index.remote_store.enabled", false) + .put("index.remote_store.repository", "repo1")
.build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING.get(indexSettings) + ); + assertEquals( + "Settings index.remote_store.repository can only be set/enabled when index.remote_store.enabled is set to true", + iae.getMessage() + ); + } + + public void testSetRemoteRepositoryFailsWhenEmptyString() { + Settings indexSettings = Settings.builder() + .put("index.replication.type", ReplicationType.SEGMENT) + .put("index.remote_store.enabled", false) + .put("index.remote_store.repository", "") + .build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING.get(indexSettings) + ); + assertEquals("Setting index.remote_store.repository should be provided with non-empty repository ID", iae.getMessage()); + } } From b0e1f6abe58f545b87d759afe5794b6eac0be3c1 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Tue, 6 Sep 2022 09:02:12 -0700 Subject: [PATCH 36/78] [Segment Replication] Update flaky testOnNewCheckpointFromNewPrimaryCancelOngoingReplication unit test (#4414) * [Segment Replication] Update flaky testOnNewCheckpointFromNewPrimaryCancelOngoingReplication unit test Signed-off-by: Suraj Singh * Add changelog entry Signed-off-by: Suraj Singh * Update changelog entry Signed-off-by: Suraj Singh Signed-off-by: Suraj Singh --- CHANGELOG.md | 1 + .../replication/SegmentReplicationTarget.java | 2 +- .../SegmentReplicationTargetServiceTests.java | 20 +++++++++++-------- .../index/shard/IndexShardTestCase.java | 1 + 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e033e1ffb2bf..cb48b3aedeea5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352)) - [Segment Replication] Extend FileChunkWriter to allow cancel on transport client ([#4386](https://github.com/opensearch-project/OpenSearch/pull/4386)) - [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica ([#4363](https://github.com/opensearch-project/OpenSearch/pull/4363)) +- [Segment Replication] Update flaky testOnNewCheckpointFromNewPrimaryCancelOngoingReplication unit test ([#4414](https://github.com/opensearch-project/OpenSearch/pull/4414)) - Fixed the `_cat/shards/10_basic.yml` test cases ([#4408](https://github.com/opensearch-project/OpenSearch/pull/4408)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 7c28406036ddd..6a9406aca13b9 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -160,9 +160,9 @@ public void startReplication(ActionListener<Void> listener) { final StepListener<GetSegmentFilesResponse> getFilesListener = new StepListener<>(); final StepListener<Void> finalizeListener = new StepListener<>(); + cancellableThreads.checkForCancel(); logger.trace("[shardId {}] Replica starting replication [id {}]", shardId().getId(), getId()); // Get list of files to copy from this checkpoint.
- cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO); source.getCheckpointMetadata(getId(), checkpoint, checkpointInfoListener); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 1d253b0a9a300..f2eb635f24bbf 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -15,6 +15,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -29,6 +30,7 @@ import java.util.concurrent.TimeUnit; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.doAnswer; @@ -37,6 +39,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.eq; +import static org.opensearch.indices.replication.SegmentReplicationState.Stage.CANCELLED; public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { @@ -215,24 +218,25 @@ public void testOnNewCheckpointFromNewPrimaryCancelOngoingReplication() throws I // Mocking response when startReplication is called on targetSpy we send a new checkpoint to serviceSpy and later reduce countdown // of latch. doAnswer(invocation -> { - final ActionListener<Void> listener = invocation.getArgument(0); + // short circuit loop on new checkpoint request + doReturn(null).when(serviceSpy).startReplication(eq(newPrimaryCheckpoint), eq(replicaShard), any()); // a new checkpoint arrives before we've completed. serviceSpy.onNewCheckpoint(newPrimaryCheckpoint, replicaShard); - listener.onResponse(null); - latch.countDown(); + try { + invocation.callRealMethod(); + } catch (CancellableThreads.ExecutionCancelledException e) { + latch.countDown(); + } return null; }).when(targetSpy).startReplication(any()); - doNothing().when(targetSpy).onDone(); // start replication. This adds the target to on-ongoing replication collection serviceSpy.startReplication(targetSpy); - + latch.await(); // wait for the new checkpoint to arrive, before the listener completes.
- latch.await(5, TimeUnit.SECONDS); - doNothing().when(targetSpy).startReplication(any()); + assertEquals(CANCELLED, targetSpy.state().getStage()); verify(targetSpy, times(1)).cancel("Cancelling stuck target after new primary"); verify(serviceSpy, times(1)).startReplication(eq(newPrimaryCheckpoint), eq(replicaShard), any()); - closeShards(replicaShard); } public void testNewCheckpointBehindCurrentCheckpoint() { diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 1b40cb4f2dfa3..0838a1fe87aa4 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -1207,6 +1207,7 @@ public void getCheckpointMetadata( copyState.getPendingDeleteFiles() ) ); + copyState.decRef(); } catch (IOException e) { logger.error("Unexpected error computing CopyState", e); Assert.fail("Failed to compute copyState"); From f97cb4b4bcf8ba15ed251b9ff97053a2f4f4619d Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 6 Sep 2022 10:06:24 -0700 Subject: [PATCH 37/78] Segment Replication - Fix NoSuchFileException errors caused when computing metadata snapshot on primary shards. (#4366) * Segment Replication - Fix NoSuchFileException errors caused when computing metadata snapshot on primary shards. This change fixes the errors that occur when computing metadata snapshots on primary shards from the latest in-memory SegmentInfos. The error occurs when a segments_N file that is referenced by the in-memory infos is deleted as part of a concurrent commit. The segments themselves are incref'd by IndexWriter.incRefDeleter but the commit file (Segments_N) is not. This change resolves this by ignoring the segments_N file when computing metadata for CopyState and only sending incref'd segment files to replicas. Signed-off-by: Marc Handalian * Fix spotless. Signed-off-by: Marc Handalian * Update StoreTests.testCleanupAndPreserveLatestCommitPoint to assert additional segments are deleted. Signed-off-by: Marc Handalian * Rename snapshot to metadataMap in CheckpointInfoResponse. Signed-off-by: Marc Handalian * Refactor segmentReplicationDiff method to compute off two maps instead of MetadataSnapshots. Signed-off-by: Marc Handalian * Fix spotless. Signed-off-by: Marc Handalian * Revert catchall in SegmentReplicationSourceService. Signed-off-by: Marc Handalian * Revert log lvl change. Signed-off-by: Marc Handalian * Fix SegmentReplicationTargetTests Signed-off-by: Marc Handalian * Cleanup unused logger. 
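The idea in miniature, as a hedged sketch (the class and method names here are invented for illustration, and plain file-name-to-checksum maps stand in for OpenSearch's StoreFileMetadata; the authoritative implementation is Store#segmentReplicationDiff in the diff below):

    import org.apache.lucene.index.IndexFileNames;

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    // Simplified sketch of the fix described above: when deciding which files a
    // replica must fetch, skip the commit point (segments_N). Only the individual
    // segment files are incref'd on the primary, so segments_N can vanish under a
    // concurrent commit while this computation runs.
    final class SegmentsFileAwareDiff {
        static List<String> missingOnTarget(Map<String, String> source, Map<String, String> target) {
            List<String> missing = new ArrayList<>();
            for (Map.Entry<String, String> file : source.entrySet()) {
                if (file.getKey().startsWith(IndexFileNames.SEGMENTS)) {
                    continue; // never ask a replica to copy segments_N
                }
                if (!target.containsKey(file.getKey())) {
                    missing.add(file.getKey());
                }
            }
            return missing;
        }
    }

Skipping the commit file is safe here because the replica receives the primary's SegmentInfos as serialized bytes and commits locally, so it never needs to copy segments_N itself.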
Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian Co-authored-by: Suraj Singh --- CHANGELOG.md | 1 + .../replication/SegmentReplicationIT.java | 50 ++++++- .../org/opensearch/index/store/Store.java | 133 +++++++++--------- .../replication/CheckpointInfoResponse.java | 30 ++-- .../SegmentReplicationSourceService.java | 7 +- .../replication/SegmentReplicationTarget.java | 36 ++--- .../indices/replication/common/CopyState.java | 28 +--- .../SegmentReplicationIndexShardTests.java | 7 +- .../opensearch/index/store/StoreTests.java | 131 ++++++++++++++--- .../OngoingSegmentReplicationsTests.java | 28 ++-- .../SegmentReplicationSourceHandlerTests.java | 8 +- .../SegmentReplicationSourceServiceTests.java | 4 +- .../SegmentReplicationTargetServiceTests.java | 2 +- .../SegmentReplicationTargetTests.java | 48 ++----- .../replication/common/CopyStateTests.java | 10 +- .../index/shard/IndexShardTestCase.java | 8 +- 16 files changed, 301 insertions(+), 230 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cb48b3aedeea5..a2b6528783a39 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352)) - [Segment Replication] Extend FileChunkWriter to allow cancel on transport client ([#4386](https://github.com/opensearch-project/OpenSearch/pull/4386)) - [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica ([#4363](https://github.com/opensearch-project/OpenSearch/pull/4363)) +- Fix NoSuchFileExceptions with segment replication when computing primary metadata snapshots ([#4366](https://github.com/opensearch-project/OpenSearch/pull/4366)) - [Segment Replication] Update flaky testOnNewCheckpointFromNewPrimaryCancelOngoingReplication unit test ([#4414](https://github.com/opensearch-project/OpenSearch/pull/4414)) - Fixed the `_cat/shards/10_basic.yml` test cases ([#4408](https://github.com/opensearch-project/OpenSearch/pull/4408)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 16e9d78b17826..9b2ab753832d3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -9,7 +9,6 @@ package org.opensearch.indices.replication; import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.apache.lucene.index.SegmentInfos; import org.junit.BeforeClass; import org.opensearch.action.admin.indices.segments.IndexShardSegments; import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; @@ -586,13 +585,56 @@ private void assertSegmentStats(int numberOfReplicas) throws IOException { ClusterState state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState(); final DiscoveryNode replicaNode = state.nodes().resolveNode(replicaShardRouting.currentNodeId()); IndexShard indexShard = getIndexShard(replicaNode.getName()); - final String lastCommitSegmentsFileName = SegmentInfos.getLastCommitSegmentsFileName(indexShard.store().directory()); // calls to readCommit will fail if a valid commit point and all its segments are not in the store.
- SegmentInfos.readCommit(indexShard.store().directory(), lastCommitSegmentsFileName); + indexShard.store().readLastCommittedSegmentsInfo(); } } } + public void testDropPrimaryDuringReplication() throws Exception { + final Settings settings = Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 6) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + final String primaryNode = internalCluster().startDataOnlyNode(Settings.EMPTY); + createIndex(INDEX_NAME, settings); + internalCluster().startDataOnlyNodes(6); + ensureGreen(INDEX_NAME); + + int initialDocCount = scaledRandomIntBetween(100, 200); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + // don't wait for replication to complete, stop the primary immediately. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + ensureYellow(INDEX_NAME); + + // start another replica. + internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + // index another doc and refresh - without this the new replica won't catch up. + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").get(); + + flushAndRefresh(INDEX_NAME); + waitForReplicaUpdate(); + assertSegmentStats(6); + } + } + /** * Waits until the replica is caught up to the latest primary segments gen. * @throws Exception if assertion fails @@ -611,10 +653,12 @@ private void waitForReplicaUpdate() throws Exception { final List<ShardSegments> replicaShardSegments = segmentListMap.get(false); // if we don't have any segments yet, proceed. final ShardSegments primaryShardSegments = primaryShardSegmentsList.stream().findFirst().get(); + logger.debug("Primary Segments: {}", primaryShardSegments.getSegments()); if (primaryShardSegments.getSegments().isEmpty() == false) { final Map<String, Segment> latestPrimarySegments = getLatestSegments(primaryShardSegments); final Long latestPrimaryGen = latestPrimarySegments.values().stream().findFirst().map(Segment::getGeneration).get(); for (ShardSegments shardSegments : replicaShardSegments) { + logger.debug("Replica {} Segments: {}", shardSegments.getShardRouting(), shardSegments.getSegments()); final boolean isReplicaCaughtUpToPrimary = shardSegments.getSegments() .stream() .anyMatch(segment -> segment.getGeneration() == latestPrimaryGen); diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 58598ab2d08f4..9122c950a6ab6 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -105,6 +105,7 @@ import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -122,6 +123,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; +import static org.opensearch.index.store.Store.MetadataSnapshot.loadMetadata; /** * A Store provides plain access to files written by an opensearch index shard.
Each shard @@ -334,6 +336,51 @@ public MetadataSnapshot getMetadata(SegmentInfos segmentInfos) throws IOExceptio return new MetadataSnapshot(segmentInfos, directory, logger); } + /** + * Segment Replication method - Fetch a map of StoreFileMetadata for segments, ignoring Segment_N files. + * @param segmentInfos {@link SegmentInfos} from which to compute metadata. + * @return {@link Map} map file name to {@link StoreFileMetadata}. + */ + public Map<String, StoreFileMetadata> getSegmentMetadataMap(SegmentInfos segmentInfos) throws IOException { + assert indexSettings.isSegRepEnabled(); + return loadMetadata(segmentInfos, directory, logger, true).fileMetadata; + } + + /** + * Segment Replication method + * Returns a diff between the Maps of StoreFileMetadata that can be used for getting list of files to copy over to a replica for segment replication. The returned diff will hold a list of files that are: + * <ul> + * <li>identical: they exist in both maps and they can be considered the same ie. they don't need to be recovered</li> + * <li>different: they exist in both maps but they are not identical</li> + * <li>missing: files that exist in the source but not in the target</li> + * </ul>
+ */ + public static RecoveryDiff segmentReplicationDiff(Map<String, StoreFileMetadata> source, Map<String, StoreFileMetadata> target) { + final List<StoreFileMetadata> identical = new ArrayList<>(); + final List<StoreFileMetadata> different = new ArrayList<>(); + final List<StoreFileMetadata> missing = new ArrayList<>(); + for (StoreFileMetadata value : source.values()) { + if (value.name().startsWith(IndexFileNames.SEGMENTS)) { + continue; + } + if (target.containsKey(value.name()) == false) { + missing.add(value); + } else { + final StoreFileMetadata fileMetadata = target.get(value.name()); + if (fileMetadata.isSame(value)) { + identical.add(value); + } else { + different.add(value); + } + } + } + return new RecoveryDiff( + Collections.unmodifiableList(identical), + Collections.unmodifiableList(different), + Collections.unmodifiableList(missing) + ); + } + /** * Renames all the given files from the key of the map to the * value of the map. All successfully renamed files are removed from the map in-place. @@ -709,31 +756,34 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetadata) thr } /** - * This method deletes every file in this store that is not contained in either the remote or local metadata snapshots. + * Segment Replication method - + * This method deletes every file in this store that is not referenced by the passed in SegmentInfos or + * part of the latest on-disk commit point. * This method is used for segment replication when the in memory SegmentInfos can be ahead of the on disk segment file. * In this case files from both snapshots must be preserved. Verification has been done that all files are present on disk. * @param reason the reason for this cleanup operation logged for each deleted file - * @param localSnapshot The local snapshot from in memory SegmentInfos. + * @param infos {@link SegmentInfos} Files from this infos will be preserved on disk if present. * @throws IllegalStateException if the latest snapshot in this store differs from the given one after the cleanup. */ - public void cleanupAndPreserveLatestCommitPoint(String reason, MetadataSnapshot localSnapshot) throws IOException { + public void cleanupAndPreserveLatestCommitPoint(String reason, SegmentInfos infos) throws IOException { + assert indexSettings.isSegRepEnabled(); // fetch a snapshot from the latest on disk Segments_N file. This can be behind // the passed in local in memory snapshot, so we want to ensure files it references are not removed.
metadataLock.writeLock().lock(); try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { - cleanupFiles(reason, localSnapshot, getMetadata(readLastCommittedSegmentsInfo())); + cleanupFiles(reason, getMetadata(readLastCommittedSegmentsInfo()), infos.files(true)); } finally { metadataLock.writeLock().unlock(); } } - private void cleanupFiles(String reason, MetadataSnapshot localSnapshot, @Nullable MetadataSnapshot additionalSnapshot) + private void cleanupFiles(String reason, MetadataSnapshot localSnapshot, @Nullable Collection<String> additionalFiles) throws IOException { assert metadataLock.isWriteLockedByCurrentThread(); for (String existingFile : directory.listAll()) { if (Store.isAutogenerated(existingFile) || localSnapshot.contains(existingFile) - || (additionalSnapshot != null && additionalSnapshot.contains(existingFile))) { + || (additionalFiles != null && additionalFiles.contains(existingFile))) { // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete // checksum) continue; @@ -825,17 +875,9 @@ public void commitSegmentInfos(SegmentInfos latestSegmentInfos, long maxSeqNo, l userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); latestSegmentInfos.setUserData(userData, true); latestSegmentInfos.commit(directory()); - - // similar to TrimUnsafeCommits, create a commit with an appending IW, this will delete old commits and ensure all files - // associated with the SegmentInfos.commit are fsynced. - final List<IndexCommit> existingCommits = DirectoryReader.listCommits(directory); - assert existingCommits.isEmpty() == false : "Expected at least one commit but none found"; - final IndexCommit lastIndexCommit = existingCommits.get(existingCommits.size() - 1); - assert latestSegmentInfos.getSegmentsFileName().equals(lastIndexCommit.getSegmentsFileName()); - try (IndexWriter writer = newAppendingIndexWriter(directory, lastIndexCommit)) { - writer.setLiveCommitData(lastIndexCommit.getUserData().entrySet()); - writer.commit(); - } + directory.sync(latestSegmentInfos.files(true)); + directory.syncMetaData(); + cleanupAndPreserveLatestCommitPoint("After commit", latestSegmentInfos); } finally { metadataLock.writeLock().unlock(); } @@ -1033,6 +1075,11 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg } static LoadedMetadata loadMetadata(SegmentInfos segmentInfos, Directory directory, Logger logger) throws IOException { + return loadMetadata(segmentInfos, directory, logger, false); + } + + static LoadedMetadata loadMetadata(SegmentInfos segmentInfos, Directory directory, Logger logger, boolean ignoreSegmentsFile) + throws IOException { long numDocs = Lucene.getNumDocs(segmentInfos); Map<String, String> commitUserDataBuilder = new HashMap<>(); commitUserDataBuilder.putAll(segmentInfos.getUserData()); @@ -1067,8 +1114,10 @@ static LoadedMetadata loadMetadata(SegmentInfos segmentInfos, Directory director if (maxVersion == null) { maxVersion = org.opensearch.Version.CURRENT.minimumIndexCompatibilityVersion().luceneVersion; } - final String segmentsFile = segmentInfos.getSegmentsFileName(); - checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); + if (ignoreSegmentsFile == false) { + final String segmentsFile = segmentInfos.getSegmentsFileName(); + checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); + } return new LoadedMetadata(unmodifiableMap(builder), unmodifiableMap(commitUserDataBuilder), numDocs); } @@ -1148,7 +1197,6 @@ public Map<String, StoreFileMetadata> asMap() { * Helper
method used to group store files according to segment and commit. * * @see MetadataSnapshot#recoveryDiff(MetadataSnapshot) - * @see MetadataSnapshot#segmentReplicationDiff(MetadataSnapshot) */ private Iterable<List<StoreFileMetadata>> getGroupedFilesIterable() { final Map<String, List<StoreFileMetadata>> perSegment = new HashMap<>(); @@ -1241,51 +1289,6 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { return recoveryDiff; } - /** - * Segment Replication method - * Returns a diff between the two snapshots that can be used for getting list of files to copy over to a replica for segment replication. The given snapshot is treated as the - * target and this snapshot as the source. The returned diff will hold a list of files that are: - * <ul> - * <li>identical: they exist in both snapshots and they can be considered the same ie. they don't need to be recovered</li> - * <li>different: they exist in both snapshots but their they are not identical</li> - * <li>missing: files that exist in the source but not in the target</li> - * </ul>
- */ - public RecoveryDiff segmentReplicationDiff(MetadataSnapshot recoveryTargetSnapshot) { - final List<StoreFileMetadata> identical = new ArrayList<>(); - final List<StoreFileMetadata> different = new ArrayList<>(); - final List<StoreFileMetadata> missing = new ArrayList<>(); - final ArrayList<StoreFileMetadata> identicalFiles = new ArrayList<>(); - for (List<StoreFileMetadata> segmentFiles : getGroupedFilesIterable()) { - identicalFiles.clear(); - boolean consistent = true; - for (StoreFileMetadata meta : segmentFiles) { - StoreFileMetadata storeFileMetadata = recoveryTargetSnapshot.get(meta.name()); - if (storeFileMetadata == null) { - // Do not consider missing files as inconsistent in SegRep as replicas may lag while primary updates - // documents and generate new files specific to a segment - missing.add(meta); - } else if (storeFileMetadata.isSame(meta) == false) { - consistent = false; - different.add(meta); - } else { - identicalFiles.add(meta); - } - } - if (consistent) { - identical.addAll(identicalFiles); - } else { - different.addAll(identicalFiles); - } - } - RecoveryDiff recoveryDiff = new RecoveryDiff( - Collections.unmodifiableList(identical), - Collections.unmodifiableList(different), - Collections.unmodifiableList(missing) - ); - return recoveryDiff; - } - /** * Returns the number of files in this snapshot */ diff --git a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java index a73a3b54184da..48c2dfd30f589 100644 --- a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java +++ b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java @@ -10,13 +10,12 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.transport.TransportResponse; import java.io.IOException; -import java.util.Set; +import java.util.Map; /** * Response returned from a {@link SegmentReplicationSource} that includes the file metadata, and SegmentInfos
- private final Set pendingDeleteFiles; public CheckpointInfoResponse( final ReplicationCheckpoint checkpoint, - final Store.MetadataSnapshot snapshot, - final byte[] infosBytes, - final Set additionalFiles + final Map metadataMap, + final byte[] infosBytes ) { this.checkpoint = checkpoint; - this.snapshot = snapshot; + this.metadataMap = metadataMap; this.infosBytes = infosBytes; - this.pendingDeleteFiles = additionalFiles; } public CheckpointInfoResponse(StreamInput in) throws IOException { this.checkpoint = new ReplicationCheckpoint(in); - this.snapshot = new Store.MetadataSnapshot(in); + this.metadataMap = in.readMap(StreamInput::readString, StoreFileMetadata::new); this.infosBytes = in.readByteArray(); - this.pendingDeleteFiles = in.readSet(StoreFileMetadata::new); } @Override public void writeTo(StreamOutput out) throws IOException { checkpoint.writeTo(out); - snapshot.writeTo(out); + out.writeMap(metadataMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); out.writeByteArray(infosBytes); - out.writeCollection(pendingDeleteFiles); } public ReplicationCheckpoint getCheckpoint() { return checkpoint; } - public Store.MetadataSnapshot getSnapshot() { - return snapshot; + public Map getMetadataMap() { + return metadataMap; } public byte[] getInfosBytes() { return infosBytes; } - - public Set getPendingDeleteFiles() { - return pendingDeleteFiles; - } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index db3f87201b774..91b8243440ac5 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -133,12 +133,7 @@ public void messageReceived(CheckpointInfoRequest request, TransportChannel chan ); final CopyState copyState = ongoingSegmentReplications.prepareForReplication(request, segmentSegmentFileChunkWriter); channel.sendResponse( - new CheckpointInfoResponse( - copyState.getCheckpoint(), - copyState.getMetadataSnapshot(), - copyState.getInfosBytes(), - copyState.getPendingDeleteFiles() - ) + new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) ); timer.stop(); logger.trace( diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 6a9406aca13b9..26bec2203c599 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -23,6 +23,7 @@ import org.opensearch.action.StepListener; import org.opensearch.common.UUIDs; import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.CancellableThreads; import org.opensearch.index.shard.IndexShard; @@ -37,12 +38,9 @@ import java.io.IOException; import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; +import java.util.Collections; +import java.util.Map; /** * Represents the target of a replication event. 
@@ -178,9 +176,7 @@ private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener<GetSegmentFilesResponse> - final List<StoreFileMetadata> filesToFetch = new ArrayList<>(diff.missing); - Set<String> storeFiles = new HashSet<>(Arrays.asList(store.directory().listAll())); - final Set<StoreFileMetadata> pendingDeleteFiles = checkpointInfo.getPendingDeleteFiles() - .stream() - .filter(f -> storeFiles.contains(f.name()) == false) - .collect(Collectors.toSet()); - - filesToFetch.addAll(pendingDeleteFiles); - logger.trace("Files to fetch {}", filesToFetch); - - for (StoreFileMetadata file : filesToFetch) { + for (StoreFileMetadata file : diff.missing) { state.getIndex().addFileDetail(file.name(), file.length(), false); } // always send a req even if not fetching files so the primary can clear the copyState for this shard. state.setStage(SegmentReplicationState.Stage.GET_FILES); cancellableThreads.checkForCancel(); - source.getSegmentFiles(getId(), checkpointInfo.getCheckpoint(), filesToFetch, store, getFilesListener); + source.getSegmentFiles(getId(), checkpointInfo.getCheckpoint(), diff.missing, store, getFilesListener); } private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse, ActionListener<Void> listener) { @@ -231,7 +217,7 @@ private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse, responseCheckpoint.getSegmentsGen() ); indexShard.finalizeReplication(infos, responseCheckpoint.getSeqNo()); - store.cleanupAndPreserveLatestCommitPoint("finalize - clean with in memory infos", store.getMetadata(infos)); + store.cleanupAndPreserveLatestCommitPoint("finalize - clean with in memory infos", infos); } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { // this is a fatal exception at this stage. // this means we transferred files from the remote that have not be checksummed and they are @@ -280,11 +266,13 @@ private ChecksumIndexInput toIndexInput(byte[] input) { ); } - Store.MetadataSnapshot getMetadataSnapshot() throws IOException { + Map<String, StoreFileMetadata> getMetadataMap() throws IOException { if (indexShard.getSegmentInfosSnapshot() == null) { - return Store.MetadataSnapshot.EMPTY; + return Collections.emptyMap(); + } + try (final GatedCloseable<SegmentInfos> snapshot = indexShard.getSegmentInfosSnapshot()) { + return store.getSegmentMetadataMap(snapshot.get()); } - return store.getMetadata(indexShard.getSegmentInfosSnapshot().get()); } @Override diff --git a/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java index c0e0b4dee2b3f..1dd0886fd2f36 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java @@ -15,14 +15,12 @@ import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.index.shard.IndexShard; -import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import java.io.IOException; import java.io.UncheckedIOException; -import java.util.HashSet; -import java.util.Set; +import java.util.Map; /** * An Opensearch-specific version of Lucene's CopyState class that @@ -37,8 +35,7 @@ public class CopyState extends AbstractRefCounted { private final ReplicationCheckpoint requestedReplicationCheckpoint; /** Actual ReplicationCheckpoint returned by the shard */ private final ReplicationCheckpoint
replicationCheckpoint; - private final Store.MetadataSnapshot metadataSnapshot; - private final HashSet<StoreFileMetadata> pendingDeleteFiles; + private final Map<String, StoreFileMetadata> metadataMap; private final byte[] infosBytes; private GatedCloseable<IndexCommit> commitRef; private final IndexShard shard; @@ -49,7 +46,7 @@ public CopyState(ReplicationCheckpoint requestedReplicationCheckpoint, IndexShar this.shard = shard; this.segmentInfosRef = shard.getSegmentInfosSnapshot(); SegmentInfos segmentInfos = this.segmentInfosRef.get(); - this.metadataSnapshot = shard.store().getMetadata(segmentInfos); + this.metadataMap = shard.store().getSegmentMetadataMap(segmentInfos); this.replicationCheckpoint = new ReplicationCheckpoint( shard.shardId(), shard.getOperationPrimaryTerm(), @@ -57,18 +54,7 @@ public CopyState(ReplicationCheckpoint requestedReplicationCheckpoint, IndexShar shard.getProcessedLocalCheckpoint(), segmentInfos.getVersion() ); - - // Send files that are merged away in the latest SegmentInfos but not in the latest on disk Segments_N. - // This ensures that the store on replicas is in sync with the store on primaries. this.commitRef = shard.acquireLastIndexCommit(false); - Store.MetadataSnapshot metadata = shard.store().getMetadata(this.commitRef.get()); - final Store.RecoveryDiff diff = metadata.recoveryDiff(this.metadataSnapshot); - this.pendingDeleteFiles = new HashSet<>(diff.missing); - if (this.pendingDeleteFiles.isEmpty()) { - // If there are no additional files we can release the last commit immediately. - this.commitRef.close(); - this.commitRef = null; - } ByteBuffersDataOutput buffer = new ByteBuffersDataOutput(); // resource description and name are not used, but resource description cannot be null @@ -95,18 +81,14 @@ public ReplicationCheckpoint getCheckpoint() { return replicationCheckpoint; } - public Store.MetadataSnapshot getMetadataSnapshot() { - return metadataSnapshot; + public Map<String, StoreFileMetadata> getMetadataMap() { + return metadataMap; } public byte[] getInfosBytes() { return infosBytes; } - public Set<StoreFileMetadata> getPendingDeleteFiles() { - return pendingDeleteFiles; - } - public IndexShard getShard() { return shard; } diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 88a3bdad53d0c..3af882a8087ec 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -489,12 +489,7 @@ private void resolveCheckpointInfoResponseListener(ActionListener additionalSegments = new ArrayList<>(); + for (String file : store.directory().listAll()) { + if (commitMetadata.contains(file) == false) { + additionalSegments.add(file); + } + } + assertFalse(additionalSegments.isEmpty()); + + // clean up everything not in the latest commit point.
+ store.cleanupAndPreserveLatestCommitPoint("test", store.readLastCommittedSegmentsInfo()); + + // we want to ensure commitMetadata files are preserved after calling cleanup + for (String existingFile : store.directory().listAll()) { + assertTrue(commitMetadata.contains(existingFile)); + assertFalse(additionalSegments.contains(existingFile)); + } + deleteContent(store.directory()); + IOUtils.close(store); + } + + public void testGetSegmentMetadataMap() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + Store store = new Store( + shardId, + SEGMENT_REPLICATION_INDEX_SETTINGS, + new NIOFSDirectory(createTempDir()), + new DummyShardLock(shardId) + ); + store.createEmpty(Version.LATEST); + final Map metadataSnapshot = store.getSegmentMetadataMap(store.readLastCommittedSegmentsInfo()); + // no docs indexed only _N file exists. + assertTrue(metadataSnapshot.isEmpty()); + + // commit some docs to create a commit point. + commitRandomDocs(store); + + final Map snapshotAfterCommit = store.getSegmentMetadataMap(store.readLastCommittedSegmentsInfo()); + assertFalse(snapshotAfterCommit.isEmpty()); + assertFalse(snapshotAfterCommit.keySet().stream().anyMatch((name) -> name.startsWith(IndexFileNames.SEGMENTS))); + store.close(); + } + + public void testSegmentReplicationDiff() { + final String segmentName = "_0.si"; + final StoreFileMetadata SEGMENT_FILE = new StoreFileMetadata(segmentName, 1L, "0", Version.LATEST); + // source has file target is missing. + Store.RecoveryDiff diff = Store.segmentReplicationDiff(Map.of(segmentName, SEGMENT_FILE), Collections.emptyMap()); + assertEquals(List.of(SEGMENT_FILE), diff.missing); + assertTrue(diff.different.isEmpty()); + assertTrue(diff.identical.isEmpty()); + + // target has file not on source. + diff = Store.segmentReplicationDiff(Collections.emptyMap(), Map.of(segmentName, SEGMENT_FILE)); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + assertTrue(diff.identical.isEmpty()); + + // source and target have identical file. + diff = Store.segmentReplicationDiff(Map.of(segmentName, SEGMENT_FILE), Map.of(segmentName, SEGMENT_FILE)); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + assertEquals(List.of(SEGMENT_FILE), diff.identical); + + // source has diff copy of same file as target. + StoreFileMetadata SOURCE_DIFF_FILE = new StoreFileMetadata(segmentName, 1L, "abc", Version.LATEST); + diff = Store.segmentReplicationDiff(Map.of(segmentName, SOURCE_DIFF_FILE), Map.of(segmentName, SEGMENT_FILE)); + assertTrue(diff.missing.isEmpty()); + assertEquals(List.of(SOURCE_DIFF_FILE), diff.different); + assertTrue(diff.identical.isEmpty()); + + // ignore _N files if included in source map. 
+ final String segmentsFile = IndexFileNames.SEGMENTS.concat("_2"); + StoreFileMetadata SEGMENTS_FILE = new StoreFileMetadata(segmentsFile, 1L, "abc", Version.LATEST); + diff = Store.segmentReplicationDiff(Map.of(segmentsFile, SEGMENTS_FILE), Collections.emptyMap()); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + assertTrue(diff.identical.isEmpty()); + } + + private void commitRandomDocs(Store store) throws IOException { + IndexWriter writer = indexRandomDocs(store); + writer.commit(); + writer.close(); + } + + private IndexWriter indexRandomDocs(Store store) throws IOException { IndexWriterConfig indexWriterConfig = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec( TestUtil.getDefaultCodec() ); + indexWriterConfig.setCommitOnClose(false); indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig); int docs = 1 + random().nextInt(100); @@ -1171,21 +1281,6 @@ public void testcleanupAndPreserveLatestCommitPoint() throws IOException { ); doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random())))); writer.addDocument(doc); - writer.commit(); - writer.close(); - - Store.MetadataSnapshot commitMetadata = store.getMetadata(); - - Store.MetadataSnapshot refreshMetadata = Store.MetadataSnapshot.EMPTY; - - store.cleanupAndPreserveLatestCommitPoint("test", refreshMetadata); - - // we want to ensure commitMetadata files are preserved after calling cleanup - for (String existingFile : store.directory().listAll()) { - assert (commitMetadata.contains(existingFile) == true); - } - - deleteContent(store.directory()); - IOUtils.close(store); + return writer; } } diff --git a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java index f49ee0471b5e8..bd3106454f49b 100644 --- a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java @@ -11,28 +11,30 @@ import org.junit.Assert; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexService; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.shard.ShardId; -import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; -import java.util.List; +import java.util.concurrent.CountDownLatch; +import 
java.util.concurrent.TimeUnit; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -55,15 +57,18 @@ public class OngoingSegmentReplicationsTests extends IndexShardTestCase { private GetSegmentFilesRequest getSegmentFilesRequest; - final Settings settings = Settings.builder().put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()).build(); + final Settings settings = Settings.builder() + .put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); @Override public void setUp() throws Exception { super.setUp(); - primary = newStartedShard(true); - replica = newShard(primary.shardId(), false); + primary = newStartedShard(true, settings); + replica = newShard(false, settings, new NRTReplicationEngineFactory()); recoverReplica(replica, primary, true); replicaDiscoveryNode = replica.recoveryState().getTargetNode(); primaryDiscoveryNode = replica.recoveryState().getSourceNode(); @@ -93,6 +98,8 @@ public void tearDown() throws Exception { } public void testPrepareAndSendSegments() throws IOException { + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + primary.refresh("Test"); OngoingSegmentReplications replications = spy(new OngoingSegmentReplications(mockIndicesService, recoverySettings)); final CheckpointInfoRequest request = new CheckpointInfoRequest( 1L, @@ -112,17 +119,14 @@ public void testPrepareAndSendSegments() throws IOException { 1L, replica.routingEntry().allocationId().getId(), replicaDiscoveryNode, - new ArrayList<>(copyState.getMetadataSnapshot().asMap().values()), + new ArrayList<>(copyState.getMetadataMap().values()), testCheckpoint ); - final Collection<StoreFileMetadata> expectedFiles = List.copyOf(primary.store().getMetadata().asMap().values()); replications.startSegmentCopy(getSegmentFilesRequest, new ActionListener<>() { @Override public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { - assertEquals(1, getSegmentFilesResponse.files.size()); - assertEquals(1, expectedFiles.size()); - assertTrue(expectedFiles.stream().findFirst().get().isSame(getSegmentFilesResponse.files.get(0))); + assertEquals(copyState.getMetadataMap().size(), getSegmentFilesResponse.files.size()); assertEquals(0, copyState.refCount()); assertFalse(replications.isInCopyStateMap(request.getCheckpoint())); assertEquals(0, replications.size()); @@ -181,7 +185,7 @@ public void testCancelReplication_AfterSendFilesStarts() throws IOException, Int 1L, replica.routingEntry().allocationId().getId(), replicaDiscoveryNode, - new ArrayList<>(copyState.getMetadataSnapshot().asMap().values()), + new ArrayList<>(copyState.getMetadataMap().values()), testCheckpoint ); replications.startSegmentCopy(getSegmentFilesRequest, new ActionListener<>() { diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java index 5f6ec7e505805..cde5cd980a91d 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java @@ -19,6 +19,7 @@ import
org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.StoreFileMetadata; @@ -76,7 +77,7 @@ public void testSendFiles() throws IOException { 1 ); - final List expectedFiles = List.copyOf(copyState.getMetadataSnapshot().asMap().values()); + final List expectedFiles = List.copyOf(copyState.getMetadataMap().values()); final GetSegmentFilesRequest getSegmentFilesRequest = new GetSegmentFilesRequest( 1L, @@ -137,6 +138,9 @@ public void onFailure(Exception e) { } public void testSendFileFails() throws IOException { + // index some docs on the primary so a segment is created. + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + primary.refresh("Test"); chunkWriter = (fileMetadata, position, content, lastChunk, totalTranslogOps, listener) -> listener.onFailure( new OpenSearchException("Test") ); @@ -153,7 +157,7 @@ public void testSendFileFails() throws IOException { 1 ); - final List expectedFiles = List.copyOf(copyState.getMetadataSnapshot().asMap().values()); + final List expectedFiles = List.copyOf(copyState.getMetadataMap().values()); final GetSegmentFilesRequest getSegmentFilesRequest = new GetSegmentFilesRequest( 1L, diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java index 4bfdd81d50a1e..6183f1e5d9dfb 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java @@ -121,9 +121,7 @@ public void testCheckpointInfo() { public void onResponse(CheckpointInfoResponse response) { assertEquals(testCheckpoint, response.getCheckpoint()); assertNotNull(response.getInfosBytes()); - // CopyStateTests sets up one pending delete file and one committed segments file - assertEquals(1, response.getPendingDeleteFiles().size()); - assertEquals(1, response.getSnapshot().size()); + assertEquals(1, response.getMetadataMap().size()); } @Override diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index f2eb635f24bbf..7437cb22e44d1 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -62,7 +62,7 @@ public void setUp() throws Exception { .put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()) .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - primaryShard = newStartedShard(true); + primaryShard = newStartedShard(true, settings); replicaShard = newShard(false, settings, new NRTReplicationEngineFactory()); recoverReplica(replicaShard, primaryShard, true); checkpoint = new ReplicationCheckpoint(replicaShard.shardId(), 0L, 0L, 0L, 0L); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java 
b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 11217a46b3c69..f8341573770a6 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -18,7 +18,6 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexFormatTooNewException; -import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.store.ByteBuffersDataOutput; import org.apache.lucene.store.ByteBuffersIndexOutput; import org.apache.lucene.store.Directory; @@ -51,7 +50,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.Random; import java.util.Arrays; @@ -71,26 +69,13 @@ public class SegmentReplicationTargetTests extends IndexShardTestCase { private ReplicationCheckpoint repCheckpoint; private ByteBuffersDataOutput buffer; - private static final StoreFileMetadata SEGMENTS_FILE = new StoreFileMetadata(IndexFileNames.SEGMENTS, 1L, "0", Version.LATEST); - private static final StoreFileMetadata SEGMENTS_FILE_DIFF = new StoreFileMetadata( - IndexFileNames.SEGMENTS, - 5L, - "different", - Version.LATEST - ); - private static final StoreFileMetadata PENDING_DELETE_FILE = new StoreFileMetadata("pendingDelete.del", 1L, "1", Version.LATEST); + private static final String SEGMENT_NAME = "_0.si"; + private static final StoreFileMetadata SEGMENT_FILE = new StoreFileMetadata(SEGMENT_NAME, 1L, "0", Version.LATEST); + private static final StoreFileMetadata SEGMENT_FILE_DIFF = new StoreFileMetadata(SEGMENT_NAME, 5L, "different", Version.LATEST); - private static final Store.MetadataSnapshot SI_SNAPSHOT = new Store.MetadataSnapshot( - Map.of(SEGMENTS_FILE.name(), SEGMENTS_FILE), - null, - 0 - ); + private static final Map SI_SNAPSHOT = Map.of(SEGMENT_FILE.name(), SEGMENT_FILE); - private static final Store.MetadataSnapshot SI_SNAPSHOT_DIFFERENT = new Store.MetadataSnapshot( - Map.of(SEGMENTS_FILE_DIFF.name(), SEGMENTS_FILE_DIFF), - null, - 0 - ); + private static final Map SI_SNAPSHOT_DIFFERENT = Map.of(SEGMENT_FILE_DIFF.name(), SEGMENT_FILE_DIFF); private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings( "index", @@ -135,7 +120,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -146,9 +131,8 @@ public void getSegmentFiles( Store store, ActionListener listener ) { - assertEquals(filesToFetch.size(), 2); - assert (filesToFetch.contains(SEGMENTS_FILE)); - assert (filesToFetch.contains(PENDING_DELETE_FILE)); + assertEquals(1, filesToFetch.size()); + assert (filesToFetch.contains(SEGMENT_FILE)); listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } }; @@ -230,7 +214,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -273,7 +257,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, 
ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -318,7 +302,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -362,7 +346,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE))); + listener.onResponse(new CheckpointInfoResponse(checkpoint, SI_SNAPSHOT, buffer.toArrayCopy())); } @Override @@ -380,7 +364,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener.class ); segrepTarget = spy(new SegmentReplicationTarget(repCheckpoint, indexShard, segrepSource, segRepListener)); - when(segrepTarget.getMetadataSnapshot()).thenReturn(SI_SNAPSHOT_DIFFERENT); + when(segrepTarget.getMetadataMap()).thenReturn(SI_SNAPSHOT_DIFFERENT); segrepTarget.startReplication(new ActionListener() { @Override public void onResponse(Void replicationResponse) { @@ -413,9 +397,7 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - listener.onResponse( - new CheckpointInfoResponse(checkpoint, storeMetadataSnapshots.get(1), buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE)) - ); + listener.onResponse(new CheckpointInfoResponse(checkpoint, storeMetadataSnapshots.get(1).asMap(), buffer.toArrayCopy())); } @Override @@ -434,7 +416,7 @@ public void getSegmentFiles( ); segrepTarget = spy(new SegmentReplicationTarget(repCheckpoint, indexShard, segrepSource, segRepListener)); - when(segrepTarget.getMetadataSnapshot()).thenReturn(storeMetadataSnapshots.get(0)); + when(segrepTarget.getMetadataMap()).thenReturn(storeMetadataSnapshots.get(0).asMap()); segrepTarget.startReplication(new ActionListener() { @Override public void onResponse(Void replicationResponse) { diff --git a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java index a6f0cf7e98411..77a4a6d22039e 100644 --- a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java @@ -22,7 +22,6 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import java.io.IOException; -import java.util.Set; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -32,6 +31,7 @@ public class CopyStateTests extends IndexShardTestCase { private static final long EXPECTED_LONG_VALUE = 1L; private static final ShardId TEST_SHARD_ID = new ShardId("testIndex", "testUUID", 0); private static final StoreFileMetadata SEGMENTS_FILE = new StoreFileMetadata(IndexFileNames.SEGMENTS, 1L, "0", Version.LATEST); + private static final StoreFileMetadata SEGMENT_FILE = new StoreFileMetadata("_0.si", 1L, "0", Version.LATEST); private static final StoreFileMetadata PENDING_DELETE_FILE = new StoreFileMetadata("pendingDelete.del", 1L, "1", Version.LATEST); private static final 
Store.MetadataSnapshot COMMIT_SNAPSHOT = new Store.MetadataSnapshot( @@ -41,7 +41,7 @@ public class CopyStateTests extends IndexShardTestCase { ); private static final Store.MetadataSnapshot SI_SNAPSHOT = new Store.MetadataSnapshot( - Map.of(SEGMENTS_FILE.name(), SEGMENTS_FILE), + Map.of(SEGMENT_FILE.name(), SEGMENT_FILE), null, 0 ); @@ -61,10 +61,6 @@ public void testCopyStateCreation() throws IOException { // version was never set so this should be zero assertEquals(0, checkpoint.getSegmentInfosVersion()); assertEquals(EXPECTED_LONG_VALUE, checkpoint.getPrimaryTerm()); - - Set pendingDeleteFiles = copyState.getPendingDeleteFiles(); - assertEquals(1, pendingDeleteFiles.size()); - assertTrue(pendingDeleteFiles.contains(PENDING_DELETE_FILE)); } public static IndexShard createMockIndexShard() throws IOException { @@ -78,7 +74,7 @@ public static IndexShard createMockIndexShard() throws IOException { SegmentInfos testSegmentInfos = new SegmentInfos(Version.LATEST.major); when(mockShard.getSegmentInfosSnapshot()).thenReturn(new GatedCloseable<>(testSegmentInfos, () -> {})); - when(mockStore.getMetadata(testSegmentInfos)).thenReturn(SI_SNAPSHOT); + when(mockStore.getSegmentMetadataMap(testSegmentInfos)).thenReturn(SI_SNAPSHOT.asMap()); IndexCommit mockIndexCommit = mock(IndexCommit.class); when(mockShard.acquireLastIndexCommit(false)).thenReturn(new GatedCloseable<>(mockIndexCommit, () -> {})); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 0838a1fe87aa4..073dc4b84472e 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -134,6 +134,7 @@ import java.io.IOException; import java.util.ArrayList; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -1200,12 +1201,7 @@ public void getCheckpointMetadata( try { final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); listener.onResponse( - new CheckpointInfoResponse( - copyState.getCheckpoint(), - copyState.getMetadataSnapshot(), - copyState.getInfosBytes(), - copyState.getPendingDeleteFiles() - ) + new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) ); copyState.decRef(); } catch (IOException e) { From 1889d966542355aea0a3839931cf4525e429208d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Sep 2022 13:37:51 -0400 Subject: [PATCH 38/78] Bump org.gradle.test-retry from 1.4.0 to 1.4.1 (#4411) * Bump org.gradle.test-retry from 1.4.0 to 1.4.1 Bumps org.gradle.test-retry from 1.4.0 to 1.4.1. --- updated-dependencies: - dependency-name: org.gradle.test-retry dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 4 +++- build.gradle | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a2b6528783a39..d04b754531b0e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) - BWC version 2.2.2 ([#4383](https://github.com/opensearch-project/OpenSearch/pull/4383)) - Support for labels on version bump PRs, skip label support for changelog verifier ([#4391](https://github.com/opensearch-project/OpenSearch/pull/4391)) +### Dependencies +- Bumps `org.gradle.test-retry` from 1.4.0 to 1.4.1 ### Dependencies - Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0 @@ -69,4 +71,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/build.gradle b/build.gradle index a1f4f2d04883a..56c5610124958 100644 --- a/build.gradle +++ b/build.gradle @@ -56,7 +56,7 @@ plugins { id 'opensearch.docker-support' id 'opensearch.global-build-info' id "com.diffplug.spotless" version "6.10.0" apply false - id "org.gradle.test-retry" version "1.4.0" apply false + id "org.gradle.test-retry" version "1.4.1" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' } From fb64a856cd3e097b42a43a2fbb9ec659965ea9d5 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Tue, 6 Sep 2022 18:06:45 -0400 Subject: [PATCH 39/78] Revert to Netty 4.1.79.Final (#4428) Signed-off-by: Craig Perkins Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 - buildSrc/version.properties | 2 +- modules/transport-netty4/build.gradle | 8 -------- .../licenses/netty-buffer-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-buffer-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.80.Final.jar.sha1 | 1 - ...tty-transport-native-unix-common-4.1.79.Final.jar.sha1 | 1 + ...tty-transport-native-unix-common-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.80.Final.jar.sha1 | 1 - 
.../licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 | 1 - ...tty-transport-native-unix-common-4.1.79.Final.jar.sha1 | 1 + ...tty-transport-native-unix-common-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-all-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-all-4.1.80.Final.jar.sha1 | 1 - plugins/transport-nio/build.gradle | 6 ------ .../licenses/netty-buffer-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-buffer-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.80.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.80.Final.jar.sha1 | 1 - 50 files changed, 24 insertions(+), 39 deletions(-) create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1 
create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index d04b754531b0e..b9082ed039712 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,7 +23,6 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) -- Update to Netty 4.1.80.Final ([#4359](https://github.com/opensearch-project/OpenSearch/pull/4359)) - Add index specific setting for remote repository ([#4253](https://github.com/opensearch-project/OpenSearch/pull/4253)) ### Deprecated diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 6cc24a3f09244..072dcc4578977 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -21,7 +21,7 @@ asm = 9.3 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.5.0 -netty = 4.1.80.Final +netty = 4.1.79.Final joda = 2.10.13 # client dependencies diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 8bbe0bf2ef65f..5d2047d7f18a2 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -144,14 +144,6 @@ thirdPartyAudit { 'org.apache.log4j.Level', 'org.apache.log4j.Logger', - // from 
io.netty.handler.ssl.OpenSslEngine (netty) - 'org.bouncycastle.openssl.PEMEncryptedKeyPair', - 'org.bouncycastle.openssl.PEMParser', - 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', - 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', - 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', - 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', - // from io.netty.handler.ssl.OpenSslEngine (netty) 'io.netty.internal.tcnative.Buffer', 'io.netty.internal.tcnative.CertificateCompressionAlgo', diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..8e9e4d0b7f754 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +6c014412b599489b1db27c6bc08d8a46da94e397 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1 deleted file mode 100644 index 471fe8b211df2..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a087321a63d9991e25f7b7d24ef53edcbcb954ff \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..c0920231d79a8 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1 deleted file mode 100644 index 0f8e3bebe1532..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4941821a158d16311665d8606aefa610ecf0f64c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..a3f650da5abbd --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +882c70bc0a30a98bf3ce477f043e967ac026044c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1 deleted file mode 100644 index d18720d164335..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -efb23f9d5187d2f733595ef7930137f0cb2cec48 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..f2989024cfce1 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +0eeffab0cd5efb699d5e4ab9b694d32fef6694b3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 deleted file mode 100644 index d96a286b98493..0000000000000 --- 
a/modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf7b66834188ef1a6f6095291c6b81a1880798ba \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..faa7b099406a3 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +2814bd465731355323aba0fdd22163bfce638a75 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1 deleted file mode 100644 index d256e77b7024c..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d43ce22863bc590e4e33fbdabbb58dc05f4c43d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..8e314f164da69 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +2dc22423c8ed19906615fb936a5fcb7db14a4e6c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1 deleted file mode 100644 index 022ad6bc93dba..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cf7029d2f9bc4eeae8ff15af7a528d06b518a017 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..af550935bb911 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +55ecb1ff4464b56564a90824a741c3911264aaa4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1 deleted file mode 100644 index ad0f71b569377..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3bbb0d4bfbbab867e5b757b97a6e5e0d1348d94c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..c6e18efb3ad3d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +6cc2b49749b4fbcc39c687027e04e65e857552a9 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1 deleted file mode 100644 index 2bfb4f377d89b..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -57fcace7a1b8567aa39921c915d1b1ba78fd4d2d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..7f984663dfa85 --- /dev/null +++ 
b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +731937caec938b77b39df932a8da8aaca8d5ec05 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 deleted file mode 100644 index 998e6e8560724..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da3d7da1a8d317ae2c82b400fd255fe610c43ebe \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..a1753b194ea31 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +6c19c46f9529791964f636c93cfaca0556f0d5d0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1 deleted file mode 100644 index 2dab7f40b02b7..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6926d2ea779f41071ecb1948d880dfbb3a6ee126 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..f2989024cfce1 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +0eeffab0cd5efb699d5e4ab9b694d32fef6694b3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 deleted file mode 100644 index d96a286b98493..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf7b66834188ef1a6f6095291c6b81a1880798ba \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..913f0e7685c86 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +794a5937cdb1871c4ae350610752dec2929dc1d6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1 deleted file mode 100644 index 625344e6cfb0a..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -00025b767be3425f3b31a34ee095c85619169f17 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..dbb072f3f665f --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +054aace8683de7893cf28d4aab72cd60f49b5700 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 
b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 deleted file mode 100644 index c3184ec5ff7d3..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9b3b42ff805723fb98120f5ab2019c53e71da91b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..a5d1be00d9c29 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +8eb9be9b6a66a03f5f4df67fe559cb676493d167 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 deleted file mode 100644 index bb6a3502a729f..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6b1602f80b6235b0b7d53bc5e9c1a6cd11c1b804 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..7f984663dfa85 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +731937caec938b77b39df932a8da8aaca8d5ec05 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 deleted file mode 100644 index 998e6e8560724..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da3d7da1a8d317ae2c82b400fd255fe610c43ebe \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..724950db96f09 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +1c53cffaa14d61de523b167377843e35807292a7 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1 deleted file mode 100644 index ae6eb1d85f1ea..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39e73b76a3ec65df731b371179e15f2c3e4e7575 \ No newline at end of file diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index c5b401de60c8c..a7e8c42a4e2d3 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -83,12 +83,6 @@ thirdPartyAudit { 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', - 'org.bouncycastle.openssl.PEMEncryptedKeyPair', - 'org.bouncycastle.openssl.PEMParser', - 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', - 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', - 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', - 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', // from 
io.netty.handler.ssl.JettyNpnSslEngine (netty) 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..8e9e4d0b7f754 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +6c014412b599489b1db27c6bc08d8a46da94e397 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1 deleted file mode 100644 index 471fe8b211df2..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a087321a63d9991e25f7b7d24ef53edcbcb954ff \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..c0920231d79a8 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1 deleted file mode 100644 index 0f8e3bebe1532..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4941821a158d16311665d8606aefa610ecf0f64c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..a3f650da5abbd --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +882c70bc0a30a98bf3ce477f043e967ac026044c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1 deleted file mode 100644 index d18720d164335..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -efb23f9d5187d2f733595ef7930137f0cb2cec48 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..faa7b099406a3 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +2814bd465731355323aba0fdd22163bfce638a75 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1 deleted file mode 100644 index d256e77b7024c..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d43ce22863bc590e4e33fbdabbb58dc05f4c43d \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..8e314f164da69 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +2dc22423c8ed19906615fb936a5fcb7db14a4e6c \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1 deleted file mode 100644 index 022ad6bc93dba..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cf7029d2f9bc4eeae8ff15af7a528d06b518a017 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..af550935bb911 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +55ecb1ff4464b56564a90824a741c3911264aaa4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1 deleted file mode 100644 index ad0f71b569377..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3bbb0d4bfbbab867e5b757b97a6e5e0d1348d94c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..c6e18efb3ad3d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +6cc2b49749b4fbcc39c687027e04e65e857552a9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1 deleted file mode 100644 index 2bfb4f377d89b..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -57fcace7a1b8567aa39921c915d1b1ba78fd4d2d \ No newline at end of file From bff9d0fa6c5d56943a02344768ae37db54b1c71d Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Wed, 7 Sep 2022 04:09:00 +0000 Subject: [PATCH 40/78] [Segment Replication] Fix timeout issue by calculating time needed to process getSegmentFiles. (#4426) * Fix timeout issue by calculating time needed to process getSegmentFiles. Signed-off-by: Rishikesh1159 * Formatting sizeOfSegmentFiles for time calculation. Signed-off-by: Rishikesh1159 * Addressing comments and applying spotless check. Signed-off-by: Rishikesh1159 Signed-off-by: Rishikesh1159 --- CHANGELOG.md | 3 ++- .../PrimaryShardReplicationSource.java | 18 +++++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9082ed039712..d8373e9904d7f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix NoSuchFileExceptions with segment replication when computing primary metadata snapshots ([#4366](https://github.com/opensearch-project/OpenSearch/pull/4366)) - [Segment Replication] Update flaky testOnNewCheckpointFromNewPrimaryCancelOngoingReplication unit test ([#4414](https://github.com/opensearch-project/OpenSearch/pull/4414)) - Fixed the `_cat/shards/10_basic.yml` test cases fix. 
+- [Segment Replication] Fix timeout issue by calculating time needed to process getSegmentFiles ([#4426](https://github.com/opensearch-project/OpenSearch/pull/4426))

 ### Security
 - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341))
@@ -70,4 +71,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)

 [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD
-[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x
\ No newline at end of file
+[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x
diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java
index aa0b5416dd0ff..8107f99723eaf 100644
--- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java
+++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java
@@ -13,11 +13,13 @@
 import org.opensearch.action.ActionListener;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.common.io.stream.Writeable;
+import org.opensearch.common.unit.TimeValue;
 import org.opensearch.index.store.Store;
 import org.opensearch.index.store.StoreFileMetadata;
 import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.indices.recovery.RetryableTransportClient;
 import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
+import org.opensearch.transport.TransportRequestOptions;
 import org.opensearch.transport.TransportService;

 import java.util.List;
@@ -78,6 +80,17 @@ public void getSegmentFiles(
     ) {
         final Writeable.Reader reader = GetSegmentFilesResponse::new;
         final ActionListener responseListener = ActionListener.map(listener, r -> r);
+        // Few of the below assumptions and calculations are added for experimental release of segment replication feature in 2.3
+        // version. These will be changed in next release.
+
+        // Storing the size of files to fetch in bytes.
+        final long sizeOfSegmentFiles = filesToFetch.stream().mapToLong(file -> file.length()).sum();
+
+        // Maximum size of files to fetch (segment files) in bytes, that can be processed in 1 minute for a m5.xlarge machine.
+        long baseSegmentFilesSize = 100000000;
+
+        // Formula for calculating time needed to process a replication event's files to fetch process
+        final long timeToGetSegmentFiles = 1 + (sizeOfSegmentFiles / baseSegmentFilesSize);
         final GetSegmentFilesRequest request = new GetSegmentFilesRequest(
             replicationId,
             targetAllocationId,
@@ -85,7 +98,10 @@
             filesToFetch,
             checkpoint
         );
-        transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, responseListener, reader);
+        final TransportRequestOptions options = TransportRequestOptions.builder()
+            .withTimeout(TimeValue.timeValueMinutes(timeToGetSegmentFiles))
+            .build();
+        transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, options, responseListener, reader);
     }

     @Override
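The timeout heuristic introduced above is simple enough to sanity-check by hand: the request timeout grows by one minute per ~100 MB of segment data, with one minute of headroom. A standalone sketch of the same arithmetic (illustrative only — SegmentFetchTimeoutSketch and the example byte counts are invented for this note, not OpenSearch code):

import java.util.List;

// Recomputes the patch's timeout formula for example file sizes.
public final class SegmentFetchTimeoutSketch {
    // Bytes assumed transferable per minute (the patch's baseSegmentFilesSize of 100000000).
    private static final long BASE_BYTES_PER_MINUTE = 100_000_000L;

    static long timeoutMinutes(List<Long> fileLengths) {
        long totalBytes = fileLengths.stream().mapToLong(Long::longValue).sum();
        // Integer division plus one minute of headroom, matching timeToGetSegmentFiles.
        return 1 + (totalBytes / BASE_BYTES_PER_MINUTE);
    }

    public static void main(String[] args) {
        System.out.println(timeoutMinutes(List.of(50_000_000L)));                // 1 minute
        System.out.println(timeoutMinutes(List.of(150_000_000L, 100_000_000L))); // 3 minutes
    }
}

Note the formula never returns less than one minute, so small checkpoints keep a fixed-timeout floor while large transfers scale the deadline with their size.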
From ccf575a135c8c7512d9a3cfc343a90ca37ea1d80 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Wed, 7 Sep 2022 10:09:56 -0400
Subject: [PATCH 41/78] [Bug]: gradle check failing with java heap OutOfMemoryError (#4328)

* [Bug]: gradle check failing with java heap OutOfMemoryError

Signed-off-by: Andriy Redko

* Fork JavaCompile task

Signed-off-by: Andriy Redko

Signed-off-by: Andriy Redko
---
 CHANGELOG.md                                |  1 +
 build.gradle                                | 10 +++++++
 .../HierarchyCircuitBreakerService.java     | 15 +++++++++--
 .../settings/MemorySizeSettingsTests.java   | 27 +++++++++++++------
 4 files changed, 43 insertions(+), 10 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d8373e9904d7f..bcbd7f9ca88e1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -47,6 +47,7 @@
 - [Segment Replication] Update flaky testOnNewCheckpointFromNewPrimaryCancelOngoingReplication unit test ([#4414](https://github.com/opensearch-project/OpenSearch/pull/4414))
 - Fixed the `_cat/shards/10_basic.yml` test cases fix.
 - [Segment Replication] Fix timeout issue by calculating time needed to process getSegmentFiles ([#4426](https://github.com/opensearch-project/OpenSearch/pull/4426))
+- [Bug]: gradle check failing with java heap OutOfMemoryError (([#4328](https://github.com/opensearch-project/OpenSearch/

 ### Security
 - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341))
diff --git a/build.gradle b/build.gradle
index 56c5610124958..bcae5bc3884a7 100644
--- a/build.gradle
+++ b/build.gradle
@@ -264,6 +264,12 @@ tasks.register("branchConsistency") {
 allprojects {
   // configure compiler options
   tasks.withType(JavaCompile).configureEach { JavaCompile compile ->
+    options.fork = true
+
+    configure(options.forkOptions) {
+      memoryMaximumSize = project.property('options.forkOptions.memoryMaximumSize')
+    }
+
     // See please https://bugs.openjdk.java.net/browse/JDK-8209058
     if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_11) {
       compile.options.compilerArgs << '-Werror'
@@ -389,6 +395,10 @@ allprojects {
       // the dependency is added.
   gradle.projectsEvaluated {
     allprojects {
+      project.tasks.withType(JavaForkOptions) {
+        maxHeapSize project.property('options.forkOptions.memoryMaximumSize')
+      }
+
       if (project.path == ':test:framework') {
         // :test:framework:test cannot run before and after :server:test
         return
diff --git a/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java
index c0056aab3fb16..40bb4894c7397 100644
--- a/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java
+++ b/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java
@@ -559,8 +559,19 @@ static long fallbackRegionSize(JvmInfo jvmInfo) {
         // https://hg.openjdk.java.net/jdk/jdk/file/e7d0ec2d06e8/src/hotspot/share/gc/g1/heapRegion.cpp#l67
         // based on this JDK "bug":
         // https://bugs.openjdk.java.net/browse/JDK-8241670
-        long averageHeapSize = (jvmInfo.getMem().getHeapMax().getBytes() + JvmInfo.jvmInfo().getMem().getHeapMax().getBytes()) / 2;
-        long regionSize = Long.highestOneBit(averageHeapSize / 2048);
+        // JDK-17 updates:
+        // https://github.com/openjdk/jdk17u/blob/master/src/hotspot/share/gc/g1/heapRegionBounds.hpp
+        // https://github.com/openjdk/jdk17u/blob/master/src/hotspot/share/gc/g1/heapRegion.cpp#L67
+        long regionSizeUnrounded = Math.min(
+            Math.max(JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 2048, ByteSizeUnit.MB.toBytes(1)),
+            ByteSizeUnit.MB.toBytes(32)
+        );
+
+        long regionSize = Long.highestOneBit(regionSizeUnrounded);
+        if (regionSize != regionSizeUnrounded) {
+            regionSize <<= 1; /* next power of 2 */
+        }
+
         if (regionSize < ByteSizeUnit.MB.toBytes(1)) {
             regionSize = ByteSizeUnit.MB.toBytes(1);
         } else if (regionSize > ByteSizeUnit.MB.toBytes(32)) {
diff --git a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java
index f64b45e80dbca..2c7251818e2bc 100644
--- a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java
+++ b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java
@@ -33,7 +33,6 @@
 package org.opensearch.common.settings;

 import org.opensearch.common.settings.Setting.Property;
-import org.opensearch.common.unit.ByteSizeUnit;
 import org.opensearch.common.unit.ByteSizeValue;
 import org.opensearch.common.util.PageCacheRecycler;
 import org.opensearch.indices.IndexingMemoryController;
@@ -83,9 +82,13 @@ public void testIndicesRequestCacheSetting() {
     }

     public void testCircuitBreakerSettings() {
-        // default is chosen based on actual heap size
+        final Settings settings = Settings.builder()
+            .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), randomBoolean())
+            .build();
+
+        // default is chosen based on USE_REAL_MEMORY_USAGE_SETTING setting
         double defaultTotalPercentage;
-        if (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() < new ByteSizeValue(1, ByteSizeUnit.GB).getBytes()) {
+        if (HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.get(settings)) {
             defaultTotalPercentage = 0.95d;
         } else {
             defaultTotalPercentage = 0.7d;
@@ -93,22 +96,26 @@ public void testCircuitBreakerSettings() {
         assertMemorySizeSetting(
             HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
             "indices.breaker.total.limit",
-            new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * defaultTotalPercentage))
+            new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * defaultTotalPercentage)),
+            settings
         );
         assertMemorySizeSetting(
             HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
             "indices.breaker.fielddata.limit",
-            new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.4))
+            new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.4)),
+            settings
         );
         assertMemorySizeSetting(
             HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
             "indices.breaker.request.limit",
-            new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.6))
+            new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.6)),
+            settings
         );
         assertMemorySizeSetting(
             HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING,
             "network.breaker.inflight_requests.limit",
-            new ByteSizeValue((JvmInfo.jvmInfo().getMem().getHeapMax().getBytes()))
+            new ByteSizeValue((JvmInfo.jvmInfo().getMem().getHeapMax().getBytes())),
+            settings
         );
     }
@@ -121,10 +128,14 @@ public void testIndicesFieldDataCacheSetting() {
     }

     private void assertMemorySizeSetting(Setting setting, String settingKey, ByteSizeValue defaultValue) {
+        assertMemorySizeSetting(setting, settingKey, defaultValue, Settings.EMPTY);
+    }
+
+    private void assertMemorySizeSetting(Setting setting, String settingKey, ByteSizeValue defaultValue, Settings settings) {
         assertThat(setting, notNullValue());
         assertThat(setting.getKey(), equalTo(settingKey));
         assertThat(setting.getProperties(), hasItem(Property.NodeScope));
-        assertThat(setting.getDefault(Settings.EMPTY), equalTo(defaultValue));
+        assertThat(setting.getDefault(settings), equalTo(defaultValue));
         Settings settingWithPercentage = Settings.builder().put(settingKey, "25%").build();
         assertThat(
             setting.get(settingWithPercentage),
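The fallback G1 region-size computation in the patch above replaces the old averaged-heap estimate with the JDK 17 rule: clamp heap/2048 into [1 MB, 32 MB], then round up to the next power of two. A self-contained sketch for verifying that rounding (G1RegionSizeSketch is a hypothetical name invented here; the logic mirrors the patched fallbackRegionSize):

// Illustrative re-statement of the clamp-then-round region-size logic.
public final class G1RegionSizeSketch {
    private static final long MB = 1024L * 1024L;

    static long regionSize(long maxHeapBytes) {
        long unrounded = Math.min(Math.max(maxHeapBytes / 2048, MB), 32 * MB);
        long size = Long.highestOneBit(unrounded);
        if (size != unrounded) {
            size <<= 1; // next power of two, never exceeding the 32 MB clamp
        }
        return size;
    }

    public static void main(String[] args) {
        System.out.println(regionSize(1024L * MB) / MB);        // 1 GB heap   -> 1 MB regions
        System.out.println(regionSize(6L * 1024L * MB) / MB);   // 6 GB heap   -> 3 MB unrounded -> 4 MB regions
        System.out.println(regionSize(128L * 1024L * MB) / MB); // 128 GB heap -> clamped to 32 MB regions
    }
}

Because the clamp runs before the rounding, the result can never escape the [1 MB, 32 MB] bounds that the retained context lines re-check afterwards.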
From c1a1f1aef9f3319c325cac686a1e06c1a479320f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 7 Sep 2022 13:16:08 -0400
Subject: [PATCH 42/78] Bump azure-core-http-netty from 1.12.0 to 1.12.4 in
 /plugins/repository-azure (#4160)

* Bump azure-core-http-netty in /plugins/repository-azure

Bumps [azure-core-http-netty](https://github.com/Azure/azure-sdk-for-java) from 1.12.0 to 1.12.4.
- [Release notes](https://github.com/Azure/azure-sdk-for-java/releases)
- [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core_1.12.0...azure-core-http-netty_1.12.4)

---
updated-dependencies:
- dependency-name: com.azure:azure-core-http-netty
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Added missing forbidden classes Signed-off-by: Owais Kazi * update version of azure-storage-common and azure-core-http-netty Signed-off-by: Xue Zhou * update version of azure-storage-common and azure-core-http-netty Signed-off-by: Xue Zhou * adding changelog information Signed-off-by: Xue Zhou Signed-off-by: dependabot[bot] Signed-off-by: Owais Kazi Signed-off-by: Xue Zhou Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Owais Kazi Co-authored-by: Xue Zhou --- CHANGELOG.md | 3 +++ plugins/repository-azure/build.gradle | 8 ++++---- .../repository-azure/licenses/azure-core-1.27.0.jar.sha1 | 1 - .../repository-azure/licenses/azure-core-1.31.0.jar.sha1 | 1 + .../licenses/azure-core-http-netty-1.12.0.jar.sha1 | 1 - .../licenses/azure-core-http-netty-1.12.4.jar.sha1 | 1 + .../licenses/azure-storage-common-12.16.0.jar.sha1 | 1 - .../licenses/azure-storage-common-12.18.0.jar.sha1 | 1 + 8 files changed, 10 insertions(+), 7 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-core-1.27.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.18.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index bcbd7f9ca88e1..e10f051dfe738 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,9 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Dependencies - Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0 - Bumps `xmlbeans` from 5.1.0 to 5.1.1 +- Bumps azure-core-http-netty from 1.12.0 to 1.12.4([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) +- Bumps azure-core from 1.27.0 to 1.31.0([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) +- Bumps azure-storage-common from 12.16.0 to 12.18.0([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 227d7d1b68977..1af2de4f176f2 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,9 +44,9 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.27.0' - api 'com.azure:azure-storage-common:12.16.0' - api 'com.azure:azure-core-http-netty:1.12.0' + api 'com.azure:azure-core:1.31.0' + api 'com.azure:azure-storage-common:12.18.0' + api 'com.azure:azure-core-http-netty:1.12.4' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" @@ -137,7 +137,7 @@ thirdPartyAudit { 'javax.xml.bind.annotation.XmlAccessOrder', 'javax.xml.bind.annotation.XmlAccessType', 'javax.xml.bind.annotation.XmlAccessorOrder', - 'javax.xml.bind.annotation.XmlAccessorType', + 'javax.xml.bind.annotation.XmlAccessorType', 'javax.xml.bind.annotation.XmlAttribute', 'javax.xml.bind.annotation.XmlElement', 'javax.xml.bind.annotation.XmlElement$DEFAULT', diff 
--git a/plugins/repository-azure/licenses/azure-core-1.27.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.27.0.jar.sha1 deleted file mode 100644 index 9206b697ca648..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.27.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -75a2db538d218e2bd3c2cbdf04c955b8f6db6626 \ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..6a5076b3da301 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 @@ -0,0 +1 @@ +39f18dae02237f90f1cd23b56701d7f9d9525531 \ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 deleted file mode 100644 index 1b5d162c004de..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4381e4e2801ee190ae76b61dbd992e94b40272e \ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 new file mode 100644 index 0000000000000..5cb180b20cf8b --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 @@ -0,0 +1 @@ +70dcc08887f2d70a8f812bf00d4fa10390fab3fd \ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 deleted file mode 100644 index ebf328aa69ee8..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f652b89a30269bdff6644468632726d4ba4fbd1 \ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.18.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.18.0.jar.sha1 new file mode 100644 index 0000000000000..f824d6cdf4f18 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.18.0.jar.sha1 @@ -0,0 +1 @@ +cb6fa5863f5cd8406934baec739285209165ef4b \ No newline at end of file
From ce6c6a80a69409e266ca901d367e6ba9708e75b7 Mon Sep 17 00:00:00 2001
From: mwilkinson-imo <47403752+mwilkinson-imo@users.noreply.github.com>
Date: Wed, 7 Sep 2022 12:21:10 -0500
Subject: [PATCH 43/78] Bugfix: Allow opensearch.bat file and opensearch-env.bat files to run when install path includes a space. (#4362)

* Bugfix: Prevent escaping of quotes in `else-if` statement by setting variable without double quotes. Signed-off-by: Mike Wilkinson
* Add changelog entry for fix Signed-off-by: Mike Wilkinson
* Escape double quotes for environment variables set by `opensearch-env.bat`. Explicitly apply quotes where those environment variables are invoked.
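For illustration only — a minimal sketch of the batch quoting behavior behind this fix, using a hypothetical install path (the snippet below is not part of the patch itself):

  rem Quotes placed inside the value become part of the variable, so call sites
  rem that quote the expansion end up with doubled quotes and fail on spaces:
  set JAVA="C:\Program Files\OpenSearch\jdk\bin\java.exe"
  rem Quoting the whole assignment keeps the value itself unquoted; quotes are
  rem then applied explicitly wherever the variable is used:
  set "JAVA=C:\Program Files\OpenSearch\jdk\bin\java.exe"
  "%JAVA%" -version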
Signed-off-by: Mike Wilkinson Signed-off-by: Mike Wilkinson Co-authored-by: Mike Wilkinson --- CHANGELOG.md | 1 + distribution/src/bin/opensearch-cli.bat | 2 +- distribution/src/bin/opensearch-env.bat | 10 +++++----- distribution/src/bin/opensearch-service.bat | 4 ++-- distribution/src/bin/opensearch.bat | 6 +++--- 5 files changed, 12 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e10f051dfe738..680f30eca0f20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fixed the `_cat/shards/10_basic.yml` test cases fix. - [Segment Replication] Fix timeout issue by calculating time needed to process getSegmentFiles ([#4426](https://github.com/opensearch-project/OpenSearch/pull/4426)) - [Bug]: gradle check failing with java heap OutOfMemoryError (([#4328](https://github.com/opensearch-project/OpenSearch/ +- `opensearch.bat` fails to execute when install path includes spaces ([#4362](https://github.com/opensearch-project/OpenSearch/pull/4362)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/distribution/src/bin/opensearch-cli.bat b/distribution/src/bin/opensearch-cli.bat index 734669e1f9349..f080346a4478a 100644 --- a/distribution/src/bin/opensearch-cli.bat +++ b/distribution/src/bin/opensearch-cli.bat @@ -16,7 +16,7 @@ rem use a small heap size for the CLI tools, and thus the serial collector to rem avoid stealing many CPU cycles; a user can override by setting OPENSEARCH_JAVA_OPTS set OPENSEARCH_JAVA_OPTS=-Xms4m -Xmx64m -XX:+UseSerialGC %OPENSEARCH_JAVA_OPTS% -%JAVA% ^ +"%JAVA%" ^ %OPENSEARCH_JAVA_OPTS% ^ -Dopensearch.path.home="%OPENSEARCH_HOME%" ^ -Dopensearch.path.conf="%OPENSEARCH_PATH_CONF%" ^ diff --git a/distribution/src/bin/opensearch-env.bat b/distribution/src/bin/opensearch-env.bat index 96770f72f35c8..95088aaee7d3d 100644 --- a/distribution/src/bin/opensearch-env.bat +++ b/distribution/src/bin/opensearch-env.bat @@ -43,14 +43,14 @@ rem comparing to empty string makes this equivalent to bash -v check on env var rem and allows to effectively force use of the bundled jdk when launching OpenSearch rem by setting OPENSEARCH_JAVA_HOME= and JAVA_HOME= if not "%OPENSEARCH_JAVA_HOME%" == "" ( - set JAVA="%OPENSEARCH_JAVA_HOME%\bin\java.exe" + set "JAVA=%OPENSEARCH_JAVA_HOME%\bin\java.exe" set JAVA_TYPE=OPENSEARCH_JAVA_HOME ) else if not "%JAVA_HOME%" == "" ( - set JAVA="%JAVA_HOME%\bin\java.exe" + set "JAVA=%JAVA_HOME%\bin\java.exe" set JAVA_TYPE=JAVA_HOME ) else ( - set JAVA="%OPENSEARCH_HOME%\jdk\bin\java.exe" - set JAVA_HOME="%OPENSEARCH_HOME%\jdk" + set "JAVA=%OPENSEARCH_HOME%\jdk\bin\java.exe" + set "JAVA_HOME=%OPENSEARCH_HOME%\jdk" set JAVA_TYPE=bundled jdk ) @@ -73,4 +73,4 @@ if defined JAVA_OPTS ( ) rem check the Java version -%JAVA% -cp "%OPENSEARCH_CLASSPATH%" "org.opensearch.tools.java_version_checker.JavaVersionChecker" || exit /b 1 +"%JAVA%" -cp "%OPENSEARCH_CLASSPATH%" "org.opensearch.tools.java_version_checker.JavaVersionChecker" || exit /b 1 diff --git a/distribution/src/bin/opensearch-service.bat b/distribution/src/bin/opensearch-service.bat index a11dc8316e8b1..c1f3f264ec4a0 100644 --- a/distribution/src/bin/opensearch-service.bat +++ b/distribution/src/bin/opensearch-service.bat @@ -121,7 +121,7 @@ if exist "%JAVA_HOME%\bin\server\jvm.dll" ( :foundJVM if not defined OPENSEARCH_TMPDIR ( - for /f "tokens=* usebackq" %%a in (`CALL %JAVA% -cp "!OPENSEARCH_CLASSPATH!" 
"org.opensearch.tools.launchers.TempDirectory"`) do set OPENSEARCH_TMPDIR=%%a + for /f "tokens=* usebackq" %%a in (`CALL "%JAVA%" -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.TempDirectory"`) do set OPENSEARCH_TMPDIR=%%a ) rem The JVM options parser produces the final JVM options to start @@ -135,7 +135,7 @@ rem - third, JVM options from OPENSEARCH_JAVA_OPTS are applied rem - fourth, ergonomic JVM options are applied @setlocal -for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.JvmOptionsParser" "!OPENSEARCH_PATH_CONF!" ^|^| echo jvm_options_parser_failed`) do set OPENSEARCH_JAVA_OPTS=%%a +for /F "usebackq delims=" %%a in (`CALL "%JAVA%" -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.JvmOptionsParser" "!OPENSEARCH_PATH_CONF!" ^|^| echo jvm_options_parser_failed`) do set OPENSEARCH_JAVA_OPTS=%%a @endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%OPENSEARCH_JAVA_OPTS%" & set OPENSEARCH_JAVA_OPTS=%OPENSEARCH_JAVA_OPTS% if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" ( diff --git a/distribution/src/bin/opensearch.bat b/distribution/src/bin/opensearch.bat index dda15124e1654..cce21504c55b7 100644 --- a/distribution/src/bin/opensearch.bat +++ b/distribution/src/bin/opensearch.bat @@ -75,7 +75,7 @@ IF "%checkpassword%"=="Y" ( ) if not defined OPENSEARCH_TMPDIR ( - for /f "tokens=* usebackq" %%a in (`CALL %JAVA% -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.TempDirectory"`) do set OPENSEARCH_TMPDIR=%%a + for /f "tokens=* usebackq" %%a in (`CALL "%JAVA%" -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.TempDirectory"`) do set OPENSEARCH_TMPDIR=%%a ) rem The JVM options parser produces the final JVM options to start @@ -88,7 +88,7 @@ rem jvm.options.d/*.options rem - third, JVM options from OPENSEARCH_JAVA_OPTS are applied rem - fourth, ergonomic JVM options are applied @setlocal -for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.JvmOptionsParser" "!OPENSEARCH_PATH_CONF!" ^|^| echo jvm_options_parser_failed`) do set OPENSEARCH_JAVA_OPTS=%%a +for /F "usebackq delims=" %%a in (`CALL "%JAVA%" -cp "!OPENSEARCH_CLASSPATH!" "org.opensearch.tools.launchers.JvmOptionsParser" "!OPENSEARCH_PATH_CONF!" ^|^| echo jvm_options_parser_failed`) do set OPENSEARCH_JAVA_OPTS=%%a @endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%OPENSEARCH_JAVA_OPTS%" & set OPENSEARCH_JAVA_OPTS=%OPENSEARCH_JAVA_OPTS% if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" ( @@ -103,7 +103,7 @@ SET KEYSTORE_PASSWORD=!KEYSTORE_PASSWORD:^<=^^^=^^^>! SET KEYSTORE_PASSWORD=!KEYSTORE_PASSWORD:^\=^^^\! -ECHO.!KEYSTORE_PASSWORD!| %JAVA% %OPENSEARCH_JAVA_OPTS% -Dopensearch ^ +ECHO.!KEYSTORE_PASSWORD!| "%JAVA%" %OPENSEARCH_JAVA_OPTS% -Dopensearch ^ -Dopensearch.path.home="%OPENSEARCH_HOME%" -Dopensearch.path.conf="%OPENSEARCH_PATH_CONF%" ^ -Dopensearch.distribution.type="%OPENSEARCH_DISTRIBUTION_TYPE%" ^ -Dopensearch.bundled_jdk="%OPENSEARCH_BUNDLED_JDK%" ^ From 3ef0046916e93b5c1c7bde081eba4e672d1cec2e Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Wed, 7 Sep 2022 11:26:16 -0700 Subject: [PATCH 44/78] [Segment Replication] - Update replicas to commit SegmentInfos instead of relying on segments_N from primary shards. (#4402) * Segment Replication - Update replicas to commit SegmentInfos instead of relying on segments_N from primary shards. 
This change updates replicas to commit SegmentInfos before the shard is closed, on receiving a new commit point from a primary, and when a new primary is detected. This change also makes the public commitSegmentInfos on NRTEngine obsolete, refactoring IndexShard to simply call reset on the engine. Signed-off-by: Marc Handalian * Remove noise & extra log statement. Signed-off-by: Marc Handalian * PR feedback. Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- CHANGELOG.md | 1 + .../index/engine/NRTReplicationEngine.java | 39 ++-- .../engine/NRTReplicationReaderManager.java | 3 + .../opensearch/index/shard/IndexShard.java | 28 +-- .../indices/recovery/MultiFileWriter.java | 5 +- .../engine/NRTReplicationEngineTests.java | 170 ++++++++--------- .../SegmentReplicationIndexShardTests.java | 174 +++++++++++++++++- .../indices/recovery/RecoveryTests.java | 3 +- .../index/engine/EngineTestCase.java | 8 +- .../index/shard/IndexShardTestCase.java | 10 +- 10 files changed, 288 insertions(+), 153 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 680f30eca0f20..bf5e1a70a493a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) - Add index specific setting for remote repository ([#4253](https://github.com/opensearch-project/OpenSearch/pull/4253)) +- [Segment Replication] Update replicas to commit SegmentInfos instead of relying on SIS files from primary shards. ([#4402](https://github.com/opensearch-project/OpenSearch/pull/4402)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index cf753e3360c39..12d420aa245fa 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -54,6 +54,8 @@ public class NRTReplicationEngine extends Engine { private final LocalCheckpointTracker localCheckpointTracker; private final WriteOnlyTranslogManager translogManager; + private volatile long lastReceivedGen = SequenceNumbers.NO_OPS_PERFORMED; + private static final int SI_COUNTER_INCREMENT = 10; public NRTReplicationEngine(EngineConfig engineConfig) { @@ -120,14 +122,16 @@ public TranslogManager translogManager() { public synchronized void updateSegments(final SegmentInfos infos, long seqNo) throws IOException { // Update the current infos reference on the Engine's reader. + final long incomingGeneration = infos.getGeneration(); readerManager.updateSegments(infos); - // only update the persistedSeqNo and "lastCommitted" infos reference if the incoming segments have a higher - // generation. We can still refresh with incoming SegmentInfos that are not part of a commit point. - if (infos.getGeneration() > lastCommittedSegmentInfos.getGeneration()) { - this.lastCommittedSegmentInfos = infos; + // Commit and roll the xlog when we receive a different generation than what was last received. + // lower/higher gens are possible from a new primary that was just elected. 
+ if (incomingGeneration != lastReceivedGen) { + commitSegmentInfos(); translogManager.rollTranslogGeneration(); } + lastReceivedGen = incomingGeneration; localCheckpointTracker.fastForwardProcessedSeqNo(seqNo); } @@ -141,20 +145,16 @@ public synchronized void updateSegments(final SegmentInfos infos, long seqNo) th * * @throws IOException - When there is an IO error committing the SegmentInfos. */ - public void commitSegmentInfos() throws IOException { - // TODO: This method should wait for replication events to finalize. - final SegmentInfos latestSegmentInfos = getLatestSegmentInfos(); - /* - This is a workaround solution which decreases the chances of conflict on replica nodes when same file is copied - from two different primaries during failover. Increasing counter helps in avoiding this conflict as counter is - used to generate new segment file names. The ideal solution is to identify the counter from previous primary. - */ - latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT; - latestSegmentInfos.changed(); - store.commitSegmentInfos(latestSegmentInfos, localCheckpointTracker.getMaxSeqNo(), localCheckpointTracker.getProcessedCheckpoint()); + private void commitSegmentInfos(SegmentInfos infos) throws IOException { + store.commitSegmentInfos(infos, localCheckpointTracker.getMaxSeqNo(), localCheckpointTracker.getProcessedCheckpoint()); + this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); translogManager.syncTranslog(); } + protected void commitSegmentInfos() throws IOException { + commitSegmentInfos(getLatestSegmentInfos()); + } + @Override public String getHistoryUUID() { return loadHistoryUUID(lastCommittedSegmentInfos.userData); @@ -354,6 +354,15 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) { assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() : "Either the write lock must be held or the engine must be currently be failing itself"; try { + final SegmentInfos latestSegmentInfos = getLatestSegmentInfos(); + /* + This is a workaround solution which decreases the chances of conflict on replica nodes when same file is copied + from two different primaries during failover. Increasing counter helps in avoiding this conflict as counter is + used to generate new segment file names. The ideal solution is to identify the counter from previous primary. + */ + latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT; + latestSegmentInfos.changed(); + commitSegmentInfos(latestSegmentInfos); IOUtils.close(readerManager, translogManager, store::decRef); } catch (Exception e) { logger.warn("failed to close engine", e); diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java index 16e615672a26f..8fbb24720aedc 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java @@ -74,6 +74,9 @@ protected OpenSearchDirectoryReader refreshIfNeeded(OpenSearchDirectoryReader re * @throws IOException - When Refresh fails with an IOException. */ public synchronized void updateSegments(SegmentInfos infos) throws IOException { + // roll over the currentInfo's generation, this ensures the on-disk gen + // is always increased. 
+ infos.updateGeneration(currentInfos); currentInfos = infos; maybeRefresh(); } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 670af1f1c6fd9..28dc0ad49d4ec 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -623,7 +623,7 @@ public void updateShardState( if (indexSettings.isSegRepEnabled()) { // this Shard's engine was read only, we need to update its engine before restoring local history from xlog. assert newRouting.primary() && currentRouting.primary() == false; - promoteNRTReplicaToPrimary(); + resetEngineToGlobalCheckpoint(); } replicationTracker.activatePrimaryMode(getLocalCheckpoint()); ensurePeerRecoveryRetentionLeasesExist(); @@ -3557,7 +3557,9 @@ private void innerAcquireReplicaOperationPermit( currentGlobalCheckpoint, maxSeqNo ); - if (currentGlobalCheckpoint < maxSeqNo) { + // With Segment Replication enabled, we never want to reset a replica's engine unless + // it is promoted to primary. + if (currentGlobalCheckpoint < maxSeqNo && indexSettings.isSegRepEnabled() == false) { resetEngineToGlobalCheckpoint(); } else { getEngine().translogManager().rollTranslogGeneration(); @@ -4120,26 +4122,4 @@ RetentionLeaseSyncer getRetentionLeaseSyncer() { public GatedCloseable getSegmentInfosSnapshot() { return getEngine().getSegmentInfosSnapshot(); } - - /** - * With segment replication enabled - prepare the shard's engine to be promoted as the new primary. - * - * If this shard is currently using a replication engine, this method: - * 1. Invokes {@link NRTReplicationEngine#commitSegmentInfos()} to ensure the engine can be reopened as writeable from the latest refresh point. - * InternalEngine opens its IndexWriter from an on-disk commit point, but this replica may have recently synced from a primary's refresh point, meaning it has documents searchable in its in-memory SegmentInfos - * that are not part of a commit point. This ensures that those documents are made part of a commit and do not need to be reindexed after promotion. - * 2. Invokes resetEngineToGlobalCheckpoint - This call performs the engine swap, opening up as a writeable engine and replays any operations in the xlog. The operations indexed from xlog here will be - * any ack'd writes that were not copied to this replica before promotion. 
- */ - private void promoteNRTReplicaToPrimary() { - assert shardRouting.primary() && indexSettings.isSegRepEnabled(); - getReplicationEngine().ifPresentOrElse(engine -> { - try { - engine.commitSegmentInfos(); - resetEngineToGlobalCheckpoint(); - } catch (IOException e) { - throw new EngineException(shardId, "Unable to update replica to writeable engine, failing shard", e); - } - }, () -> { throw new EngineException(shardId, "Expected replica engine to be of type NRTReplicationEngine"); }); - } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java b/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java index 3509615052707..ec3986017afac 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java +++ b/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java @@ -156,7 +156,10 @@ private void innerWriteFileChunk(StoreFileMetadata fileMetadata, long position, + temporaryFileName + "] in " + Arrays.toString(store.directory().listAll()); - store.directory().sync(Collections.singleton(temporaryFileName)); + // With Segment Replication, we will fsync after a full commit has been received. + if (store.indexSettings().isSegRepEnabled() == false) { + store.directory().sync(Collections.singleton(temporaryFileName)); + } IndexOutput remove = removeOpenIndexOutputs(name); assert remove == null || remove == indexOutput; // remove maybe null if we got finished } diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index 540054782133a..96d5573621683 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -11,14 +11,11 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; -import org.hamcrest.MatcherAssert; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; -import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.seqno.LocalCheckpointTracker; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.store.Store; @@ -36,17 +33,21 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; import static org.opensearch.index.seqno.SequenceNumbers.MAX_SEQ_NO; public class NRTReplicationEngineTests extends EngineTestCase { + private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings( + "index", + Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build() + ); + public void testCreateEngine() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try ( - final Store nrtEngineStore = createStore(); + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, 
nrtEngineStore) ) { final SegmentInfos latestSegmentInfos = nrtEngine.getLatestSegmentInfos(); @@ -70,7 +71,7 @@ public void testEngineWritesOpsToTranslog() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try ( - final Store nrtEngineStore = createStore(); + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) ) { List operations = generateHistoryOnReplica( @@ -93,6 +94,9 @@ public void testEngineWritesOpsToTranslog() throws Exception { // we don't index into nrtEngine, so get the doc ids from the regular engine. final List docs = getDocIds(engine, true); + // close the NRTEngine, it will commit on close and we'll reuse its store for an IE. + nrtEngine.close(); + // recover a new engine from the nrtEngine's xlog. nrtEngine.translogManager().syncTranslog(); try (InternalEngine engine = new InternalEngine(nrtEngine.config())) { @@ -104,88 +108,77 @@ public void testEngineWritesOpsToTranslog() throws Exception { } } - public void testUpdateSegments() throws Exception { + public void testUpdateSegments_replicaReceivesSISWithHigherGen() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try ( - final Store nrtEngineStore = createStore(); + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) ) { - // add docs to the primary engine. - List operations = generateHistoryOnReplica( - between(1, 500), - randomBoolean(), - randomBoolean(), - randomBoolean(), - Engine.Operation.TYPE.INDEX - ); - - for (Engine.Operation op : operations) { - applyOperation(engine, op); - applyOperation(nrtEngine, op); - } - - engine.refresh("test"); - - final SegmentInfos latestPrimaryInfos = engine.getLatestSegmentInfos(); - nrtEngine.updateSegments(latestPrimaryInfos, engine.getProcessedLocalCheckpoint()); - assertMatchingSegmentsAndCheckpoints(nrtEngine, latestPrimaryInfos); - - // assert a doc from the operations exists. - final ParsedDocument parsedDoc = createParsedDoc(operations.stream().findFirst().get().id(), null); - try (Engine.GetResult getResult = engine.get(newGet(true, parsedDoc), engine::acquireSearcher)) { - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - } - - try (Engine.GetResult getResult = nrtEngine.get(newGet(true, parsedDoc), nrtEngine::acquireSearcher)) { - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - } - - // Flush the primary and update the NRTEngine with the latest committed infos. - engine.flush(); - nrtEngine.translogManager().syncTranslog(); // to advance persisted checkpoint + // assume we start at the same gen. + assertEquals(2, nrtEngine.getLatestSegmentInfos().getGeneration()); + assertEquals(nrtEngine.getLatestSegmentInfos().getGeneration(), nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + assertEquals(engine.getLatestSegmentInfos().getGeneration(), nrtEngine.getLatestSegmentInfos().getGeneration()); + + // flush the primary engine - we don't need any segments, just force a new commit point. 
+ engine.flush(true, true); + assertEquals(3, engine.getLatestSegmentInfos().getGeneration()); + nrtEngine.updateSegments(engine.getLatestSegmentInfos(), engine.getProcessedLocalCheckpoint()); + assertEquals(3, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + assertEquals(3, nrtEngine.getLatestSegmentInfos().getGeneration()); + } + } - Set seqNos = operations.stream().map(Engine.Operation::seqNo).collect(Collectors.toSet()); + public void testUpdateSegments_replicaReceivesSISWithLowerGen() throws IOException { + // if the replica is already at segments_N that is received, it will commit segments_N+1. + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - nrtEngine.ensureOpen(); - try ( - Translog.Snapshot snapshot = assertAndGetInternalTranslogManager(nrtEngine.translogManager()).getTranslog().newSnapshot() - ) { - assertThat(snapshot.totalOperations(), equalTo(operations.size())); - assertThat( - TestTranslog.drainSnapshot(snapshot, false).stream().map(Translog.Operation::seqNo).collect(Collectors.toSet()), - equalTo(seqNos) - ); - } + try ( + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); + final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) + ) { + nrtEngine.getLatestSegmentInfos().changed(); + nrtEngine.getLatestSegmentInfos().changed(); + // commit the infos to push us to segments_3. + nrtEngine.commitSegmentInfos(); + assertEquals(3, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + assertEquals(3, nrtEngine.getLatestSegmentInfos().getGeneration()); - final SegmentInfos primaryInfos = engine.getLastCommittedSegmentInfos(); + // update the replica with segments_2 from the primary. + final SegmentInfos primaryInfos = engine.getLatestSegmentInfos(); + assertEquals(2, primaryInfos.getGeneration()); nrtEngine.updateSegments(primaryInfos, engine.getProcessedLocalCheckpoint()); - assertMatchingSegmentsAndCheckpoints(nrtEngine, primaryInfos); + assertEquals(4, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + assertEquals(4, nrtEngine.getLatestSegmentInfos().getGeneration()); + assertEquals(primaryInfos.getVersion(), nrtEngine.getLatestSegmentInfos().getVersion()); + assertEquals(primaryInfos.getVersion(), nrtEngine.getLastCommittedSegmentInfos().getVersion()); - assertEquals( - assertAndGetInternalTranslogManager(nrtEngine.translogManager()).getTranslog().getGeneration().translogFileGeneration, - assertAndGetInternalTranslogManager(engine.translogManager()).getTranslog().getGeneration().translogFileGeneration - ); + nrtEngine.close(); + assertEquals(5, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + } + } - try ( - Translog.Snapshot snapshot = assertAndGetInternalTranslogManager(nrtEngine.translogManager()).getTranslog().newSnapshot() - ) { - assertThat(snapshot.totalOperations(), equalTo(operations.size())); - assertThat( - TestTranslog.drainSnapshot(snapshot, false).stream().map(Translog.Operation::seqNo).collect(Collectors.toSet()), - equalTo(seqNos) - ); - } + public void testUpdateSegments_replicaCommitsFirstReceivedInfos() throws IOException { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - // Ensure the same hit count between engines. 
- int expectedDocCount; - try (final Engine.Searcher test = engine.acquireSearcher("test")) { - expectedDocCount = test.count(Queries.newMatchAllQuery()); - assertSearcherHits(nrtEngine, expectedDocCount); - } - assertEngineCleanedUp(nrtEngine, assertAndGetInternalTranslogManager(nrtEngine.translogManager()).getDeletionPolicy()); + try ( + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); + final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) + ) { + assertEquals(2, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + assertEquals(2, nrtEngine.getLatestSegmentInfos().getGeneration()); + // bump the latest infos version a couple of times so that we can assert the correct version after commit. + engine.getLatestSegmentInfos().changed(); + engine.getLatestSegmentInfos().changed(); + assertNotEquals(nrtEngine.getLatestSegmentInfos().getVersion(), engine.getLatestSegmentInfos().getVersion()); + + // update replica with the latest primary infos, it will be the same gen, segments_2, ensure it is also committed. + final SegmentInfos primaryInfos = engine.getLatestSegmentInfos(); + assertEquals(2, primaryInfos.getGeneration()); + nrtEngine.updateSegments(primaryInfos, engine.getProcessedLocalCheckpoint()); + final SegmentInfos lastCommittedSegmentInfos = nrtEngine.getLastCommittedSegmentInfos(); + assertEquals(primaryInfos.getVersion(), nrtEngine.getLatestSegmentInfos().getVersion()); + assertEquals(primaryInfos.getVersion(), lastCommittedSegmentInfos.getVersion()); } } @@ -193,7 +186,7 @@ public void testTrimTranslogOps() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try ( - final Store nrtEngineStore = createStore(); + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore); ) { List operations = generateHistoryOnReplica( @@ -227,12 +220,9 @@ public void testCommitSegmentInfos() throws Exception { // This test asserts that NRTReplication#commitSegmentInfos creates a new commit point with the latest checkpoints // stored in user data. 
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( - "index", - Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build() - ); + try ( - final Store nrtEngineStore = createStore(indexSettings, newDirectory()); + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) ) { List operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean(), randomBoolean()) @@ -268,22 +258,6 @@ public void testCommitSegmentInfos() throws Exception { } } - private void assertMatchingSegmentsAndCheckpoints(NRTReplicationEngine nrtEngine, SegmentInfos expectedSegmentInfos) - throws IOException { - assertEquals(engine.getPersistedLocalCheckpoint(), nrtEngine.getPersistedLocalCheckpoint()); - assertEquals(engine.getProcessedLocalCheckpoint(), nrtEngine.getProcessedLocalCheckpoint()); - assertEquals(engine.getLocalCheckpointTracker().getMaxSeqNo(), nrtEngine.getLocalCheckpointTracker().getMaxSeqNo()); - assertEquals(expectedSegmentInfos.files(true), nrtEngine.getLatestSegmentInfos().files(true)); - assertEquals(expectedSegmentInfos.getUserData(), nrtEngine.getLatestSegmentInfos().getUserData()); - assertEquals(expectedSegmentInfos.getVersion(), nrtEngine.getLatestSegmentInfos().getVersion()); - } - - private void assertSearcherHits(Engine engine, int hits) { - try (final Engine.Searcher test = engine.acquireSearcher("test")) { - MatcherAssert.assertThat(test, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(hits)); - } - } - private NRTReplicationEngine buildNrtReplicaEngine(AtomicLong globalCheckpoint, Store store) throws IOException { Lucene.cleanLuceneIndex(store.directory()); final Path translogDir = createTempDir(); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 3af882a8087ec..007317f6e71cd 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -8,6 +8,8 @@ package org.opensearch.index.shard; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.SegmentInfos; import org.junit.Assert; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; @@ -15,6 +17,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -48,6 +51,7 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -66,7 +70,7 @@ public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelRepli .build(); /** - * Test that latestReplicationCheckpoint returns null only for docrep enabled indices + * Test that latestReplicationCheckpoint returns null only for docrep enabled indices */ public void testReplicationCheckpointNullForDocRep() throws IOException { Settings 
indexSettings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, "DOCUMENT").put(Settings.EMPTY).build(); @@ -76,11 +80,10 @@ public void testReplicationCheckpointNullForDocRep() throws IOException { } /** - * Test that latestReplicationCheckpoint returns ReplicationCheckpoint for segrep enabled indices + * Test that latestReplicationCheckpoint returns ReplicationCheckpoint for segrep enabled indices */ - public void testReplicationCheckpointNotNullForSegReb() throws IOException { - Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT").put(Settings.EMPTY).build(); - final IndexShard indexShard = newStartedShard(indexSettings); + public void testReplicationCheckpointNotNullForSegRep() throws IOException { + final IndexShard indexShard = newStartedShard(randomBoolean(), settings, new NRTReplicationEngineFactory()); final ReplicationCheckpoint replicationCheckpoint = indexShard.getLatestReplicationCheckpoint(); assertNotNull(replicationCheckpoint); closeShards(indexShard); @@ -205,6 +208,132 @@ public void testPublishCheckpointAfterRelocationHandOff() throws IOException { closeShards(shard); } + public void testReplicaReceivesGenIncrease() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + final int numDocs = randomIntBetween(10, 100); + shards.indexDocs(numDocs); + flushShard(primary, true); + replicateSegments(primary, shards.getReplicas()); + + final int totalDocs = numDocs + shards.indexDocs(randomIntBetween(numDocs + 1, numDocs + 10)); + flushShard(primary); + replicateSegments(primary, shards.getReplicas()); + + assertEqualCommittedSegments(primary, replica); + assertDocCount(primary, totalDocs); + assertDocCount(replica, totalDocs); + } + } + + public void testReplicaReceivesLowerGeneration() throws Exception { + // when a replica gets incoming segments that are lower than what it currently has on disk. + + // start 3 nodes Gens: P [2], R [2], R[2] + // index some docs and flush twice, push to only 1 replica. + // State Gens: P [4], R-1 [3], R-2 [2] + // Promote R-2 as the new primary and demote the old primary. + // State Gens: R[4], R-1 [3], P [4] - *commit on close of NRTEngine, xlog replayed and commit made. + // index docs on new primary and flush + // replicate to all. 
+ // Expected result: State Gens: P[4], R-1 [4], R-2 [4] + try (ReplicationGroup shards = createGroup(2, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primary = shards.getPrimary(); + final IndexShard replica_1 = shards.getReplicas().get(0); + final IndexShard replica_2 = shards.getReplicas().get(1); + int numDocs = randomIntBetween(10, 100); + shards.indexDocs(numDocs); + flushShard(primary, false); + replicateSegments(primary, List.of(replica_1)); + numDocs = randomIntBetween(numDocs + 1, numDocs + 10); + shards.indexDocs(numDocs); + flushShard(primary, false); + assertLatestCommitGen(4, primary); + replicateSegments(primary, List.of(replica_1)); + + assertEqualCommittedSegments(primary, replica_1); + assertLatestCommitGen(4, primary, replica_1); + assertLatestCommitGen(2, replica_2); + + shards.promoteReplicaToPrimary(replica_2).get(); + primary.close("demoted", false); + primary.store().close(); + IndexShard oldPrimary = shards.addReplicaWithExistingPath(primary.shardPath(), primary.routingEntry().currentNodeId()); + shards.recoverReplica(oldPrimary); + assertLatestCommitGen(4, oldPrimary); + assertEqualCommittedSegments(oldPrimary, replica_1); + + assertLatestCommitGen(4, replica_2); + + numDocs = randomIntBetween(numDocs + 1, numDocs + 10); + shards.indexDocs(numDocs); + flushShard(replica_2, false); + replicateSegments(replica_2, shards.getReplicas()); + assertEqualCommittedSegments(replica_2, oldPrimary, replica_1); + } + } + + public void testReplicaRestarts() throws Exception { + try (ReplicationGroup shards = createGroup(3, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point. + final int numDocs = shards.indexDocs(randomInt(10)); + + // refresh and copy the segments over. + if (randomBoolean()) { + flushShard(primary); + } + primary.refresh("Test"); + replicateSegments(primary, shards.getReplicas()); + + // at this point both shards should have numDocs persisted and searchable. + assertDocCounts(primary, numDocs, numDocs); + for (IndexShard shard : shards.getReplicas()) { + assertDocCounts(shard, numDocs, numDocs); + } + + final int i1 = randomInt(5); + for (int i = 0; i < i1; i++) { + shards.indexDocs(randomInt(10)); + + // randomly restart a replica + final IndexShard replicaToRestart = getRandomReplica(shards); + replicaToRestart.close("restart", false); + replicaToRestart.store().close(); + shards.removeReplica(replicaToRestart); + final IndexShard newReplica = shards.addReplicaWithExistingPath( + replicaToRestart.shardPath(), + replicaToRestart.routingEntry().currentNodeId() + ); + shards.recoverReplica(newReplica); + + // refresh and push segments to our other replicas. + if (randomBoolean()) { + failAndPromoteRandomReplica(shards); + } + flushShard(shards.getPrimary()); + replicateSegments(shards.getPrimary(), shards.getReplicas()); + } + primary = shards.getPrimary(); + + // refresh and push segments to our other replica.
+ flushShard(primary); + replicateSegments(primary, shards.getReplicas()); + + for (IndexShard shard : shards) { + assertConsistentHistoryBetweenTranslogAndLucene(shard); + } + final List docsAfterReplication = getDocIdAndSeqNos(shards.getPrimary()); + for (IndexShard shard : shards.getReplicas()) { + assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterReplication)); + } + } + } + public void testNRTReplicaPromotedAsPrimary() throws Exception { try (ReplicationGroup shards = createGroup(2, settings, new NRTReplicationEngineFactory())) { shards.startAll(); @@ -523,4 +652,39 @@ public void onReplicationFailure(SegmentReplicationState state, OpenSearchExcept assertEquals("Should have resolved listener with failure", 0, latch.getCount()); assertNull(targetService.get(target.getId())); } + + private IndexShard getRandomReplica(ReplicationGroup shards) { + return shards.getReplicas().get(randomInt(shards.getReplicas().size() - 1)); + } + + private IndexShard failAndPromoteRandomReplica(ReplicationGroup shards) throws IOException { + IndexShard primary = shards.getPrimary(); + final IndexShard newPrimary = getRandomReplica(shards); + shards.promoteReplicaToPrimary(newPrimary); + primary.close("demoted", true); + primary.store().close(); + primary = shards.addReplicaWithExistingPath(primary.shardPath(), primary.routingEntry().currentNodeId()); + shards.recoverReplica(primary); + return newPrimary; + } + + private void assertLatestCommitGen(long expected, IndexShard... shards) throws IOException { + for (IndexShard indexShard : shards) { + try (final GatedCloseable commit = indexShard.acquireLastIndexCommit(false)) { + assertEquals(expected, commit.get().getGeneration()); + } + } + } + + private void assertEqualCommittedSegments(IndexShard primary, IndexShard... 
replicas) throws IOException { + for (IndexShard replica : replicas) { + final SegmentInfos replicaInfos = replica.store().readLastCommittedSegmentsInfo(); + final SegmentInfos primaryInfos = primary.store().readLastCommittedSegmentsInfo(); + final Map latestReplicaMetadata = replica.store().getSegmentMetadataMap(replicaInfos); + final Map latestPrimaryMetadata = primary.store().getSegmentMetadataMap(primaryInfos); + final Store.RecoveryDiff diff = Store.segmentReplicationDiff(latestPrimaryMetadata, latestReplicaMetadata); + assertTrue(diff.different.isEmpty()); + assertTrue(diff.missing.isEmpty()); + } + } } diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index 3ea74dbf38919..cc5100fba9010 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -61,6 +61,7 @@ import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.engine.InternalEngineTests; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.replication.RecoveryDuringReplicationTests; @@ -106,7 +107,7 @@ public void testTranslogHistoryTransferred() throws Exception { public void testWithSegmentReplication_ReplicaUsesPrimaryTranslogUUID() throws Exception { Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); - try (ReplicationGroup shards = createGroup(2, settings)) { + try (ReplicationGroup shards = createGroup(2, settings, new NRTReplicationEngineFactory())) { shards.startAll(); final String expectedUUID = getTranslog(shards.getPrimary()).getTranslogUUID(); assertTrue( diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index af754d77560cc..f4a9f51789679 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -1507,10 +1507,10 @@ public static MapperService createMapperService() throws IOException { * Exposes a translog associated with the given engine for testing purpose. 
*/ public static Translog getTranslog(Engine engine) { - assert engine instanceof InternalEngine : "only InternalEngines have translogs, got: " + engine.getClass(); - InternalEngine internalEngine = (InternalEngine) engine; - internalEngine.ensureOpen(); - TranslogManager translogManager = internalEngine.translogManager(); + assert engine instanceof InternalEngine || engine instanceof NRTReplicationEngine + : "only InternalEngines or NRTReplicationEngines have translogs, got: " + engine.getClass(); + engine.ensureOpen(); + TranslogManager translogManager = engine.translogManager(); assert translogManager instanceof InternalTranslogManager : "only InternalTranslogManager have translogs, got: " + engine.getClass(); InternalTranslogManager internalTranslogManager = (InternalTranslogManager) translogManager; diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 073dc4b84472e..09eca006d600a 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -139,6 +139,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -1252,10 +1253,10 @@ public final List replicateSegments( List replicaShards ) throws IOException, InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); - Store.MetadataSnapshot primaryMetadata; + Map primaryMetadata; try (final GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); - primaryMetadata = primaryShard.store().getMetadata(primarySegmentInfos); + primaryMetadata = primaryShard.store().getSegmentMetadataMap(primarySegmentInfos); } List ids = new ArrayList<>(); for (IndexShard replica : replicaShards) { @@ -1267,12 +1268,11 @@ public final List replicateSegments( public void onReplicationDone(SegmentReplicationState state) { try (final GatedCloseable snapshot = replica.getSegmentInfosSnapshot()) { final SegmentInfos replicaInfos = snapshot.get(); - final Store.MetadataSnapshot replicaMetadata = replica.store().getMetadata(replicaInfos); - final Store.RecoveryDiff recoveryDiff = primaryMetadata.recoveryDiff(replicaMetadata); + final Map replicaMetadata = replica.store().getSegmentMetadataMap(replicaInfos); + final Store.RecoveryDiff recoveryDiff = Store.segmentReplicationDiff(primaryMetadata, replicaMetadata); assertTrue(recoveryDiff.missing.isEmpty()); assertTrue(recoveryDiff.different.isEmpty()); assertEquals(recoveryDiff.identical.size(), primaryMetadata.size()); - assertEquals(primaryMetadata.getCommitUserData(), replicaMetadata.getCommitUserData()); } catch (Exception e) { throw ExceptionsHelper.convertToRuntime(e); } finally { From dac99d543b50055ccd8354fd9f93eb4efd9e359d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Sep 2022 14:53:25 -0400 Subject: [PATCH 45/78] Bump reactor-netty-core from 1.0.19 to 1.0.22 in /plugins/repository-azure (#4447) * Bump reactor-netty-core in /plugins/repository-azure Bumps [reactor-netty-core](https://github.com/reactor/reactor-netty) from 1.0.19 to 1.0.22. 
- [Release notes](https://github.com/reactor/reactor-netty/releases) - [Commits](https://github.com/reactor/reactor-netty/compare/v1.0.19...v1.0.22) --- updated-dependencies: - dependency-name: io.projectreactor.netty:reactor-netty-core dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 3 ++- plugins/repository-azure/build.gradle | 2 +- .../licenses/reactor-netty-core-1.0.19.jar.sha1 | 1 - .../licenses/reactor-netty-core-1.0.22.jar.sha1 | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.0.19.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index bf5e1a70a493a..ec8441831448f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Support for labels on version bump PRs, skip label support for changelog verifier ([#4391](https://github.com/opensearch-project/OpenSearch/pull/4391)) ### Dependencies - Bumps `org.gradle.test-retry` from 1.4.0 to 1.4.1 +- Bumps `reactor-netty-core` from 1.0.19 to 1.0.22 ### Dependencies - Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0 @@ -77,4 +78,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 1af2de4f176f2..08cd32e80a7ca 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -58,7 +58,7 @@ dependencies { api 'org.reactivestreams:reactive-streams:1.0.3' api 'io.projectreactor:reactor-core:3.4.18' api 'io.projectreactor.netty:reactor-netty:1.0.18' - api 'io.projectreactor.netty:reactor-netty-core:1.0.19' + api 'io.projectreactor.netty:reactor-netty-core:1.0.22' api 'io.projectreactor.netty:reactor-netty-http:1.0.18' api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.0.19.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.0.19.jar.sha1 deleted file mode 100644 index 74df264a2b908..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.0.19.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adb58ba62d297b56d6b7915a50f048eddcfc81a6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 new file mode 100644 index 0000000000000..4c82e37d27043 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.0.22.jar.sha1 @@ -0,0 +1 @@ +5c2a258ac71e525c65f2e3a0bcf458b6c79bbc16 \ No newline at end of file From e305e1e45fae143e5dcf023bd55f4992814a2bfa Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Thu, 8 Sep 2022 09:46:23 -0700 Subject: [PATCH 46/78] Add bwcVersion 2.4.0 (#4455) * Add bwcVersion 
2.4.0 Signed-off-by: Suraj Singh * Add changelog entry Signed-off-by: Suraj Singh Signed-off-by: Suraj Singh --- .ci/bwcVersions | 1 + CHANGELOG.md | 4 +++- server/src/main/java/org/opensearch/Version.java | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 914426eebe35e..1dc8dc955f7c6 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -51,3 +51,4 @@ BWC_VERSION: - "2.2.1" - "2.2.2" - "2.3.0" + - "2.4.0" diff --git a/CHANGELOG.md b/CHANGELOG.md index ec8441831448f..303934c7995b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) - BWC version 2.2.2 ([#4383](https://github.com/opensearch-project/OpenSearch/pull/4383)) - Support for labels on version bump PRs, skip label support for changelog verifier ([#4391](https://github.com/opensearch-project/OpenSearch/pull/4391)) +- Update previous release bwc version to 2.4.0 ([#4455](https://github.com/opensearch-project/OpenSearch/pull/4455)) + ### Dependencies - Bumps `org.gradle.test-retry` from 1.4.0 to 1.4.1 - Bumps `reactor-netty-core` from 1.0.19 to 1.0.22 @@ -78,4 +80,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 10e5f16419a7a..978f0ee2186f2 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -98,6 +98,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_2_1 = new Version(2020199, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_2_2 = new Version(2020299, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0); public static final Version CURRENT = V_3_0_0; From 517c19c9725e53663a280f9025679c2f9e659948 Mon Sep 17 00:00:00 2001 From: Andrija Pantovic Date: Thu, 8 Sep 2022 23:53:36 +0200 Subject: [PATCH 47/78] add support for s390x architecture (#4001) * add s390x support in systemcallfilter https://github.com/opensearch-project/OpenSearch/issues/4000 Signed-off-by: Andrija Pantovic * add gradle cfg for s390x Signed-off-by: Andrija Pantovic * change assertion for architecture Signed-off-by: Andrija Pantovic * change assertion for architecture Signed-off-by: Andrija Pantovic * update changelog Signed-off-by: Andrija Pantovic Signed-off-by: Andrija Pantovic Signed-off-by: Andrija Pantovic --- CHANGELOG.md | 1 + .../org/opensearch/gradle/Architecture.java | 5 ++- .../gradle/DistributionDownloadPlugin.java | 3 ++ .../main/java/org/opensearch/gradle/Jdk.java | 2 +- .../opensearch/gradle/ArchitectureTests.java | 45 +++++++++++++++++++ .../gradle/JdkDownloadPluginTests.java | 2 +- distribution/archives/build.gradle | 7 +++ distribution/build.gradle | 4 +- 
distribution/docker/build.gradle | 8 ++++ .../docker/docker-s390x-export/build.gradle | 13 ++++++ .../bootstrap/SystemCallFilter.java | 1 + settings.gradle | 2 + 12 files changed, 88 insertions(+), 5 deletions(-) create mode 100644 buildSrc/src/test/java/org/opensearch/gradle/ArchitectureTests.java create mode 100644 distribution/docker/docker-s390x-export/build.gradle diff --git a/CHANGELOG.md b/CHANGELOG.md index 303934c7995b2..75fde94ee7878 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] ### Added +- Add support for s390x architecture ([#4001](https://github.com/opensearch-project/OpenSearch/pull/4001)) - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Point in time rest layer changes for create and delete PIT API ([#4064](https://github.com/opensearch-project/OpenSearch/pull/4064)) - Added @dreamer-89 as an Opensearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342)) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/Architecture.java b/buildSrc/src/main/java/org/opensearch/gradle/Architecture.java index 38d6db8c9916e..2bd87d6fa50b2 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/Architecture.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/Architecture.java @@ -35,7 +35,8 @@ public enum Architecture { X64, - ARM64; + ARM64, + S390X; public static Architecture current() { final String architecture = System.getProperty("os.arch", ""); @@ -45,6 +46,8 @@ public static Architecture current() { return X64; case "aarch64": return ARM64; + case "s390x": + return S390X; default: throw new IllegalArgumentException("can not determine architecture from [" + architecture + "]"); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java index fccdc49ef6fc9..ae7b0d938e8ef 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java @@ -247,6 +247,9 @@ private String dependencyNotation(OpenSearchDistribution distribution) { case X64: classifier = ":" + distribution.getPlatform() + "-x64"; break; + case S390X: + classifier = ":" + distribution.getPlatform() + "-s390x"; + break; default: throw new IllegalArgumentException("Unsupported architecture: " + distribution.getArchitecture()); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java b/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java index 53fd998bcc53f..4b289de3f0619 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java @@ -48,7 +48,7 @@ public class Jdk implements Buildable, Iterable { - private static final List ALLOWED_ARCHITECTURES = Collections.unmodifiableList(Arrays.asList("aarch64", "x64")); + private static final List ALLOWED_ARCHITECTURES = Collections.unmodifiableList(Arrays.asList("aarch64", "x64", "s390x")); private static final List ALLOWED_VENDORS = Collections.unmodifiableList(Arrays.asList("adoptium", "adoptopenjdk", "openjdk")); private static final List ALLOWED_PLATFORMS = Collections.unmodifiableList( Arrays.asList("darwin", "freebsd", "linux", "mac", "windows") diff --git a/buildSrc/src/test/java/org/opensearch/gradle/ArchitectureTests.java 
b/buildSrc/src/test/java/org/opensearch/gradle/ArchitectureTests.java new file mode 100644 index 0000000000000..05f920c6c9248 --- /dev/null +++ b/buildSrc/src/test/java/org/opensearch/gradle/ArchitectureTests.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gradle; + +import org.opensearch.gradle.test.GradleUnitTestCase; + +public class ArchitectureTests extends GradleUnitTestCase { + + final String architecture = System.getProperty("os.arch", ""); + + public void testCurrentArchitecture() { + assertEquals(Architecture.X64, currentArchitecture("amd64")); + assertEquals(Architecture.X64, currentArchitecture("x86_64")); + assertEquals(Architecture.ARM64, currentArchitecture("aarch64")); + assertEquals(Architecture.S390X, currentArchitecture("s390x")); + } + + public void testInvalidCurrentArchitecture() { + assertThrows("can not determine architecture from [", IllegalArgumentException.class, () -> currentArchitecture("fooBar64")); + } + + /** + * Determines the return value of {@link Architecture#current()} based on a string representing a potential OS Architecture. + * + * @param osArchToTest An expected value of the {@code os.arch} system property on another architecture. + * @return the value of the {@link Architecture} enum which would have resulted with the given value. + * @throws IllegalArgumentException if the string is not mapped to a value of the {@link Architecture} enum. + */ + private Architecture currentArchitecture(String osArchToTest) throws IllegalArgumentException { + // Test new architecture + System.setProperty("os.arch", osArchToTest); + try { + return Architecture.current(); + } finally { + // Restore actual architecture property value + System.setProperty("os.arch", this.architecture); + } + } +} diff --git a/buildSrc/src/test/java/org/opensearch/gradle/JdkDownloadPluginTests.java b/buildSrc/src/test/java/org/opensearch/gradle/JdkDownloadPluginTests.java index 4dcc65cca4c62..ad17032e718d2 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/JdkDownloadPluginTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/JdkDownloadPluginTests.java @@ -108,7 +108,7 @@ public void testUnknownArchitecture() { "11.0.2+33", "linux", "unknown", - "unknown architecture [unknown] for jdk [testjdk], must be one of [aarch64, x64]" + "unknown architecture [unknown] for jdk [testjdk], must be one of [aarch64, x64, s390x]" ); } diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index ac70ee04444c7..1376b8d419f6e 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -151,6 +151,13 @@ distribution_archives { } } + linuxS390xTar { + archiveClassifier = 'linux-s390x' + content { + archiveFiles(modulesFiles('linux-s390x'), 'tar', 'linux', 's390x', false) + } + } + windowsZip { archiveClassifier = 'windows-x64' content { diff --git a/distribution/build.gradle b/distribution/build.gradle index 21b7d85a7ef2b..ee9016210efc7 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -280,7 +280,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { // Setup all required JDKs project.jdks { ['darwin', 'linux', 'windows'].each { platform -> - (platform == 'linux' || platform == 'darwin' ? 
['x64', 'aarch64'] : ['x64']).each { architecture -> + (platform == 'linux' || platform == 'darwin' ? ['x64', 'aarch64', 's390x'] : ['x64']).each { architecture -> "bundled_${platform}_${architecture}" { it.platform = platform it.version = VersionProperties.getBundledJdk(platform) @@ -353,7 +353,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } def buildModules = buildModulesTaskProvider - List excludePlatforms = ['darwin-x64', 'freebsd-x64', 'linux-x64', 'linux-arm64', 'windows-x64', 'darwin-arm64'] + List excludePlatforms = ['darwin-x64', 'freebsd-x64', 'linux-x64', 'linux-arm64', 'linux-s390x', 'windows-x64', 'darwin-arm64'] if (platform != null) { excludePlatforms.remove(excludePlatforms.indexOf(platform)) } else { diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index f5d8048a06276..7e0007f04c940 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -27,11 +27,13 @@ testFixtures.useFixture() configurations { arm64DockerSource + s390xDockerSource dockerSource } dependencies { arm64DockerSource project(path: ":distribution:archives:linux-arm64-tar", configuration:"default") + s390xDockerSource project(path: ":distribution:archives:linux-s390x-tar", configuration:"default") dockerSource project(path: ":distribution:archives:linux-tar", configuration:"default") } @@ -42,6 +44,8 @@ ext.expansions = { Architecture architecture, DockerBase base, boolean local -> classifier = "linux-arm64" } else if (architecture == Architecture.X64) { classifier = "linux-x64" + } else if (architecture == Architecture.S390X) { + classifier = "linux-s390x" } else { throw new IllegalArgumentException("Unsupported architecture [" + architecture + "]") } @@ -85,12 +89,14 @@ RUN curl --retry 8 -S -L \\ private static String buildPath(Architecture architecture, DockerBase base) { return 'build/' + (architecture == Architecture.ARM64 ? 'arm64-' : '') + + (architecture == Architecture.S390X ? 's390x-' : '') + 'docker' } private static String taskName(String prefix, Architecture architecture, DockerBase base, String suffix) { return prefix + (architecture == Architecture.ARM64 ? 'Arm64' : '') + + (architecture == Architecture.S390X ? 'S390x' : '') + suffix } @@ -127,6 +133,8 @@ void addCopyDockerContextTask(Architecture architecture, DockerBase base) { if (architecture == Architecture.ARM64) { from configurations.arm64DockerSource + } else if (architecture == Architecture.S390X) { + from configurations.s390xDockerSource } else { from configurations.dockerSource } diff --git a/distribution/docker/docker-s390x-export/build.gradle b/distribution/docker/docker-s390x-export/build.gradle new file mode 100644 index 0000000000000..3506c4e39c234 --- /dev/null +++ b/distribution/docker/docker-s390x-export/build.gradle @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// export is done in the parent project. 
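The s390x support in this patch pivots on a single signal: the JVM's os.arch system property. Architecture.current() maps that property to an enum constant, and the Gradle logic above derives distribution classifiers such as linux-s390x from the constant. Below is a minimal, self-contained sketch of that detection flow (illustrative only, not part of the patch; the class name ArchDetectionSketch is invented here, while the mapping and error message mirror the Architecture.java change above).

public final class ArchDetectionSketch {
    enum Architecture { X64, ARM64, S390X }

    // Mirrors the switch added to Architecture.current() in this patch.
    static Architecture fromOsArch(String osArch) {
        switch (osArch) {
            case "amd64":
            case "x86_64":
                return Architecture.X64;    // build selects linux-x64 artifacts
            case "aarch64":
                return Architecture.ARM64;  // build selects linux-arm64 artifacts
            case "s390x":
                return Architecture.S390X;  // new: build selects linux-s390x artifacts
            default:
                throw new IllegalArgumentException("can not determine architecture from [" + osArch + "]");
        }
    }

    public static void main(String[] args) {
        // On an IBM Z host the JVM reports "s390x", so the build would pick the
        // linux-s390x archive and bundled JDK introduced by this patch.
        System.out.println(fromOsArch(System.getProperty("os.arch", "")));
    }
}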
diff --git a/server/src/main/java/org/opensearch/bootstrap/SystemCallFilter.java b/server/src/main/java/org/opensearch/bootstrap/SystemCallFilter.java index f8baee06c4315..7d567d73851a9 100644 --- a/server/src/main/java/org/opensearch/bootstrap/SystemCallFilter.java +++ b/server/src/main/java/org/opensearch/bootstrap/SystemCallFilter.java @@ -259,6 +259,7 @@ static class Arch { Map m = new HashMap<>(); m.put("amd64", new Arch(0xC000003E, 0x3FFFFFFF, 57, 58, 59, 322, 317)); m.put("aarch64", new Arch(0xC00000B7, 0xFFFFFFFF, 1079, 1071, 221, 281, 277)); + m.put("s390x", new Arch(0x80000016, 0xFFFFFFFF, 2, 190, 11, 354, 348)); ARCHITECTURES = Collections.unmodifiableMap(m); } diff --git a/settings.gradle b/settings.gradle index 4c389b5490e7c..92e07cbb2e7fb 100644 --- a/settings.gradle +++ b/settings.gradle @@ -47,11 +47,13 @@ List projects = [ 'distribution:archives:freebsd-tar', 'distribution:archives:no-jdk-freebsd-tar', 'distribution:archives:linux-arm64-tar', + 'distribution:archives:linux-s390x-tar', 'distribution:archives:linux-tar', 'distribution:archives:no-jdk-linux-tar', 'distribution:docker', 'distribution:docker:docker-arm64-build-context', 'distribution:docker:docker-arm64-export', + 'distribution:docker:docker-s390x-export', 'distribution:docker:docker-build-context', 'distribution:docker:docker-export', 'distribution:packages:arm64-deb', From 54364a5d45ed0a20de42abb53cca8f33cfed88eb Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Thu, 8 Sep 2022 19:54:06 -0700 Subject: [PATCH 48/78] 2.3.0 release notes (#4457) (#4464) * 2.3.0 release notes Signed-off-by: Suraj Singh * Add changelog entry Signed-off-by: Suraj Singh Signed-off-by: Suraj Singh Signed-off-by: Suraj Singh --- CHANGELOG.md | 1 + .../opensearch.release-notes-2.3.0.md | 55 +++++++++++++++++++ 2 files changed, 56 insertions(+) create mode 100644 release-notes/opensearch.release-notes-2.3.0.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 75fde94ee7878..a10824a56af05 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - BWC version 2.2.2 ([#4383](https://github.com/opensearch-project/OpenSearch/pull/4383)) - Support for labels on version bump PRs, skip label support for changelog verifier ([#4391](https://github.com/opensearch-project/OpenSearch/pull/4391)) - Update previous release bwc version to 2.4.0 ([#4455](https://github.com/opensearch-project/OpenSearch/pull/4455)) +- 2.3.0 release notes ([#4457](https://github.com/opensearch-project/OpenSearch/pull/4457)) ### Dependencies - Bumps `org.gradle.test-retry` from 1.4.0 to 1.4.1 diff --git a/release-notes/opensearch.release-notes-2.3.0.md b/release-notes/opensearch.release-notes-2.3.0.md new file mode 100644 index 0000000000000..1532ab31106f7 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.3.0.md @@ -0,0 +1,55 @@ +## 2022-09-08 Version 2.3.0 Release Notes + +### Features/Enhancements +* [Backport to 2.x] [Segment Replication] - Update replicas to commit SegmentInfos instead of relying on segments_N from primary shards. ([#4450](https://github.com/opensearch-project/opensearch/pull/4450)) +* [Segment Replication] [Backport] Fix timeout issue by calculating time needed to process getSegmentFiles. 
([#4434](https://github.com/opensearch-project/opensearch/pull/4434))
+* [Segment Replication] Update flaky testOnNewCheckpointFromNewPrimaryCancelOngoingReplication unit test ([#4414](https://github.com/opensearch-project/opensearch/pull/4414)) ([#4425](https://github.com/opensearch-project/opensearch/pull/4425))
+* [Segment Replication] Extend FileChunkWriter to allow cancel on transport client ([#4386](https://github.com/opensearch-project/opensearch/pull/4386)) ([#4424](https://github.com/opensearch-project/opensearch/pull/4424))
+* Segment Replication - Fix NoSuchFileException errors caused when computing metadata snapshot on primary shards. ([#4366](https://github.com/opensearch-project/opensearch/pull/4366)) ([#4422](https://github.com/opensearch-project/opensearch/pull/4422))
+* [Remote Store] Add index specific setting for remote repository ([#4253](https://github.com/opensearch-project/opensearch/pull/4253)) ([#4418](https://github.com/opensearch-project/opensearch/pull/4418))
+* [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica ([#4363](https://github.com/opensearch-project/opensearch/pull/4363)) ([#4396](https://github.com/opensearch-project/opensearch/pull/4396))
+* [Segment Replication] Bump segment infos counter before commit during replica promotion ([#4365](https://github.com/opensearch-project/opensearch/pull/4365)) ([#4397](https://github.com/opensearch-project/opensearch/pull/4397))
+* Segment Replication - Implement segment replication event cancellation. ([#4225](https://github.com/opensearch-project/opensearch/pull/4225)) ([#4387](https://github.com/opensearch-project/opensearch/pull/4387))
+* [Backport 2.x] [Remote Store] Backport remote segment store changes ([#4380](https://github.com/opensearch-project/opensearch/pull/4380))
+* [Backport 2.x] Added timing data and more granular stages to SegmentReplicationState ([#4367](https://github.com/opensearch-project/opensearch/pull/4367))
+* [Backport 2.x] Support shard promotion with Segment Replication. ([#4135](https://github.com/opensearch-project/opensearch/pull/4135)) ([#4325](https://github.com/opensearch-project/opensearch/pull/4325))
+* [Segment Replication] Update PrimaryShardAllocator to prefer replicas with higher replication checkpoint ([#4041](https://github.com/opensearch-project/opensearch/pull/4041)) ([#4252](https://github.com/opensearch-project/opensearch/pull/4252))
+* [Backport 2.x] [Segment Replication] Backport all PR's containing remaining segment replication changes ([#4243](https://github.com/opensearch-project/opensearch/pull/4243))
+* [Backport 2.x] [Segment Replication] Backport PR's : #3525 #3533 #3540 #3943 #3963 From main branch ([#4181](https://github.com/opensearch-project/opensearch/pull/4181))
+* [Backport 2.x] [Segment Replication] Added source-side classes for orchestrating replication events.
([#4128](https://github.com/opensearch-project/opensearch/pull/4128)) + +### Bug Fixes +* [Bug]: gradle check failing with java heap OutOfMemoryError ([#4328](https://github.com/opensearch-project/opensearch/pull/4328)) ([#4442](https://github.com/opensearch-project/opensearch/pull/4442)) +* [Backport 2.x] Revert to Netty 4.1.79.Final ([#4432](https://github.com/opensearch-project/opensearch/pull/4432)) +* Bug fixes for dependabot changelog verifier ([#4364](https://github.com/opensearch-project/opensearch/pull/4364)) ([#4395](https://github.com/opensearch-project/opensearch/pull/4395)) +* [BUG] Create logs directory before running OpenSearch on Windows ([#4305](https://github.com/opensearch-project/opensearch/pull/4305)) ([#4335](https://github.com/opensearch-project/opensearch/pull/4335)) +* [BUG] Running "opensearch-service.bat start" and "opensearch-service.bat manager" ([#4289](https://github.com/opensearch-project/opensearch/pull/4289)) ([#4293](https://github.com/opensearch-project/opensearch/pull/4293)) +* [Backport 2.x] Do not fail replica shard due to primary closure ([#4309](https://github.com/opensearch-project/opensearch/pull/4309)) +* [Bug]: gradle check failing with java heap OutOfMemoryError ([#4150](https://github.com/opensearch-project/opensearch/pull/4150)) ([#4167](https://github.com/opensearch-project/opensearch/pull/4167)) +* OpenSearch crashes on closed client connection before search reply when total ops higher compared to expected ([#4143](https://github.com/opensearch-project/opensearch/pull/4143)) ([#4144](https://github.com/opensearch-project/opensearch/pull/4144)) + +### Infrastructure +* Add workflow for changelog verification ([#4085](https://github.com/opensearch-project/opensearch/pull/4085)) ([#4284](https://github.com/opensearch-project/opensearch/pull/4284)) +* Add 2.x version to CHANGELOG ([#4297](https://github.com/opensearch-project/opensearch/pull/4297)) ([#4303](https://github.com/opensearch-project/opensearch/pull/4303)) +* Update the head ref to changelog verifier ([#4296](https://github.com/opensearch-project/opensearch/pull/4296)) ([#4298](https://github.com/opensearch-project/opensearch/pull/4298)) +* Publish transport-netty4 module to central repository ([#4054](https://github.com/opensearch-project/opensearch/pull/4054)) ([#4078](https://github.com/opensearch-project/opensearch/pull/4078)) + +### Maintenance +* Add bwcVersion 1.3.6 to 2.x ([#4452](https://github.com/opensearch-project/opensearch/pull/4452)) +* [AUTO] [2.x] Added bwc version 2.2.2. ([#4385](https://github.com/opensearch-project/opensearch/pull/4385)) +* Update to Netty 4.1.80.Final ([#4359](https://github.com/opensearch-project/opensearch/pull/4359)) ([#4374](https://github.com/opensearch-project/opensearch/pull/4374)) +* Adding @dreamer-89 to Opensearch maintainers. 
([#4342](https://github.com/opensearch-project/opensearch/pull/4342)) ([#4345](https://github.com/opensearch-project/opensearch/pull/4345)) +* [CVE] Update snakeyaml dependency ([#4341](https://github.com/opensearch-project/opensearch/pull/4341)) ([#4347](https://github.com/opensearch-project/opensearch/pull/4347)) +* Some dependency updates ([#4308](https://github.com/opensearch-project/opensearch/pull/4308)) ([#4311](https://github.com/opensearch-project/opensearch/pull/4311)) +* Added bwc version 2.2.1 ([#4193](https://github.com/opensearch-project/opensearch/pull/4193)) +* Update Gradle to 7.5.1 ([#4211](https://github.com/opensearch-project/opensearch/pull/4211)) ([#4213](https://github.com/opensearch-project/opensearch/pull/4213)) +* [Backport] Upgrade dependencies ([#4165](https://github.com/opensearch-project/opensearch/pull/4165)) +* Bumping 2.x to 2.3.0 ([#4098](https://github.com/opensearch-project/opensearch/pull/4098)) + +### Refactoring +* Refactored the src and test of GeoHashGrid and GeoTileGrid Aggregations on GeoPoint from server folder to geo module.([#4071](https://github.com/opensearch-project/opensearch/pull/4071)) ([#4072](https://github.com/opensearch-project/opensearch/pull/4072)) ([#4180](https://github.com/opensearch-project/opensearch/pull/4180)) ([#4281](https://github.com/opensearch-project/opensearch/pull/4281)) +* Update the head ref to changelog verifier ([#4296](https://github.com/opensearch-project/opensearch/pull/4296)) ([#4298](https://github.com/opensearch-project/opensearch/pull/4298)) +* [2.x] Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/opensearch/pull/4307)) ([#4324](https://github.com/opensearch-project/opensearch/pull/4324)) +* Refactored the src and test of GeoHashGrid and GeoTileGrid Aggregations on GeoPoint from server folder to geo module.([#4071](https://github.com/opensearch-project/opensearch/pull/4071)) ([#4072](https://github.com/opensearch-project/opensearch/pull/4072)) ([#4180](https://github.com/opensearch-project/opensearch/pull/4180)) ([#4281](https://github.com/opensearch-project/opensearch/pull/4281)) +* Refactors the GeoBoundsAggregation for geo_point types from the core server to the geo module. 
([#4179](https://github.com/opensearch-project/opensearch/pull/4179))
+* Backporting multiple 2.* release notes from main to the 2.x branch ([#4154](https://github.com/opensearch-project/opensearch/pull/4154))

From 5dc3d2e350ce667b0478a5cec1eed69251e91aff Mon Sep 17 00:00:00 2001
From: Ankit Kala
Date: Tue, 13 Sep 2022 15:28:51 +0530
Subject: [PATCH 49/78] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs (#3948)

* Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs

Signed-off-by: Ankit Kala
---
 CHANGELOG.md | 1 +
 .../opensearch/index/shard/IndexShard.java | 11 ++++
 .../translog/InternalTranslogManager.java | 5 ++
 .../index/translog/NoOpTranslogManager.java | 5 ++
 .../index/translog/TranslogManager.java | 5 ++
 .../translog/WriteOnlyTranslogManager.java | 5 ++
 .../index/shard/IndexShardTests.java | 60 +++++++++++++++++++
 7 files changed, 92 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a10824a56af05..8f6a3d4671b14 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -33,6 +33,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156))
 - Add index specific setting for remote repository ([#4253](https://github.com/opensearch-project/OpenSearch/pull/4253))
 - [Segment Replication] Update replicas to commit SegmentInfos instead of relying on SIS files from primary shards. ([#4402](https://github.com/opensearch-project/OpenSearch/pull/4402))
+- [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948))

 ### Deprecated

diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index 28dc0ad49d4ec..dcb7bdeb30e4f 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -2357,6 +2357,17 @@ public Translog.Snapshot getHistoryOperations(String reason, long startingSeqNo,
         return getEngine().newChangesSnapshot(reason, startingSeqNo, endSeqNo, true, accurateCount);
     }

+    /**
+     * Creates a new history snapshot from the translog instead of the lucene index. Required for cross cluster replication.
+     * Use the recommended {@link #getHistoryOperations(String, long, long, boolean)} method for other cases.
+     * This method should only be invoked if Segment Replication or Remote Store is not enabled.
+     */
+    public Translog.Snapshot getHistoryOperationsFromTranslog(long startingSeqNo, long endSeqNo) throws IOException {
+        assert (indexSettings.isSegRepEnabled() || indexSettings.isRemoteStoreEnabled()) == false
+            : "unsupported operation for segment replication enabled indices or remote store backed indices";
+        return getEngine().translogManager().newChangesSnapshot(startingSeqNo, endSeqNo, true);
+    }
+
     /**
      * Checks if we have a completed history of operations since the given starting seqno (inclusive).
* This method should be called after acquiring the retention lock; See {@link #acquireHistoryRetentionLock()} diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index fd52e02132006..574fcc54bafa6 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -98,6 +98,11 @@ public void rollTranslogGeneration() throws TranslogException { } } + @Override + public Translog.Snapshot newChangesSnapshot(long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException { + return translog.newSnapshot(fromSeqNo, toSeqNo, requiredFullRange); + } + /** * Performs recovery from the transaction log up to {@code recoverUpToSeqNo} (inclusive). * This operation will close the engine if the recovery fails. diff --git a/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java index 328edad51b5b7..4e46ca4b3e79e 100644 --- a/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java @@ -112,4 +112,9 @@ public Translog.Operation readOperation(Translog.Location location) throws IOExc public Translog.Location add(Translog.Operation operation) throws IOException { return new Translog.Location(0, 0, 0); } + + @Override + public Translog.Snapshot newChangesSnapshot(long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException { + throw new UnsupportedOperationException("Translog snapshot unsupported with no-op translogs"); + } } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java index 5353fa3b59124..39f819da4b018 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java @@ -33,6 +33,11 @@ public interface TranslogManager { */ int recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long localCheckpoint, long recoverUpToSeqNo) throws IOException; + /** + * Creates a new history snapshot from the translog file instead of the lucene index. + */ + Translog.Snapshot newChangesSnapshot(long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException; + /** * Checks if the underlying storage sync is required. */ diff --git a/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java index 96a2dd05851c0..60abdcb0dcb57 100644 --- a/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java @@ -68,4 +68,9 @@ public int recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, lo public void skipTranslogRecovery() { // Do nothing. 
} + + @Override + public Translog.Snapshot newChangesSnapshot(long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException { + throw new UnsupportedOperationException("Translog snapshot unsupported with no-op translogs"); + } } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 662afa80f65fc..27c0437236f63 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -1196,6 +1196,66 @@ public void testAcquireReplicaPermitAdvanceMaxSeqNoOfUpdates() throws Exception closeShards(replica); } + public void testGetChangesSnapshotThrowsAssertForSegRep() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 0); + final ShardRouting shardRouting = TestShardRouting.newShardRouting( + shardId, + randomAlphaOfLength(8), + true, + ShardRoutingState.INITIALIZING, + RecoverySource.EmptyStoreRecoverySource.INSTANCE + ); + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT.toString()) + .build(); + final IndexMetadata.Builder indexMetadata = IndexMetadata.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, 1); + final AtomicBoolean synced = new AtomicBoolean(); + final IndexShard primaryShard = newShard( + shardRouting, + indexMetadata.build(), + null, + new InternalEngineFactory(), + () -> synced.set(true), + RetentionLeaseSyncer.EMPTY, + null + ); + expectThrows(AssertionError.class, () -> primaryShard.getHistoryOperationsFromTranslog(0, 1)); + closeShard(primaryShard, false); + } + + public void testGetChangesSnapshotThrowsAssertForRemoteStore() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 0); + final ShardRouting shardRouting = TestShardRouting.newShardRouting( + shardId, + randomAlphaOfLength(8), + true, + ShardRoutingState.INITIALIZING, + RecoverySource.EmptyStoreRecoverySource.INSTANCE + ); + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .build(); + final IndexMetadata.Builder indexMetadata = IndexMetadata.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, 1); + final AtomicBoolean synced = new AtomicBoolean(); + final IndexShard primaryShard = newShard( + shardRouting, + indexMetadata.build(), + null, + new InternalEngineFactory(), + () -> synced.set(true), + RetentionLeaseSyncer.EMPTY, + null + ); + expectThrows(AssertionError.class, () -> primaryShard.getHistoryOperationsFromTranslog(0, 1)); + closeShard(primaryShard, false); + } + public void testGlobalCheckpointSync() throws IOException { // create the primary shard with a callback that sets a boolean when the global checkpoint sync is invoked final ShardId shardId = new ShardId("index", "_na_", 0); From 763a89fae1e11bf5ed151721e0fd0b6d43d3ab18 Mon Sep 17 00:00:00 2001 From: Ketan Verma Date: Tue, 13 Sep 2022 15:29:33 +0530 Subject: [PATCH 50/78] Fixed flaky test: ResourceAwareTasksTests.testTaskIdPersistsInThreadContext (#4484) Signed-off-by: Ketan Verma --- CHANGELOG.md | 1 + 
.../admin/cluster/node/tasks/ResourceAwareTasksTests.java | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f6a3d4671b14..1cbfc56ed776c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Segment Replication] Fix timeout issue by calculating time needed to process getSegmentFiles ([#4426](https://github.com/opensearch-project/OpenSearch/pull/4426)) - [Bug]: gradle check failing with java heap OutOfMemoryError (([#4328](https://github.com/opensearch-project/OpenSearch/ - `opensearch.bat` fails to execute when install path includes spaces ([#4362](https://github.com/opensearch-project/OpenSearch/pull/4362)) +- Fixed flaky test `ResourceAwareTasksTests.testTaskIdPersistsInThreadContext` ([#4484](https://github.com/opensearch-project/OpenSearch/pull/4484)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java index 654d5cde7bb00..5d947a743385f 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java @@ -42,6 +42,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; @@ -616,6 +617,10 @@ public void onFailure(Exception e) { taskTestContext.requestCompleteLatch.await(); + // It is possible for the MockTaskManagerListener to be called after the response is sent already. + // Wait enough time for taskId to be added to taskIdsRemovedFromThreadContext before performing validations. 
+ waitUntil(() -> taskIdsAddedToThreadContext.size() == taskIdsRemovedFromThreadContext.size(), 5, TimeUnit.SECONDS); + assertEquals(expectedTaskIdInThreadContext.get(), actualTaskIdInThreadContext.get()); assertThat(taskIdsAddedToThreadContext, containsInAnyOrder(taskIdsRemovedFromThreadContext.toArray())); } From 8366ea3fb4f0dbdc64b9dd2d566b27c5d88d7be3 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 13 Sep 2022 15:44:03 -0400 Subject: [PATCH 51/78] Getting security exception due to access denied 'java.lang.RuntimePermission' 'accessDeclaredMembers' when trying to get snapshot with S3 IRSA (#4469) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko Co-authored-by: Suraj Singh --- CHANGELOG.md | 1 + .../opensearch/repositories/s3/S3Service.java | 21 ++++++++++++------- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1cbfc56ed776c..dc1abd643d366 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - [Segment Replication] Fix timeout issue by calculating time needed to process getSegmentFiles ([#4426](https://github.com/opensearch-project/OpenSearch/pull/4426)) - [Bug]: gradle check failing with java heap OutOfMemoryError (([#4328](https://github.com/opensearch-project/OpenSearch/ - `opensearch.bat` fails to execute when install path includes spaces ([#4362](https://github.com/opensearch-project/OpenSearch/pull/4362)) +- Getting security exception due to access denied 'java.lang.RuntimePermission' 'accessDeclaredMembers' when trying to get snapshot with S3 IRSA ([#4469](https://github.com/opensearch-project/OpenSearch/pull/4469)) - Fixed flaky test `ResourceAwareTasksTests.testTaskIdPersistsInThreadContext` ([#4484](https://github.com/opensearch-project/OpenSearch/pull/4484)) ### Security diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java index 18bb62944dede..930af6f8a9799 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java @@ -305,21 +305,28 @@ static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings c } if (irsaCredentials.getIdentityTokenFile() == null) { - return new PrivilegedSTSAssumeRoleSessionCredentialsProvider<>( - securityTokenService, + final STSAssumeRoleSessionCredentialsProvider.Builder stsCredentialsProviderBuilder = new STSAssumeRoleSessionCredentialsProvider.Builder(irsaCredentials.getRoleArn(), irsaCredentials.getRoleSessionName()) - .withStsClient(securityTokenService) - .build() + .withStsClient(securityTokenService); + + final STSAssumeRoleSessionCredentialsProvider stsCredentialsProvider = SocketAccess.doPrivileged( + stsCredentialsProviderBuilder::build ); + + return new PrivilegedSTSAssumeRoleSessionCredentialsProvider<>(securityTokenService, stsCredentialsProvider); } else { - return new PrivilegedSTSAssumeRoleSessionCredentialsProvider<>( - securityTokenService, + final STSAssumeRoleWithWebIdentitySessionCredentialsProvider.Builder stsCredentialsProviderBuilder = new STSAssumeRoleWithWebIdentitySessionCredentialsProvider.Builder( irsaCredentials.getRoleArn(), irsaCredentials.getRoleSessionName(), irsaCredentials.getIdentityTokenFile() - ).withStsClient(securityTokenService).build() + ).withStsClient(securityTokenService); + + final 
STSAssumeRoleWithWebIdentitySessionCredentialsProvider stsCredentialsProvider = SocketAccess.doPrivileged( + stsCredentialsProviderBuilder::build ); + + return new PrivilegedSTSAssumeRoleSessionCredentialsProvider<>(securityTokenService, stsCredentialsProvider); } } else if (basicCredentials != null) { logger.debug("Using basic key/secret credentials"); From 29153fdf56c3f82bfa80619775ee9506941302e5 Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Tue, 13 Sep 2022 12:59:44 -0700 Subject: [PATCH 52/78] [Javadoc] add missing javadocs for :distribution:tools modules (#4483) * Add javadocs for distribution:tools:java-version-checker Signed-off-by: Daniel Widdis * Add javadocs for distribution:tools:keystore-cli Signed-off-by: Daniel Widdis * Add javadocs for distribution:tools:plugin-cli Signed-off-by: Daniel Widdis * Add javadocs for distribution:tools:launchers Signed-off-by: Daniel Widdis * Change log Signed-off-by: Daniel Widdis Signed-off-by: Daniel Widdis --- CHANGELOG.md | 1 + .../java_version_checker/SuppressForbidden.java | 5 +++++ .../tools/java_version_checker/package-info.java | 12 ++++++++++++ .../org/opensearch/common/settings/KeyStoreCli.java | 8 +++++++- .../org/opensearch/common/settings/package-info.java | 12 ++++++++++++ distribution/tools/launchers/build.gradle | 1 - .../org/opensearch/tools/launchers/package-info.java | 12 ++++++++++++ .../main/java/org/opensearch/plugins/PluginCli.java | 8 +++++++- .../java/org/opensearch/plugins/PluginHelper.java | 2 ++ .../java/org/opensearch/plugins/package-info.java | 12 ++++++++++++ gradle/missing-javadoc.gradle | 4 ---- 11 files changed, 70 insertions(+), 7 deletions(-) create mode 100644 distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/package-info.java create mode 100644 distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/package-info.java create mode 100644 distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/package-info.java create mode 100644 distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/package-info.java diff --git a/CHANGELOG.md b/CHANGELOG.md index dc1abd643d366..5fb1461fc6afe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Support for labels on version bump PRs, skip label support for changelog verifier ([#4391](https://github.com/opensearch-project/OpenSearch/pull/4391)) - Update previous release bwc version to 2.4.0 ([#4455](https://github.com/opensearch-project/OpenSearch/pull/4455)) - 2.3.0 release notes ([#4457](https://github.com/opensearch-project/OpenSearch/pull/4457)) +- Added missing javadocs for `:distribution:tools` modules ([#4483](https://github.com/opensearch-project/OpenSearch/pull/4483)) ### Dependencies - Bumps `org.gradle.test-retry` from 1.4.0 to 1.4.1 diff --git a/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/SuppressForbidden.java b/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/SuppressForbidden.java index 725718d85b179..d02e4e98b1287 100644 --- a/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/SuppressForbidden.java +++ b/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/SuppressForbidden.java @@ -43,5 +43,10 @@ @Retention(RetentionPolicy.CLASS) @Target({ ElementType.CONSTRUCTOR, ElementType.FIELD, 
ElementType.METHOD, ElementType.TYPE }) public @interface SuppressForbidden { + /** + * The argument to this annotation, specifying the reason a forbidden API is being used. + * + * @return The reason the error is being suppressed. + */ String reason(); } diff --git a/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/package-info.java b/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/package-info.java new file mode 100644 index 0000000000000..a626a125bb4c9 --- /dev/null +++ b/distribution/tools/java-version-checker/src/main/java/org/opensearch/tools/java_version_checker/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Tools to validate minimum version of the runtime Java. + */ +package org.opensearch.tools.java_version_checker; diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/KeyStoreCli.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/KeyStoreCli.java index 4789c5df416e6..7a772526cd66b 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/KeyStoreCli.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/KeyStoreCli.java @@ -36,7 +36,7 @@ import org.opensearch.cli.Terminal; /** - * A cli tool for managing secrets in the opensearch keystore. + * A CLI tool for managing secrets in the OpenSearch keystore. */ public class KeyStoreCli extends LoggingAwareMultiCommand { @@ -52,6 +52,12 @@ private KeyStoreCli() { subcommands.put("has-passwd", new HasPasswordKeyStoreCommand()); } + /** + * Main entry point for the OpenSearch Keystore CLI tool. + * + * @param args CLI commands for managing secrets. + * @throws Exception if an exception was encountered executing the command. + */ public static void main(String[] args) throws Exception { exit(new KeyStoreCli().main(args, Terminal.DEFAULT)); } diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/package-info.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/package-info.java new file mode 100644 index 0000000000000..3969fb4f91e49 --- /dev/null +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Classes implementing a CLI tool for managing secrets in the OpenSearch keystore. 
+ */ +package org.opensearch.common.settings; diff --git a/distribution/tools/launchers/build.gradle b/distribution/tools/launchers/build.gradle index 52100296ac7e6..7ebe5c7e64416 100644 --- a/distribution/tools/launchers/build.gradle +++ b/distribution/tools/launchers/build.gradle @@ -54,6 +54,5 @@ testingConventions { } javadoc.enabled = false -missingJavadoc.enabled = false loggerUsageCheck.enabled = false jarHell.enabled = false diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/package-info.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/package-info.java new file mode 100644 index 0000000000000..c77d9cab1f468 --- /dev/null +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Classes implementing utility methods for launching JVMs. + */ +package org.opensearch.tools.launchers; diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginCli.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginCli.java index fc93068ce416b..9b06235e87e86 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginCli.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginCli.java @@ -42,7 +42,7 @@ import java.util.Collections; /** - * A cli tool for adding, removing and listing plugins for opensearch. + * A CLI tool for adding, removing and listing plugins for OpenSearch. */ public class PluginCli extends LoggingAwareMultiCommand { @@ -56,6 +56,12 @@ private PluginCli() { commands = Collections.unmodifiableCollection(subcommands.values()); } + /** + * Main entry point for the OpenSearch Plugin CLI tool. + * + * @param args CLI commands for managing plugins. + * @throws Exception if an exception was encountered executing the command. + */ public static void main(String[] args) throws Exception { exit(new PluginCli().main(args, Terminal.DEFAULT)); } diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginHelper.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginHelper.java index 1ef4dd9a36d1c..13d8ab62c1f8d 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginHelper.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginHelper.java @@ -19,6 +19,8 @@ */ public class PluginHelper { + private PluginHelper() {} + /** * Verify if a plugin exists with any folder name. * @param pluginPath the path for the plugins directory. diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/package-info.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/package-info.java new file mode 100644 index 0000000000000..b762e59ae8095 --- /dev/null +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Classes implementing a CLI tool for managing plugins in OpenSearch. 
+ */ +package org.opensearch.plugins; diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 6b3dacd3e905a..248a714f4f3e3 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -95,10 +95,6 @@ configure([ project(":client:client-benchmark-noop-api-plugin"), project(":client:rest-high-level"), project(":client:test"), - project(":distribution:tools:java-version-checker"), - project(":distribution:tools:keystore-cli"), - project(":distribution:tools:launchers"), - project(":distribution:tools:plugin-cli"), project(":doc-tools"), project(":example-plugins:custom-settings"), project(":example-plugins:custom-significance-heuristic"), From 51a529fc52ddcc79e84f44dc9b610043b0a6c495 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Sep 2022 14:07:12 -0700 Subject: [PATCH 53/78] Bump reactive-streams from 1.0.3 to 1.0.4 in /plugins/repository-azure (#4488) * Bump reactive-streams from 1.0.3 to 1.0.4 in /plugins/repository-azure Bumps [reactive-streams](https://github.com/reactive-streams/reactive-streams) from 1.0.3 to 1.0.4. - [Release notes](https://github.com/reactive-streams/reactive-streams/releases) - [Changelog](https://github.com/reactive-streams/reactive-streams-jvm/blob/master/RELEASE-NOTES.md) - [Commits](https://github.com/reactive-streams/reactive-streams/compare/v1.0.3...v1.0.4) --- updated-dependencies: - dependency-name: org.reactivestreams:reactive-streams dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 4 +++- plugins/repository-azure/build.gradle | 2 +- .../repository-azure/licenses/reactive-streams-1.0.3.jar.sha1 | 1 - .../repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 | 1 + 4 files changed, 5 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactive-streams-1.0.3.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 5fb1461fc6afe..3a47bbff73a1a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update previous release bwc version to 2.4.0 ([#4455](https://github.com/opensearch-project/OpenSearch/pull/4455)) - 2.3.0 release notes ([#4457](https://github.com/opensearch-project/OpenSearch/pull/4457)) - Added missing javadocs for `:distribution:tools` modules ([#4483](https://github.com/opensearch-project/OpenSearch/pull/4483)) +### Dependencies +- Bumps `reactive-streams` from 1.0.3 to 1.0.4 ### Dependencies - Bumps `org.gradle.test-retry` from 1.4.0 to 1.4.1 @@ -86,4 +88,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 08cd32e80a7ca..755f00ac9b1b3 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -55,7 
+55,7 @@ dependencies { api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') api 'com.azure:azure-storage-blob:12.16.1' - api 'org.reactivestreams:reactive-streams:1.0.3' + api 'org.reactivestreams:reactive-streams:1.0.4' api 'io.projectreactor:reactor-core:3.4.18' api 'io.projectreactor.netty:reactor-netty:1.0.18' api 'io.projectreactor.netty:reactor-netty-core:1.0.22' diff --git a/plugins/repository-azure/licenses/reactive-streams-1.0.3.jar.sha1 b/plugins/repository-azure/licenses/reactive-streams-1.0.3.jar.sha1 deleted file mode 100644 index 77210f7c7b402..0000000000000 --- a/plugins/repository-azure/licenses/reactive-streams-1.0.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d9fb7a7926ffa635b3dcaa5049fb2bfa25b3e7d0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 new file mode 100644 index 0000000000000..45a80e3f7e361 --- /dev/null +++ b/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 @@ -0,0 +1 @@ +3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file From 45c959762a50c1f5ecb9233b0e8d311d92f85cba Mon Sep 17 00:00:00 2001 From: Hauck <67768441+hauck-jvsh@users.noreply.github.com> Date: Thu, 15 Sep 2022 00:22:50 -0300 Subject: [PATCH 54/78] Ignore all malformed objects when ignore_malformed is true (#4494) Fixes a bug to not fail the entire document when "ignore_malformed" is set to true. Allowing the valid fields to be indexed and ignore only the malformed fields. Signed-off-by: Hauck --- CHANGELOG.md | 1 + .../opensearch/index/mapper/FieldMapper.java | 29 ++++++++++++------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a47bbff73a1a..fb1bfc91c5361 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - `opensearch.bat` fails to execute when install path includes spaces ([#4362](https://github.com/opensearch-project/OpenSearch/pull/4362)) - Getting security exception due to access denied 'java.lang.RuntimePermission' 'accessDeclaredMembers' when trying to get snapshot with S3 IRSA ([#4469](https://github.com/opensearch-project/OpenSearch/pull/4469)) - Fixed flaky test `ResourceAwareTasksTests.testTaskIdPersistsInThreadContext` ([#4484](https://github.com/opensearch-project/OpenSearch/pull/4484)) +- Fixed the ignore_malformed setting to also ignore objects ([#4494](https://github.com/opensearch-project/OpenSearch/pull/4494)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java index 137ca4be1ca87..3acf5d4ea85ee 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java @@ -34,6 +34,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; @@ -268,6 +269,8 @@ public void parse(ParseContext context) throws IOException { try { parseCreateField(context); } catch (Exception e) { + boolean ignore_malformed = false; + if (context.indexSettings() 
!= null) ignore_malformed = IGNORE_MALFORMED_SETTING.get(context.indexSettings().getSettings()); String valuePreview = ""; try { XContentParser parser = context.parser(); @@ -278,23 +281,27 @@ public void parse(ParseContext context) throws IOException { valuePreview = complexValue.toString(); } } catch (Exception innerException) { + if (ignore_malformed == false) { + throw new MapperParsingException( + "failed to parse field [{}] of type [{}] in document with id '{}'. " + "Could not parse field value preview,", + e, + fieldType().name(), + fieldType().typeName(), + context.sourceToParse().id() + ); + } + } + + if (ignore_malformed == false) { throw new MapperParsingException( - "failed to parse field [{}] of type [{}] in document with id '{}'. " + "Could not parse field value preview,", + "failed to parse field [{}] of type [{}] in document with id '{}'. " + "Preview of field's value: '{}'", e, fieldType().name(), fieldType().typeName(), - context.sourceToParse().id() + context.sourceToParse().id(), + valuePreview ); } - - throw new MapperParsingException( - "failed to parse field [{}] of type [{}] in document with id '{}'. " + "Preview of field's value: '{}'", - e, - fieldType().name(), - fieldType().typeName(), - context.sourceToParse().id(), - valuePreview - ); } multiFields.parse(this, context); } From 72e680154428c5c705f68e4d8f72d408b8d01890 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 15 Sep 2022 13:43:39 -0700 Subject: [PATCH 55/78] [AUTO] [main] Added bwc version 2.3.1. (#4513) * Added bwc version 2.3.1 * Add changelog of adding bwc version 2.3.1 Signed-off-by: Tianli Feng --- .ci/bwcVersions | 1 + CHANGELOG.md | 2 ++ server/src/main/java/org/opensearch/Version.java | 1 + 3 files changed, 4 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 1dc8dc955f7c6..e82101896818e 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -51,4 +51,5 @@ BWC_VERSION: - "2.2.1" - "2.2.2" - "2.3.0" + - "2.3.1" - "2.4.0" diff --git a/CHANGELOG.md b/CHANGELOG.md index fb1bfc91c5361..7839d43209a71 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Update previous release bwc version to 2.4.0 ([#4455](https://github.com/opensearch-project/OpenSearch/pull/4455)) - 2.3.0 release notes ([#4457](https://github.com/opensearch-project/OpenSearch/pull/4457)) - Added missing javadocs for `:distribution:tools` modules ([#4483](https://github.com/opensearch-project/OpenSearch/pull/4483)) +- Add BWC version 2.3.1 ([#4513](https://github.com/opensearch-project/OpenSearch/pull/4513)) + ### Dependencies - Bumps `reactive-streams` from 1.0.3 to 1.0.4 diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 978f0ee2186f2..9c53a0f449a40 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -98,6 +98,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_2_1 = new Version(2020199, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_2_2 = new Version(2020299, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_2_3_1 = new Version(2030199, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final 
Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0); public static final Version CURRENT = V_3_0_0; From f73a8706aeb3781084e4feaf7a2d88565b817ecb Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Fri, 16 Sep 2022 13:23:56 -0700 Subject: [PATCH 56/78] [Segment Replication] Add snapshot and restore tests for segment replication feature (#3993) * [Segment Replication] Add snapshots tests with segment replication enabled Signed-off-by: Suraj Singh * Fix spotless failures Signed-off-by: Suraj Singh * Add changelog entry, address review comments, add failover test Signed-off-by: Suraj Singh * Fix spotless failures Signed-off-by: Suraj Singh * Address review comments 2 Signed-off-by: Suraj Singh Signed-off-by: Suraj Singh --- CHANGELOG.md | 3 +- .../SegmentReplicationSnapshotIT.java | 279 ++++++++++++++++++ 2 files changed, 281 insertions(+), 1 deletion(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 7839d43209a71..bbe79abf3e38a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - 2.3.0 release notes ([#4457](https://github.com/opensearch-project/OpenSearch/pull/4457)) - Added missing javadocs for `:distribution:tools` modules ([#4483](https://github.com/opensearch-project/OpenSearch/pull/4483)) - Add BWC version 2.3.1 ([#4513](https://github.com/opensearch-project/OpenSearch/pull/4513)) +- [Segment Replication] Add snapshot and restore tests for segment replication feature ([#3993](https://github.com/opensearch-project/OpenSearch/pull/3993)) ### Dependencies - Bumps `reactive-streams` from 1.0.3 to 1.0.4 @@ -91,4 +92,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD -[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x \ No newline at end of file +[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java new file mode 100644 index 0000000000000..d92f2af3f4bfd --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java @@ -0,0 +1,279 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.snapshots; + +import com.carrotsearch.randomizedtesting.RandomizedTest; +import org.junit.BeforeClass; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.rest.RestStatus; +import org.opensearch.test.BackgroundIndexer; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SegmentReplicationSnapshotIT extends AbstractSnapshotIntegTestCase { + private static final String INDEX_NAME = "test-segrep-idx"; + private static final String RESTORED_INDEX_NAME = INDEX_NAME + "-restored"; + private static final int SHARD_COUNT = 1; + private static final int REPLICA_COUNT = 1; + private static final int DOC_COUNT = 1010; + + private static final String REPOSITORY_NAME = "test-segrep-repo"; + private static final String SNAPSHOT_NAME = "test-segrep-snapshot"; + + @BeforeClass + public static void assumeFeatureFlag() { + assumeTrue("Segment replication Feature flag is enabled", Boolean.parseBoolean(System.getProperty(FeatureFlags.REPLICATION_TYPE))); + } + + public Settings segRepEnableIndexSettings() { + return getShardSettings().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + } + + public Settings docRepEnableIndexSettings() { + return getShardSettings().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT).build(); + } + + public Settings.Builder getShardSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, SHARD_COUNT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, REPLICA_COUNT); + } + + public Settings restoreIndexSegRepSettings() { + return Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + } + + public Settings restoreIndexDocRepSettings() { + return Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT).build(); + } + + @Override + protected boolean addMockInternalEngine() { + return false; + } + + public void ingestData(int docCount, String indexName) throws Exception { + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + indexName, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(docCount); + waitForDocs(docCount, indexer); + refresh(indexName); + } + } + + // Start cluster 
with provided settings and return the node names as list + public List startClusterWithSettings(Settings indexSettings, int replicaCount) throws Exception { + // Start primary + final String primaryNode = internalCluster().startNode(); + List nodeNames = new ArrayList<>(); + nodeNames.add(primaryNode); + for (int i = 0; i < replicaCount; i++) { + nodeNames.add(internalCluster().startNode()); + } + createIndex(INDEX_NAME, indexSettings); + ensureGreen(INDEX_NAME); + // Ingest data + ingestData(DOC_COUNT, INDEX_NAME); + return nodeNames; + } + + public void createSnapshot() { + // Snapshot declaration + Path absolutePath = randomRepoPath().toAbsolutePath(); + // Create snapshot + createRepository(REPOSITORY_NAME, "fs", absolutePath); + CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) + .setWaitForCompletion(true) + .setIndices(INDEX_NAME) + .get(); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + } + + public RestoreSnapshotResponse restoreSnapshotWithSettings(Settings indexSettings) { + RestoreSnapshotRequestBuilder builder = client().admin() + .cluster() + .prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) + .setWaitForCompletion(false) + .setRenamePattern(INDEX_NAME) + .setRenameReplacement(RESTORED_INDEX_NAME); + if (indexSettings != null) { + builder.setIndexSettings(indexSettings); + } + return builder.get(); + } + + public void testRestoreOnSegRep() throws Exception { + // Start cluster with one primary and one replica node + startClusterWithSettings(segRepEnableIndexSettings(), 1); + createSnapshot(); + // Delete index + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + assertFalse("index [" + INDEX_NAME + "] should have been deleted", indexExists(INDEX_NAME)); + + RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotWithSettings(null); + + // Assertions + assertThat(restoreSnapshotResponse.status(), equalTo(RestStatus.ACCEPTED)); + ensureGreen(RESTORED_INDEX_NAME); + GetSettingsResponse settingsResponse = client().admin() + .indices() + .getSettings(new GetSettingsRequest().indices(RESTORED_INDEX_NAME)) + .get(); + assertEquals(settingsResponse.getSetting(RESTORED_INDEX_NAME, "index.replication.type"), "SEGMENT"); + SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); + assertHitCount(resp, DOC_COUNT); + } + + public void testSnapshotOnSegRep_RestoreOnSegRepDuringIngestion() throws Exception { + startClusterWithSettings(segRepEnableIndexSettings(), 1); + createSnapshot(); + // Delete index + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + assertFalse("index [" + INDEX_NAME + "] should have been deleted", indexExists(INDEX_NAME)); + + RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotWithSettings(null); + + // Assertions + assertThat(restoreSnapshotResponse.status(), equalTo(RestStatus.ACCEPTED)); + ingestData(5000, RESTORED_INDEX_NAME); + ensureGreen(RESTORED_INDEX_NAME); + GetSettingsResponse settingsResponse = client().admin() + .indices() + .getSettings(new GetSettingsRequest().indices(RESTORED_INDEX_NAME)) + .get(); + assertEquals(settingsResponse.getSetting(RESTORED_INDEX_NAME, "index.replication.type"), "SEGMENT"); + 
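+ // the search below should count the snapshot's docs plus the 5000 ingested while the restore completed in the background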
SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); + assertHitCount(resp, DOC_COUNT + 5000); + } + + public void testSnapshotOnDocRep_RestoreOnSegRep() throws Exception { + startClusterWithSettings(docRepEnableIndexSettings(), 1); + createSnapshot(); + // Delete index + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + + RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotWithSettings(restoreIndexSegRepSettings()); + + // Assertions + assertThat(restoreSnapshotResponse.status(), equalTo(RestStatus.ACCEPTED)); + ensureGreen(RESTORED_INDEX_NAME); + GetSettingsResponse settingsResponse = client().admin() + .indices() + .getSettings(new GetSettingsRequest().indices(RESTORED_INDEX_NAME)) + .get(); + assertEquals(settingsResponse.getSetting(RESTORED_INDEX_NAME, "index.replication.type"), "SEGMENT"); + + SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); + assertHitCount(resp, DOC_COUNT); + } + + public void testSnapshotOnSegRep_RestoreOnDocRep() throws Exception { + // Start a cluster with one primary and one replica + startClusterWithSettings(segRepEnableIndexSettings(), 1); + createSnapshot(); + // Delete index + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + + RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotWithSettings(restoreIndexDocRepSettings()); + + // Assertions + assertThat(restoreSnapshotResponse.status(), equalTo(RestStatus.ACCEPTED)); + ensureGreen(RESTORED_INDEX_NAME); + GetSettingsResponse settingsResponse = client().admin() + .indices() + .getSettings(new GetSettingsRequest().indices(RESTORED_INDEX_NAME)) + .get(); + assertEquals(settingsResponse.getSetting(RESTORED_INDEX_NAME, "index.replication.type"), "DOCUMENT"); + SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); + assertHitCount(resp, DOC_COUNT); + } + + public void testSnapshotOnDocRep_RestoreOnDocRep() throws Exception { + startClusterWithSettings(docRepEnableIndexSettings(), 1); + createSnapshot(); + // Delete index + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + + RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotWithSettings(restoreIndexDocRepSettings()); + + // Assertions + assertThat(restoreSnapshotResponse.status(), equalTo(RestStatus.ACCEPTED)); + ensureGreen(RESTORED_INDEX_NAME); + GetSettingsResponse settingsResponse = client().admin() + .indices() + .getSettings(new GetSettingsRequest().indices(RESTORED_INDEX_NAME)) + .get(); + assertEquals(settingsResponse.getSetting(RESTORED_INDEX_NAME, "index.replication.type"), "DOCUMENT"); + + SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); + assertHitCount(resp, DOC_COUNT); + } + + public void testRestoreOnReplicaNode() throws Exception { + List nodeNames = startClusterWithSettings(segRepEnableIndexSettings(), 1); + final String primaryNode = nodeNames.get(0); + createSnapshot(); + // Delete index + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + assertFalse("index [" + INDEX_NAME + "] should have been deleted", indexExists(INDEX_NAME)); + + // stop the primary node so that restoration happens on replica node + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + + 
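+ // restoring with null settings keeps the snapshot's own index settings, so the index should come back with SEGMENT replication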
RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotWithSettings(null); + + // Assertions + assertThat(restoreSnapshotResponse.status(), equalTo(RestStatus.ACCEPTED)); + internalCluster().startNode(); + ensureGreen(RESTORED_INDEX_NAME); + GetSettingsResponse settingsResponse = client().admin() + .indices() + .getSettings(new GetSettingsRequest().indices(RESTORED_INDEX_NAME)) + .get(); + assertEquals(settingsResponse.getSetting(RESTORED_INDEX_NAME, "index.replication.type"), "SEGMENT"); + SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); + assertHitCount(resp, DOC_COUNT); + } +} From 57a4aedb0a66a8a5c9398119c679c615cc5f8c66 Mon Sep 17 00:00:00 2001 From: Ashish Date: Sat, 17 Sep 2022 22:41:20 +0530 Subject: [PATCH 57/78] [Remote Store] Change behaviour in replica recovery for remote translog enabled indices (#4318) Signed-off-by: Ashish Singh --- CHANGELOG.md | 1 + .../opensearch/index/shard/IndexShard.java | 66 ++++++- .../recovery/PeerRecoveryTargetService.java | 77 +++++--- .../recovery/RecoverySourceHandler.java | 129 +++++++------ ...overyWithRemoteTranslogOnPrimaryTests.java | 172 ++++++++++++++++++ .../PeerRecoveryTargetServiceTests.java | 14 +- .../index/shard/IndexShardTestCase.java | 5 +- 7 files changed, 369 insertions(+), 95 deletions(-) create mode 100644 server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index bbe79abf3e38a..c2e4fc2fe1aba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add index specific setting for remote repository ([#4253](https://github.com/opensearch-project/OpenSearch/pull/4253)) - [Segment Replication] Update replicas to commit SegmentInfos instead of relying on SIS files from primary shards. 
([#4402](https://github.com/opensearch-project/OpenSearch/pull/4402)) - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) +- [Remote Store] Change behaviour in replica recovery for remote translog enabled indices ([#4318](https://github.com/opensearch-project/OpenSearch/pull/4318)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index dcb7bdeb30e4f..9185ef0d440ce 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -163,8 +163,8 @@ import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; -import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.rest.RestStatus; @@ -203,6 +203,7 @@ import java.util.stream.StreamSupport; import static org.opensearch.index.seqno.RetentionLeaseActions.RETAIN_ALL; +import static org.opensearch.index.seqno.SequenceNumbers.MAX_SEQ_NO; import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; /** @@ -1703,13 +1704,8 @@ public void prepareForIndexRecovery() { * @return a sequence number that an operation-based peer recovery can start with. * This is the first operation after the local checkpoint of the safe commit if exists. */ - public long recoverLocallyUpToGlobalCheckpoint() { - assert Thread.holdsLock(mutex) == false : "recover locally under mutex"; - if (state != IndexShardState.RECOVERING) { - throw new IndexShardNotRecoveringException(shardId, state); - } - recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX); - assert routingEntry().recoverySource().getType() == RecoverySource.Type.PEER : "not a peer recovery [" + routingEntry() + "]"; + private long recoverLocallyUpToGlobalCheckpoint() { + validateLocalRecoveryState(); final Optional safeCommit; final long globalCheckpoint; try { @@ -1792,6 +1788,54 @@ public long recoverLocallyUpToGlobalCheckpoint() { } } + public long recoverLocallyAndFetchStartSeqNo(boolean localTranslog) { + if (localTranslog) { + return recoverLocallyUpToGlobalCheckpoint(); + } else { + return recoverLocallyUptoLastCommit(); + } + } + + /** + * The method figures out the starting sequence number based on the last commit. + * + * @return the starting sequence number from which the recovery should start.
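+ * Returns {@code UNASSIGNED_SEQ_NO} when the last commit cannot be read, in which case local recovery is skipped.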
+ */ + private long recoverLocallyUptoLastCommit() { + assert isRemoteTranslogEnabled() : "Remote translog store is not enabled"; + long seqNo; + validateLocalRecoveryState(); + + try { + seqNo = Long.parseLong(store.readLastCommittedSegmentsInfo().getUserData().get(MAX_SEQ_NO)); + } catch (org.apache.lucene.index.IndexNotFoundException e) { + logger.error("skip local recovery as no index commit found", e); + return UNASSIGNED_SEQ_NO; + } catch (Exception e) { + logger.error("skip local recovery as failed to find the safe commit", e); + return UNASSIGNED_SEQ_NO; + } + + try { + maybeCheckIndex(); + recoveryState.setStage(RecoveryState.Stage.TRANSLOG); + recoveryState.getTranslog().totalLocal(0); + } catch (Exception e) { + logger.error("check index failed during fetch seqNo", e); + return UNASSIGNED_SEQ_NO; + } + return seqNo; + } + + private void validateLocalRecoveryState() { + assert Thread.holdsLock(mutex) == false : "recover locally under mutex"; + if (state != IndexShardState.RECOVERING) { + throw new IndexShardNotRecoveringException(shardId, state); + } + recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX); + assert routingEntry().recoverySource().getType() == RecoverySource.Type.PEER : "not a peer recovery [" + routingEntry() + "]"; + } + public void trimOperationOfPreviousPrimaryTerms(long aboveSeqNo) { getEngine().translogManager().trimOperationsFromTranslog(getOperationPrimaryTerm(), aboveSeqNo); } @@ -1998,7 +2042,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t private boolean assertSequenceNumbersInCommit() throws IOException { final Map userData = SegmentInfos.readLatestCommit(store.directory()).getUserData(); assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) : "commit point doesn't contains a local checkpoint"; - assert userData.containsKey(SequenceNumbers.MAX_SEQ_NO) : "commit point doesn't contains a maximum sequence number"; + assert userData.containsKey(MAX_SEQ_NO) : "commit point doesn't contains a maximum sequence number"; assert userData.containsKey(Engine.HISTORY_UUID_KEY) : "commit point doesn't contains a history uuid"; assert userData.get(Engine.HISTORY_UUID_KEY).equals(getHistoryUUID()) : "commit point history uuid [" + userData.get(Engine.HISTORY_UUID_KEY) @@ -3275,6 +3319,10 @@ private boolean isRemoteStoreEnabled() { return (remoteStore != null && shardRouting.primary()); } + public boolean isRemoteTranslogEnabled() { + return indexSettings() != null && indexSettings().isRemoteTranslogStoreEnabled(); + } + /** * Acquire a primary operation permit whenever the shard is ready for indexing. If a permit is directly available, the provided * ActionListener will be called on the calling thread. During relocation hand-off, permit acquisition can be delayed. 
The provided diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index 85141556657f3..b5702431ed4bf 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -36,10 +36,10 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.ExceptionsHelper; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.cluster.ClusterState; @@ -219,6 +219,12 @@ protected void reestablishRecovery(final StartRecoveryRequest request, final Str threadPool.scheduleUnlessShuttingDown(retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(recoveryId, request)); } + /** + * Initiates recovery of the replica. TODO - Need to revisit it with PRRL and later. @see + * github issue on it. + * @param recoveryId recovery id + * @param preExistingRequest start recovery request + */ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExistingRequest) { final String actionName; final TransportRequest requestToSend; @@ -238,10 +244,17 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi assert recoveryTarget.sourceNode() != null : "can not do a recovery without a source node"; logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); indexShard.prepareForIndexRecovery(); - final long startingSeqNo = indexShard.recoverLocallyUpToGlobalCheckpoint(); + boolean remoteTranslogEnabled = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); + final long startingSeqNo = indexShard.recoverLocallyAndFetchStartSeqNo(!remoteTranslogEnabled); assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG : "unexpected recovery stage [" + recoveryTarget.state().getStage() + "] starting seqno [ " + startingSeqNo + "]"; - startRequest = getStartRecoveryRequest(logger, clusterService.localNode(), recoveryTarget, startingSeqNo); + startRequest = getStartRecoveryRequest( + logger, + clusterService.localNode(), + recoveryTarget, + startingSeqNo, + !remoteTranslogEnabled + ); requestToSend = startRequest; actionName = PeerRecoverySourceService.Actions.START_RECOVERY; } catch (final Exception e) { @@ -270,21 +283,32 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi ); } + public static StartRecoveryRequest getStartRecoveryRequest( + Logger logger, + DiscoveryNode localNode, + RecoveryTarget recoveryTarget, + long startingSeqNo + ) { + return getStartRecoveryRequest(logger, localNode, recoveryTarget, startingSeqNo, true); + } + /** * Prepare the start recovery request. * - * @param logger the logger - * @param localNode the local node of the recovery target - * @param recoveryTarget the target of the recovery - * @param startingSeqNo a sequence number that an operation-based peer recovery can start with. - * This is the first operation after the local checkpoint of the safe commit if exists. 
+ * @param logger the logger + * @param localNode the local node of the recovery target + * @param recoveryTarget the target of the recovery + * @param startingSeqNo a sequence number that an operation-based peer recovery can start with. + * This is the first operation after the local checkpoint of the safe commit if exists. + * @param verifyTranslog should the recovery request validate translog consistency with snapshot store metadata. * @return a start recovery request */ public static StartRecoveryRequest getStartRecoveryRequest( Logger logger, DiscoveryNode localNode, RecoveryTarget recoveryTarget, - long startingSeqNo + long startingSeqNo, + boolean verifyTranslog ) { final StartRecoveryRequest request; logger.trace("{} collecting local files for [{}]", recoveryTarget.shardId(), recoveryTarget.sourceNode()); @@ -292,22 +316,25 @@ public static StartRecoveryRequest getStartRecoveryRequest( Store.MetadataSnapshot metadataSnapshot; try { metadataSnapshot = recoveryTarget.indexShard().snapshotStoreMetadata(); - // Make sure that the current translog is consistent with the Lucene index; otherwise, we have to throw away the Lucene index. - try { - final String expectedTranslogUUID = metadataSnapshot.getCommitUserData().get(Translog.TRANSLOG_UUID_KEY); - final long globalCheckpoint = Translog.readGlobalCheckpoint(recoveryTarget.translogLocation(), expectedTranslogUUID); - assert globalCheckpoint + 1 >= startingSeqNo : "invalid startingSeqNo " + startingSeqNo + " >= " + globalCheckpoint; - } catch (IOException | TranslogCorruptedException e) { - logger.warn( - new ParameterizedMessage( - "error while reading global checkpoint from translog, " - + "resetting the starting sequence number from {} to unassigned and recovering as if there are none", - startingSeqNo - ), - e - ); - metadataSnapshot = Store.MetadataSnapshot.EMPTY; - startingSeqNo = UNASSIGNED_SEQ_NO; + if (verifyTranslog) { + // Make sure that the current translog is consistent with the Lucene index; otherwise, we have to throw away the Lucene + // index. + try { + final String expectedTranslogUUID = metadataSnapshot.getCommitUserData().get(Translog.TRANSLOG_UUID_KEY); + final long globalCheckpoint = Translog.readGlobalCheckpoint(recoveryTarget.translogLocation(), expectedTranslogUUID); + assert globalCheckpoint + 1 >= startingSeqNo : "invalid startingSeqNo " + startingSeqNo + " >= " + globalCheckpoint; + } catch (IOException | TranslogCorruptedException e) { + logger.warn( + new ParameterizedMessage( + "error while reading global checkpoint from translog, " + + "resetting the starting sequence number from {} to unassigned and recovering as if there are none", + startingSeqNo + ), + e + ); + metadataSnapshot = Store.MetadataSnapshot.EMPTY; + startingSeqNo = UNASSIGNED_SEQ_NO; + } } } catch (final org.apache.lucene.index.IndexNotFoundException e) { // happens on an empty folder. no need to log diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 9e219db5a4c96..665e79722770e 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -316,60 +316,85 @@ && isTargetSameHistory() } assert startingSeqNo >= 0 : "startingSeqNo must be non negative. 
got: " + startingSeqNo; - sendFileStep.whenComplete(r -> { - assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[prepareTargetForTranslog]"); - // For a sequence based recovery, the target can keep its local translog - prepareTargetForTranslog(countNumberOfHistoryOperations(startingSeqNo), prepareEngineStep); - }, onFailure); - - prepareEngineStep.whenComplete(prepareEngineTime -> { - assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[phase2]"); - /* - * add shard to replication group (shard will receive replication requests from this point on) now that engine is open. - * This means that any document indexed into the primary after this will be replicated to this replica as well - * make sure to do this before sampling the max sequence number in the next step, to ensure that we send - * all documents up to maxSeqNo in phase2. - */ - RunUnderPrimaryPermit.run( - () -> shard.initiateTracking(request.targetAllocationId()), - shardId + " initiating tracking of " + request.targetAllocationId(), - shard, - cancellableThreads, - logger - ); - - final long endingSeqNo = shard.seqNoStats().getMaxSeqNo(); - if (logger.isTraceEnabled()) { - logger.trace("snapshot translog for recovery; current size is [{}]", countNumberOfHistoryOperations(startingSeqNo)); - } - final Translog.Snapshot phase2Snapshot = shard.newChangesSnapshot( - PEER_RECOVERY_NAME, - startingSeqNo, - Long.MAX_VALUE, - false, - true - ); - resources.add(phase2Snapshot); - retentionLock.close(); + boolean isRecoveringReplicaWithRemoteTxLogEnabledIndex = request.isPrimaryRelocation() == false + && shard.isRemoteTranslogEnabled(); + + if (isRecoveringReplicaWithRemoteTxLogEnabledIndex) { + sendFileStep.whenComplete(r -> { + assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[prepareTargetForTranslog]"); + // For a sequence based recovery, the target can keep its local translog + prepareTargetForTranslog(0, prepareEngineStep); + }, onFailure); + + prepareEngineStep.whenComplete(prepareEngineTime -> { + assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[phase2]"); + RunUnderPrimaryPermit.run( + () -> shard.initiateTracking(request.targetAllocationId()), + shardId + " initiating tracking of " + request.targetAllocationId(), + shard, + cancellableThreads, + logger + ); + final long endingSeqNo = shard.seqNoStats().getMaxSeqNo(); + retentionLock.close(); + sendSnapshotStep.onResponse(new SendSnapshotResult(endingSeqNo, 0, TimeValue.ZERO)); + }, onFailure); + } else { + sendFileStep.whenComplete(r -> { + assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[prepareTargetForTranslog]"); + // For a sequence based recovery, the target can keep its local translog + prepareTargetForTranslog(countNumberOfHistoryOperations(startingSeqNo), prepareEngineStep); + }, onFailure); + + prepareEngineStep.whenComplete(prepareEngineTime -> { + assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[phase2]"); + /* + * add shard to replication group (shard will receive replication requests from this point on) now that engine is open. + * This means that any document indexed into the primary after this will be replicated to this replica as well + * make sure to do this before sampling the max sequence number in the next step, to ensure that we send + * all documents up to maxSeqNo in phase2. 
+ */ + RunUnderPrimaryPermit.run( + () -> shard.initiateTracking(request.targetAllocationId()), + shardId + " initiating tracking of " + request.targetAllocationId(), + shard, + cancellableThreads, + logger + ); - // we have to capture the max_seen_auto_id_timestamp and the max_seq_no_of_updates to make sure that these values - // are at least as high as the corresponding values on the primary when any of these operations were executed on it. - final long maxSeenAutoIdTimestamp = shard.getMaxSeenAutoIdTimestamp(); - final long maxSeqNoOfUpdatesOrDeletes = shard.getMaxSeqNoOfUpdatesOrDeletes(); - final RetentionLeases retentionLeases = shard.getRetentionLeases(); - final long mappingVersionOnPrimary = shard.indexSettings().getIndexMetadata().getMappingVersion(); - phase2( - startingSeqNo, - endingSeqNo, - phase2Snapshot, - maxSeenAutoIdTimestamp, - maxSeqNoOfUpdatesOrDeletes, - retentionLeases, - mappingVersionOnPrimary, - sendSnapshotStep - ); + final long endingSeqNo = shard.seqNoStats().getMaxSeqNo(); + if (logger.isTraceEnabled()) { + logger.trace("snapshot translog for recovery; current size is [{}]", countNumberOfHistoryOperations(startingSeqNo)); + } + final Translog.Snapshot phase2Snapshot = shard.newChangesSnapshot( + PEER_RECOVERY_NAME, + startingSeqNo, + Long.MAX_VALUE, + false, + true + ); + resources.add(phase2Snapshot); + retentionLock.close(); + + // we have to capture the max_seen_auto_id_timestamp and the max_seq_no_of_updates to make sure that these values + // are at least as high as the corresponding values on the primary when any of these operations were executed on it. + final long maxSeenAutoIdTimestamp = shard.getMaxSeenAutoIdTimestamp(); + final long maxSeqNoOfUpdatesOrDeletes = shard.getMaxSeqNoOfUpdatesOrDeletes(); + final RetentionLeases retentionLeases = shard.getRetentionLeases(); + final long mappingVersionOnPrimary = shard.indexSettings().getIndexMetadata().getMappingVersion(); + phase2( + startingSeqNo, + endingSeqNo, + phase2Snapshot, + maxSeenAutoIdTimestamp, + maxSeqNoOfUpdatesOrDeletes, + retentionLeases, + mappingVersionOnPrimary, + sendSnapshotStep + ); - }, onFailure); + }, onFailure); + } // Recovery target can trim all operations >= startingSeqNo as we have sent all these operations in the phase 2 final long trimAboveSeqNo = startingSeqNo - 1; diff --git a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java new file mode 100644 index 0000000000000..5d317693e02df --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java @@ -0,0 +1,172 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.junit.Assert; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.engine.DocIdSeqNoAndSource; +import org.opensearch.index.engine.NRTReplicationEngine; +import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.translog.WriteOnlyTranslogManager; +import org.opensearch.indices.recovery.RecoveryTarget; +import org.opensearch.indices.replication.common.ReplicationType; + +import java.io.IOException; +import java.util.List; + +import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; + +public class ReplicaRecoveryWithRemoteTranslogOnPrimaryTests extends OpenSearchIndexLevelReplicationTestCase { + + private static final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, "true") + .build(); + + public void testReplicaShardRecoveryUptoLastFlushedCommit() throws Exception { + try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { + + // Step 1 - Start primary, index docs and flush + shards.startPrimary(); + final IndexShard primary = shards.getPrimary(); + int numDocs = shards.indexDocs(randomIntBetween(10, 100)); + shards.flush(); + + // Step 2 - Start replica for recovery to happen, check both have the same number of docs + final IndexShard replica1 = shards.addReplica(); + shards.startAll(); + assertEquals(getDocIdAndSeqNos(primary), getDocIdAndSeqNos(replica1)); + + // Step 3 - Index more docs, run segment replication, check both have the same number of docs + int moreDocs = shards.indexDocs(randomIntBetween(10, 100)); + primary.refresh("test"); + replicateSegments(primary, shards.getReplicas()); + assertEquals(getDocIdAndSeqNos(primary), getDocIdAndSeqNos(replica1)); + + // Step 4 - Check that both shards have the expected doc count + assertDocCount(primary, numDocs + moreDocs); + assertDocCount(replica1, numDocs + moreDocs); + + // Step 5 - Start new replica, recovery happens, and check that new replica has docs up to the last flush + final IndexShard replica2 = shards.addReplica(); + shards.startAll(); + assertDocCount(replica2, numDocs); + + // Step 6 - Segment replication, check all shards have same number of docs + replicateSegments(primary, shards.getReplicas()); + shards.assertAllEqual(numDocs + moreDocs); + } + } + + public void testNoTranslogHistoryTransferred() throws Exception { + try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { + + // Step 1 - Start primary, index docs, flush, index more docs, check translog in primary as expected + shards.startPrimary(); + final IndexShard primary = shards.getPrimary(); + int numDocs = shards.indexDocs(randomIntBetween(10, 100)); + shards.flush(); + List<DocIdSeqNoAndSource> docIdAndSeqNosAfterFlush = getDocIdAndSeqNos(primary); + int moreDocs = shards.indexDocs(randomIntBetween(20, 100)); + assertEquals(moreDocs, getTranslog(primary).totalOperations()); + + // Step 2 - Start replica, recovery happens, check docs recovered up to the last flush
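+ // (no translog operations should be transferred or replayed here, since the primary's translog is remote-backed)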
+ final IndexShard replica = shards.addReplica(); + shards.startAll(); + assertEquals(docIdAndSeqNosAfterFlush, getDocIdAndSeqNos(replica)); + assertDocCount(replica, numDocs); + assertEquals(NRTReplicationEngine.class, replica.getEngine().getClass()); + + // Step 3 - Check replica's translog has no operations + assertEquals(WriteOnlyTranslogManager.class, replica.getEngine().translogManager().getClass()); + WriteOnlyTranslogManager replicaTranslogManager = (WriteOnlyTranslogManager) replica.getEngine().translogManager(); + assertEquals(0, replicaTranslogManager.getTranslog().totalOperations()); + + // Adding this for close to succeed + shards.flush(); + replicateSegments(primary, shards.getReplicas()); + shards.assertAllEqual(numDocs + moreDocs); + } + } + + public void testStartSequenceForReplicaRecovery() throws Exception { + try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { + + shards.startPrimary(); + final IndexShard primary = shards.getPrimary(); + int numDocs = shards.indexDocs(randomIntBetween(10, 100)); + shards.flush(); + + final IndexShard replica = shards.addReplica(); + shards.startAll(); + + allowShardFailures(); + replica.failShard("test", null); + + final ShardRouting replicaRouting = replica.routingEntry(); + final IndexMetadata newIndexMetadata = IndexMetadata.builder(replica.indexSettings().getIndexMetadata()) + .primaryTerm(replicaRouting.shardId().id(), replica.getOperationPrimaryTerm() + 1) + .build(); + closeShards(replica); + shards.removeReplica(replica); + + int moreDocs = shards.indexDocs(randomIntBetween(20, 100)); + shards.flush(); + + IndexShard newReplicaShard = newShard( + newShardRouting( + replicaRouting.shardId(), + replicaRouting.currentNodeId(), + false, + ShardRoutingState.INITIALIZING, + RecoverySource.PeerRecoverySource.INSTANCE + ), + replica.shardPath(), + newIndexMetadata, + null, + null, + replica.getEngineFactory(), + replica.getEngineConfigFactory(), + replica.getGlobalCheckpointSyncer(), + replica.getRetentionLeaseSyncer(), + EMPTY_EVENT_LISTENER, + null + ); + shards.addReplica(newReplicaShard); + shards.recoverReplica(newReplicaShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener) { + @Override + public IndexShard indexShard() { + IndexShard idxShard = super.indexShard(); + // verify the starting sequence number while recovering a failed shard which has a valid last commit + long startingSeqNo = -1; + try { + startingSeqNo = Long.parseLong( + idxShard.store().readLastCommittedSegmentsInfo().getUserData().get(SequenceNumbers.MAX_SEQ_NO) + ); + } catch (IOException e) { + Assert.fail(); + } + assertEquals(numDocs - 1, startingSeqNo); + return idxShard; + } + }); + + shards.flush(); + replicateSegments(primary, shards.getReplicas()); + shards.assertAllEqual(numDocs + moreDocs); + } + } +} diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 2a88345346e52..a50089831b3e9 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -211,7 +211,7 @@ public void testPrepareIndexForPeerRecovery() throws Exception { IndexShard shard = newShard(false); shard.markAsRecovering("for testing", new RecoveryState(shard.routingEntry(), localNode, localNode)); shard.prepareForIndexRecovery(); - 
assertThat(shard.recoverLocallyUpToGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); + assertThat(shard.recoverLocallyAndFetchStartSeqNo(true), equalTo(UNASSIGNED_SEQ_NO)); assertThat(shard.recoveryState().getTranslog().totalLocal(), equalTo(RecoveryState.Translog.UNKNOWN)); assertThat(shard.recoveryState().getTranslog().recoveredOperations(), equalTo(0)); assertThat(shard.getLastKnownGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); @@ -239,7 +239,7 @@ public void testPrepareIndexForPeerRecovery() throws Exception { ); replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); replica.prepareForIndexRecovery(); - assertThat(replica.recoverLocallyUpToGlobalCheckpoint(), equalTo(globalCheckpoint + 1)); + assertThat(replica.recoverLocallyAndFetchStartSeqNo(true), equalTo(globalCheckpoint + 1)); assertThat(replica.recoveryState().getTranslog().totalLocal(), equalTo(expectedTotalLocal)); assertThat(replica.recoveryState().getTranslog().recoveredOperations(), equalTo(expectedTotalLocal)); assertThat(replica.getLastKnownGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); @@ -254,7 +254,7 @@ public void testPrepareIndexForPeerRecovery() throws Exception { replica = reinitShard(shard, ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.PeerRecoverySource.INSTANCE)); replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); replica.prepareForIndexRecovery(); - assertThat(replica.recoverLocallyUpToGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); + assertThat(replica.recoverLocallyAndFetchStartSeqNo(true), equalTo(UNASSIGNED_SEQ_NO)); assertThat(replica.recoveryState().getTranslog().totalLocal(), equalTo(RecoveryState.Translog.UNKNOWN)); assertThat(replica.recoveryState().getTranslog().recoveredOperations(), equalTo(0)); assertThat(replica.getLastKnownGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); @@ -276,10 +276,10 @@ public void testPrepareIndexForPeerRecovery() throws Exception { replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); replica.prepareForIndexRecovery(); if (safeCommit.isPresent()) { - assertThat(replica.recoverLocallyUpToGlobalCheckpoint(), equalTo(safeCommit.get().localCheckpoint + 1)); + assertThat(replica.recoverLocallyAndFetchStartSeqNo(true), equalTo(safeCommit.get().localCheckpoint + 1)); assertThat(replica.recoveryState().getTranslog().totalLocal(), equalTo(0)); } else { - assertThat(replica.recoverLocallyUpToGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); + assertThat(replica.recoverLocallyAndFetchStartSeqNo(true), equalTo(UNASSIGNED_SEQ_NO)); assertThat(replica.recoveryState().getTranslog().totalLocal(), equalTo(RecoveryState.Translog.UNKNOWN)); } assertThat(replica.recoveryState().getStage(), equalTo(RecoveryState.Stage.TRANSLOG)); @@ -322,7 +322,7 @@ public void testClosedIndexSkipsLocalRecovery() throws Exception { ); replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); replica.prepareForIndexRecovery(); - assertThat(replica.recoverLocallyUpToGlobalCheckpoint(), equalTo(safeCommit.get().localCheckpoint + 1)); + assertThat(replica.recoverLocallyAndFetchStartSeqNo(true), equalTo(safeCommit.get().localCheckpoint + 1)); assertThat(replica.recoveryState().getTranslog().totalLocal(), equalTo(0)); assertThat(replica.recoveryState().getTranslog().recoveredOperations(), equalTo(0)); assertThat(replica.getLastKnownGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); @@ 
-349,7 +349,7 @@ public void testResetStartingSeqNoIfLastCommitCorrupted() throws Exception { shard = reinitShard(shard, ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.PeerRecoverySource.INSTANCE)); shard.markAsRecovering("peer recovery", new RecoveryState(shard.routingEntry(), pNode, rNode)); shard.prepareForIndexRecovery(); - long startingSeqNo = shard.recoverLocallyUpToGlobalCheckpoint(); + long startingSeqNo = shard.recoverLocallyAndFetchStartSeqNo(true); shard.store().markStoreCorrupted(new IOException("simulated")); RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null); StartRecoveryRequest request = PeerRecoveryTargetService.getStartRecoveryRequest(logger, rNode, recoveryTarget, startingSeqNo); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 09eca006d600a..1fcdfd79c544e 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -134,7 +134,6 @@ import java.io.IOException; import java.util.ArrayList; import java.nio.file.Path; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -853,7 +852,9 @@ protected final void recoverUnstartedReplica( } replica.prepareForIndexRecovery(); final RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode); - final long startingSeqNo = recoveryTarget.indexShard().recoverLocallyUpToGlobalCheckpoint(); + IndexShard indexShard = recoveryTarget.indexShard(); + boolean remoteTranslogEnabled = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); + final long startingSeqNo = indexShard.recoverLocallyAndFetchStartSeqNo(!remoteTranslogEnabled); final StartRecoveryRequest request = PeerRecoveryTargetService.getStartRecoveryRequest( logger, rNode, From 465771631d2e3a5001d97d0c7e845e6f8ea6bd15 Mon Sep 17 00:00:00 2001 From: Anshu Agarwal Date: Mon, 19 Sep 2022 14:13:18 +0530 Subject: [PATCH 58/78] Weighted round-robin scheduling policy for shard coordination traffic… (#4241) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Weighted round-robin scheduling policy for shard coordination traffic routing Signed-off-by: Anshu Agarwal --- CHANGELOG.md | 1 + .../org/opensearch/cluster/ClusterModule.java | 9 + .../opensearch/cluster/metadata/Metadata.java | 8 + .../metadata/WeightedRoutingMetadata.java | 167 +++++++++ .../routing/IndexShardRoutingTable.java | 134 ++++++++ .../cluster/routing/OperationRouting.java | 46 ++- .../cluster/routing/WeightedRoundRobin.java | 106 ++++++ .../cluster/routing/WeightedRouting.java | 75 +++++ .../common/settings/ClusterSettings.java | 1 + .../WeightedRoutingMetadataTests.java | 36 ++ .../routing/OperationRoutingTests.java | 318 ++++++++++++++++++ .../routing/WeightedRoundRobinTests.java | 151 +++++++++ .../structure/RoutingIteratorTests.java | 210 ++++++++++++ 13 files changed, 1254 insertions(+), 8 deletions(-) create mode 100644 server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java create mode 100644 server/src/main/java/org/opensearch/cluster/routing/WeightedRoundRobin.java create mode 100644 server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java create mode 100644
server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java create mode 100644 server/src/test/java/org/opensearch/cluster/routing/WeightedRoundRobinTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index c2e4fc2fe1aba..3c610c89eeaf3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) +- Weighted round-robin scheduling policy for shard coordination traffic ([#4241](https://github.com/opensearch-project/OpenSearch/pull/4241)) - Add index specific setting for remote repository ([#4253](https://github.com/opensearch-project/OpenSearch/pull/4253)) - [Segment Replication] Update replicas to commit SegmentInfos instead of relying on SIS files from primary shards. ([#4402](https://github.com/opensearch-project/OpenSearch/pull/4402)) - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index f8ba520e465e2..46552bb5d6a03 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -48,6 +48,7 @@ import org.opensearch.cluster.metadata.MetadataMappingService; import org.opensearch.cluster.metadata.MetadataUpdateSettingsService; import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.routing.DelayedAllocationService; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.routing.allocation.ExistingShardsAllocator; @@ -191,6 +192,7 @@ public static List getNamedWriteables() { ComposableIndexTemplateMetadata::readDiffFrom ); registerMetadataCustom(entries, DataStreamMetadata.TYPE, DataStreamMetadata::new, DataStreamMetadata::readDiffFrom); + registerMetadataCustom(entries, WeightedRoutingMetadata.TYPE, WeightedRoutingMetadata::new, WeightedRoutingMetadata::readDiffFrom); // Task Status (not Diffable) entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new)); return entries; @@ -274,6 +276,13 @@ public static List getNamedXWriteables() { DataStreamMetadata::fromXContent ) ); + entries.add( + new NamedXContentRegistry.Entry( + Metadata.Custom.class, + new ParseField(WeightedRoutingMetadata.TYPE), + WeightedRoutingMetadata::fromXContent + ) + ); return entries; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 5f7e98e9e1199..086865d2170c3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -810,6 +810,14 @@ public IndexGraveyard indexGraveyard() { return custom(IndexGraveyard.TYPE); } + /** + * * + * @return The weighted routing metadata for search 
requests + */ + public WeightedRoutingMetadata weightedRoutingMetadata() { + return custom(WeightedRoutingMetadata.TYPE); + } + public T custom(String type) { return (T) customs.get(type); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java new file mode 100644 index 0000000000000..27beb21f28f7c --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.OpenSearchParseException; +import org.opensearch.Version; +import org.opensearch.cluster.AbstractNamedDiffable; +import org.opensearch.cluster.NamedDiff; +import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Map; + +/** + * Contains metadata for weighted routing + * + * @opensearch.internal + */ +public class WeightedRoutingMetadata extends AbstractNamedDiffable implements Metadata.Custom { + private static final Logger logger = LogManager.getLogger(WeightedRoutingMetadata.class); + public static final String TYPE = "weighted_shard_routing"; + public static final String AWARENESS = "awareness"; + private WeightedRouting weightedRouting; + + public WeightedRouting getWeightedRouting() { + return weightedRouting; + } + + public WeightedRoutingMetadata setWeightedRouting(WeightedRouting weightedRouting) { + this.weightedRouting = weightedRouting; + return this; + } + + public WeightedRoutingMetadata(StreamInput in) throws IOException { + if (in.available() != 0) { + this.weightedRouting = new WeightedRouting(in); + } + } + + public WeightedRoutingMetadata(WeightedRouting weightedRouting) { + this.weightedRouting = weightedRouting; + } + + @Override + public EnumSet context() { + return Metadata.API_AND_GATEWAY; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_2_4_0; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (weightedRouting != null) { + weightedRouting.writeTo(out); + } + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(Metadata.Custom.class, TYPE, in); + } + + public static WeightedRoutingMetadata fromXContent(XContentParser parser) throws IOException { + String attrKey = null; + Double attrValue; + String attributeName = null; + Map weights = new HashMap<>(); + WeightedRouting weightedRouting = null; + XContentParser.Token token; + // move to the first alias + parser.nextToken(); + String awarenessField = null; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + awarenessField = parser.currentName(); + if 
(parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new OpenSearchParseException( + "failed to parse weighted routing metadata [{}], expected " + "object", + awarenessField + ); + } + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + attributeName = parser.currentName(); + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new OpenSearchParseException( + "failed to parse weighted routing metadata [{}], expected" + " object", + attributeName + ); + } + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + attrKey = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_NUMBER) { + attrValue = Double.parseDouble(parser.text()); + weights.put(attrKey, attrValue); + } else { + throw new OpenSearchParseException( + "failed to parse weighted routing metadata attribute " + "[{}], unknown type", + attributeName + ); + } + } + } + } + } + weightedRouting = new WeightedRouting(attributeName, weights); + return new WeightedRoutingMetadata(weightedRouting); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + WeightedRoutingMetadata that = (WeightedRoutingMetadata) o; + return weightedRouting.equals(that.weightedRouting); + } + + @Override + public int hashCode() { + return weightedRouting.hashCode(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + toXContent(weightedRouting, builder); + return builder; + } + + public static void toXContent(WeightedRouting weightedRouting, XContentBuilder builder) throws IOException { + builder.startObject(AWARENESS); + builder.startObject(weightedRouting.attributeName()); + for (Map.Entry entry : weightedRouting.weights().entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); + builder.endObject(); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java index d4597f47d9a6c..9026e7068e9fe 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java @@ -85,6 +85,9 @@ public class IndexShardRoutingTable implements Iterable { private volatile Map activeShardsByAttributes = emptyMap(); private volatile Map initializingShardsByAttributes = emptyMap(); private final Object shardsByAttributeMutex = new Object(); + private final Object shardsByWeightMutex = new Object(); + private volatile Map> activeShardsByWeight = emptyMap(); + private volatile Map> initializingShardsByWeight = emptyMap(); /** * The initializing list, including ones that are initializing on a target node because of relocation. 
@@ -233,6 +236,10 @@ public List assignedShards() { return this.assignedShards; } + public Map> getActiveShardsByWeight() { + return activeShardsByWeight; + } + public ShardIterator shardsRandomIt() { return new PlainShardIterator(shardId, shuffler.shuffle(shards)); } @@ -292,6 +299,73 @@ public ShardIterator activeInitializingShardsRankedIt( return new PlainShardIterator(shardId, ordered); } + /** + * Returns an iterator over active and initializing shards, shards are ordered by weighted + * round-robin scheduling policy. + * + * @param weightedRouting entity + * @param nodes discovered nodes in the cluster + * @return an iterator over active and initializing shards, ordered by weighted round-robin + * scheduling policy. Making sure that initializing shards are the last to iterate through. + */ + public ShardIterator activeInitializingShardsWeightedIt(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) { + final int seed = shuffler.nextSeed(); + List ordered = new ArrayList<>(); + List orderedActiveShards = getActiveShardsByWeight(weightedRouting, nodes, defaultWeight); + ordered.addAll(shuffler.shuffle(orderedActiveShards, seed)); + if (!allInitializingShards.isEmpty()) { + List orderedInitializingShards = getInitializingShardsByWeight(weightedRouting, nodes, defaultWeight); + ordered.addAll(orderedInitializingShards); + } + return new PlainShardIterator(shardId, ordered); + } + + /** + * Returns a list containing shard routings ordered using weighted round-robin scheduling. + */ + private List shardsOrderedByWeight( + List shards, + WeightedRouting weightedRouting, + DiscoveryNodes nodes, + double defaultWeight + ) { + WeightedRoundRobin weightedRoundRobin = new WeightedRoundRobin<>( + calculateShardWeight(shards, weightedRouting, nodes, defaultWeight) + ); + List> shardsOrderedbyWeight = weightedRoundRobin.orderEntities(); + List orderedShardRouting = new ArrayList<>(activeShards.size()); + if (shardsOrderedbyWeight != null) { + for (WeightedRoundRobin.Entity shardRouting : shardsOrderedbyWeight) { + orderedShardRouting.add(shardRouting.getTarget()); + } + } + return orderedShardRouting; + } + + /** + * Returns a list containing shard routing and associated weight. This function iterates through all the shards and + * uses weighted routing to find weight for the corresponding shard. This is fed to weighted round-robin scheduling + * to order shards by weight. 
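+ * For example (hypothetical values): with attribute {@code zone} and weights {@code {zone_a: 1.0, zone_b: 1.0, zone_c: 0.0}},
+ * a shard on a {@code zone_c} node is assigned weight 0.0, and a shard on a node without the attribute gets the
+ * supplied default weight.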
+ */ + private List> calculateShardWeight( + List shards, + WeightedRouting weightedRouting, + DiscoveryNodes nodes, + double defaultWeight + ) { + List> shardsWithWeights = new ArrayList<>(); + for (ShardRouting shard : shards) { + DiscoveryNode node = nodes.get(shard.currentNodeId()); + if (node != null) { + String attVal = node.getAttributes().get(weightedRouting.attributeName()); + // If no weight is defined for the node's attribute value, fall back to the default weight + Double weight = weightedRouting.weights().getOrDefault(attVal, defaultWeight); + shardsWithWeights.add(new WeightedRoundRobin.Entity<>(weight, shard)); + } + } + return shardsWithWeights; + } + private static Set getAllNodeIds(final List shards) { final Set nodeIds = new HashSet<>(); for (ShardRouting shard : shards) { @@ -698,6 +772,66 @@ public int shardsMatchingPredicateCount(Predicate predicate) { return count; } + /** + * Key for WeightedRouting Shard Iterator + * + * @opensearch.internal + */ + public static class WeightedRoutingKey { + private final WeightedRouting weightedRouting; + + public WeightedRoutingKey(WeightedRouting weightedRouting) { + this.weightedRouting = weightedRouting; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + WeightedRoutingKey key = (WeightedRoutingKey) o; + if (!weightedRouting.equals(key.weightedRouting)) return false; + return true; + } + + @Override + public int hashCode() { + int result = weightedRouting.hashCode(); + return result; + } + } + + /** + * Gets the active shard routing from memory if available; otherwise calculates it and puts it in memory. + */ + private List getActiveShardsByWeight(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) { + WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting); + List shardRoutings = activeShardsByWeight.get(key); + if (shardRoutings == null) { + synchronized (shardsByWeightMutex) { + shardRoutings = shardsOrderedByWeight(activeShards, weightedRouting, nodes, defaultWeight); + activeShardsByWeight = new MapBuilder().put(key, shardRoutings).immutableMap(); + } + } + return shardRoutings; + } + + /** + * Gets the initializing shard routing from memory if available; otherwise calculates it and puts it in memory. + */ + private List getInitializingShardsByWeight(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) { + WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting); + List shardRoutings = initializingShardsByWeight.get(key); + if (shardRoutings == null) { + synchronized (shardsByWeightMutex) { + shardRoutings = shardsOrderedByWeight(allInitializingShards, weightedRouting, nodes, defaultWeight); + initializingShardsByWeight = new MapBuilder().put(key, shardRoutings).immutableMap(); + } + } + return shardRoutings; + } + /** * Builder of an index shard routing table.
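A standalone sketch of the weighted round-robin expansion that the methods above delegate to (editorial; the class name is hypothetical, and it sits in org.opensearch.cluster.routing because the Entity class introduced later in this patch is package-private):

    package org.opensearch.cluster.routing;

    import java.util.ArrayList;
    import java.util.List;

    public class WeightedRoundRobinSketch {
        public static void main(String[] args) {
            List<WeightedRoundRobin.Entity<String>> entities = new ArrayList<>();
            entities.add(new WeightedRoundRobin.Entity<>(2.0, "shard-in-zone-a"));
            entities.add(new WeightedRoundRobin.Entity<>(1.0, "shard-in-zone-b"));
            // With weights 2:1 the expanded schedule is [a, a, b], consistent with
            // the orderings asserted in WeightedRoundRobinTests further below.
            for (WeightedRoundRobin.Entity<String> entity : new WeightedRoundRobin<>(entities).orderEntities()) {
                System.out.println(entity.getTarget());
            }
        }
    }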
* diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index 30f6408c19783..9026da667ccb0 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.common.Nullable; @@ -75,9 +76,17 @@ public class OperationRouting { Setting.Property.Dynamic, Setting.Property.NodeScope ); + public static final Setting WEIGHTED_ROUTING_DEFAULT_WEIGHT = Setting.doubleSetting( + "cluster.routing.weighted.default_weight", + 1.0, + 1.0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); private volatile List awarenessAttributes; private volatile boolean useAdaptiveReplicaSelection; private volatile boolean ignoreAwarenessAttr; + private volatile double weightedRoutingDefaultWeight; public OperationRouting(Settings settings, ClusterSettings clusterSettings) { // whether to ignore awareness attributes when routing requests @@ -88,8 +97,10 @@ public OperationRouting(Settings settings, ClusterSettings clusterSettings) { this::setAwarenessAttributes ); this.useAdaptiveReplicaSelection = USE_ADAPTIVE_REPLICA_SELECTION_SETTING.get(settings); + this.weightedRoutingDefaultWeight = WEIGHTED_ROUTING_DEFAULT_WEIGHT.get(settings); clusterSettings.addSettingsUpdateConsumer(USE_ADAPTIVE_REPLICA_SELECTION_SETTING, this::setUseAdaptiveReplicaSelection); clusterSettings.addSettingsUpdateConsumer(IGNORE_AWARENESS_ATTRIBUTES_SETTING, this::setIgnoreAwarenessAttributes); + clusterSettings.addSettingsUpdateConsumer(WEIGHTED_ROUTING_DEFAULT_WEIGHT, this::setWeightedRoutingDefaultWeight); } void setUseAdaptiveReplicaSelection(boolean useAdaptiveReplicaSelection) { @@ -100,6 +111,10 @@ void setIgnoreAwarenessAttributes(boolean ignoreAwarenessAttributes) { this.ignoreAwarenessAttr = ignoreAwarenessAttributes; } + void setWeightedRoutingDefaultWeight(double weightedRoutingDefaultWeight) { + this.weightedRoutingDefaultWeight = weightedRoutingDefaultWeight; + } + public boolean isIgnoreAwarenessAttr() { return ignoreAwarenessAttr; } @@ -116,6 +131,10 @@ public boolean ignoreAwarenessAttributes() { return this.awarenessAttributes.isEmpty() || this.ignoreAwarenessAttr; } + public double getWeightedRoutingDefaultWeight() { + return this.weightedRoutingDefaultWeight; + } + public ShardIterator indexShards(ClusterState clusterState, String index, String id, @Nullable String routing) { return shards(clusterState, index, id, routing).shardsIt(); } @@ -133,7 +152,8 @@ public ShardIterator getShards( clusterState.nodes(), preference, null, - null + null, + clusterState.getMetadata().weightedRoutingMetadata() ); } @@ -145,7 +165,8 @@ public ShardIterator getShards(ClusterState clusterState, String index, int shar clusterState.nodes(), preference, null, - null + null, + clusterState.metadata().weightedRoutingMetadata() ); } @@ -175,7 +196,8 @@ public GroupShardsIterator searchShards( clusterState.nodes(), preference, collectorService, - nodeCounts + nodeCounts, + clusterState.metadata().weightedRoutingMetadata() ); if (iterator != null) { set.add(iterator); @@ -225,10 +247,11 @@ private 
ShardIterator preferenceActiveShardIterator( DiscoveryNodes nodes, @Nullable String preference, @Nullable ResponseCollectorService collectorService, - @Nullable Map nodeCounts + @Nullable Map nodeCounts, + @Nullable WeightedRoutingMetadata weightedRoutingMetadata ) { if (preference == null || preference.isEmpty()) { - return shardRoutings(indexShard, nodes, collectorService, nodeCounts); + return shardRoutings(indexShard, nodes, collectorService, nodeCounts, weightedRoutingMetadata); } if (preference.charAt(0) == '_') { Preference preferenceType = Preference.parse(preference); @@ -255,7 +278,7 @@ private ShardIterator preferenceActiveShardIterator( } // no more preference if (index == -1 || index == preference.length() - 1) { - return shardRoutings(indexShard, nodes, collectorService, nodeCounts); + return shardRoutings(indexShard, nodes, collectorService, nodeCounts, weightedRoutingMetadata); } else { // update the preference and continue preference = preference.substring(index + 1); @@ -298,9 +321,16 @@ private ShardIterator shardRoutings( IndexShardRoutingTable indexShard, DiscoveryNodes nodes, @Nullable ResponseCollectorService collectorService, - @Nullable Map nodeCounts + @Nullable Map nodeCounts, + @Nullable WeightedRoutingMetadata weightedRoutingMetadata ) { - if (ignoreAwarenessAttributes()) { + if (weightedRoutingMetadata != null) { + return indexShard.activeInitializingShardsWeightedIt( + weightedRoutingMetadata.getWeightedRouting(), + nodes, + getWeightedRoutingDefaultWeight() + ); + } else if (ignoreAwarenessAttributes()) { if (useAdaptiveReplicaSelection) { return indexShard.activeInitializingShardsRankedIt(collectorService, nodeCounts); } else { diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoundRobin.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoundRobin.java new file mode 100644 index 0000000000000..15d437db9c8ff --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoundRobin.java @@ -0,0 +1,106 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing; + +import java.util.ArrayList; +import java.util.List; + +/** + * Weighted round-robin scheduling policy. + */ +public class WeightedRoundRobin { + + private List> entities; + + public WeightedRoundRobin(List> entities) { + this.entities = entities; + } + + /** + * @return list of entities ordered using weighted round-robin scheduling + * (see http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling) + */ + public List> orderEntities() { + int lastSelectedEntity = -1; + int size = entities.size(); + double currentWeight = 0; + List> orderedWeight = new ArrayList<>(); + if (size == 0) { + return null; + } + // Find maximum weight and greatest common divisor of weight across all entities + double maxWeight = 0; + double sumWeight = 0; + Double gcd = null; + for (WeightedRoundRobin.Entity entity : entities) { + maxWeight = Math.max(maxWeight, entity.getWeight()); + gcd = (gcd == null) ? entity.getWeight() : gcd(gcd, entity.getWeight()); + sumWeight += entity.getWeight() > 0 ?
entity.getWeight() : 0; + } + int count = 0; + while (count < sumWeight) { + lastSelectedEntity = (lastSelectedEntity + 1) % size; + if (lastSelectedEntity == 0) { + currentWeight = currentWeight - gcd; + if (currentWeight <= 0) { + currentWeight = maxWeight; + if (currentWeight == 0) { + return orderedWeight; + } + } + } + if (entities.get(lastSelectedEntity).getWeight() >= currentWeight) { + orderedWeight.add(entities.get(lastSelectedEntity)); + count++; + } + } + return orderedWeight; + } + + /** + * Returns the greatest common divisor of two numbers; the weights here are doubles, so Euclid's algorithm is applied using the floating-point remainder + * https://en.wikipedia.org/wiki/Greatest_common_divisor#Using_Euclid.27s_algorithm + * + * @param a first number + * @param b second number + * @return greatest common divisor + */ + private double gcd(double a, double b) { + return (b == 0) ? a : gcd(b, a % b); + } + + static final class Entity { + private double weight; + private T target; + + public Entity(double weight, T target) { + this.weight = weight; + this.target = target; + } + + public T getTarget() { + return this.target; + } + + public void setTarget(T target) { + this.target = target; + } + + public double getWeight() { + return this.weight; + } + + public void setWeight(double weight) { + this.weight = weight; + } + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java new file mode 100644 index 0000000000000..df2d8d595eaab --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.cluster.routing; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +/** + * Entity for Weighted Round Robin weights + * + * @opensearch.internal + */ +public class WeightedRouting implements Writeable { + private String attributeName; + private Map weights; + + public WeightedRouting(String attributeName, Map weights) { + this.attributeName = attributeName; + this.weights = weights; + } + + public WeightedRouting(WeightedRouting weightedRouting) { + this.attributeName = weightedRouting.attributeName(); + this.weights = weightedRouting.weights; + } + + public WeightedRouting(StreamInput in) throws IOException { + attributeName = in.readString(); + weights = (Map) in.readGenericValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(attributeName); + out.writeGenericValue(weights); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + WeightedRouting that = (WeightedRouting) o; + if (!attributeName.equals(that.attributeName)) return false; + return weights.equals(that.weights); + } + + @Override + public int hashCode() { + return Objects.hash(attributeName, weights); + } + + @Override + public String toString() { + return "WeightedRouting{" + attributeName + "}{" + weights().toString() + "}"; + } + + public Map weights() { + return this.weights; + } + + public String attributeName() { + return this.attributeName; + } +} diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 971fb518ff1da..1665614c18496 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -529,6 +529,7 @@ public void apply(Settings value, Settings current, Settings previous) { Node.BREAKER_TYPE_KEY, OperationRouting.USE_ADAPTIVE_REPLICA_SELECTION_SETTING, OperationRouting.IGNORE_AWARENESS_ATTRIBUTES_SETTING, + OperationRouting.WEIGHTED_ROUTING_DEFAULT_WEIGHT, IndexGraveyard.SETTING_MAX_TOMBSTONES, PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING, EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, diff --git a/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java new file mode 100644 index 0000000000000..a0a9d2bd9586b --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.Map; + +public class WeightedRoutingMetadataTests extends AbstractXContentTestCase { + @Override + protected WeightedRoutingMetadata createTestInstance() { + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting); + return weightedRoutingMetadata; + } + + @Override + protected WeightedRoutingMetadata doParseInstance(XContentParser parser) throws IOException { + return WeightedRoutingMetadata.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java index 8bf2b1626292a..87cab4a006a63 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; @@ -759,6 +760,232 @@ public void testAdaptiveReplicaSelectionWithZoneAwarenessIgnored() throws Except terminate(threadPool); } + private ClusterState clusterStateForWeightedRouting(String[] indexNames, int numShards, int numReplicas) { + DiscoveryNode[] allNodes = setUpNodesForWeightedRouting(); + ClusterState state = ClusterStateCreationUtils.state(allNodes[0], allNodes[6], allNodes); + + Map> discoveryNodeMap = new HashMap<>(); + List nodesZoneA = new ArrayList<>(); + nodesZoneA.add(allNodes[0]); + nodesZoneA.add(allNodes[1]); + + List nodesZoneB = new ArrayList<>(); + nodesZoneB.add(allNodes[2]); + nodesZoneB.add(allNodes[3]); + + List nodesZoneC = new ArrayList<>(); + nodesZoneC.add(allNodes[4]); + nodesZoneC.add(allNodes[5]); + discoveryNodeMap.put("a", nodesZoneA); + discoveryNodeMap.put("b", nodesZoneB); + discoveryNodeMap.put("c", nodesZoneC); + + // Updating cluster state with node, index and shard details + state = updateStatetoTestWeightedRouting(indexNames, numShards, numReplicas, state, discoveryNodeMap); + + return state; + + } + + private ClusterState setWeightedRoutingWeights(ClusterState clusterState, Map weights) { + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting); + Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); + metadataBuilder.putCustom(WeightedRoutingMetadata.TYPE, weightedRoutingMetadata); + clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); + return clusterState; + } + + public void testWeightedOperationRouting() throws Exception { + final int numIndices = 2; + final int numShards = 3; + final int numReplicas = 2; + // setting up indices + final String[] indexNames = new 
String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indexNames[i] = "test" + i; + } + ClusterService clusterService = null; + TestThreadPool threadPool = null; + try { + ClusterState state = clusterStateForWeightedRouting(indexNames, numShards, numReplicas); + + Settings setting = Settings.builder().put("cluster.routing.allocation.awareness.attributes", "zone").build(); + + threadPool = new TestThreadPool("testThatOnlyNodesSupport"); + clusterService = ClusterServiceUtils.createClusterService(threadPool); + + OperationRouting opRouting = new OperationRouting( + setting, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + assertTrue(opRouting.ignoreAwarenessAttributes()); + Set selectedNodes = new HashSet<>(); + ResponseCollectorService collector = new ResponseCollectorService(clusterService); + Map outstandingRequests = new HashMap<>(); + + // Setting up weights for weighted round-robin in cluster state + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + state = setWeightedRoutingWeights(state, weights); + + ClusterState.Builder builder = ClusterState.builder(state); + ClusterServiceUtils.setState(clusterService, builder); + + // search shards call + GroupShardsIterator groupIterator = opRouting.searchShards( + state, + indexNames, + null, + null, + collector, + outstandingRequests + + ); + + for (ShardIterator it : groupIterator) { + List shardRoutings = Collections.singletonList(it.nextOrNull()); + for (ShardRouting shardRouting : shardRoutings) { + selectedNodes.add(shardRouting.currentNodeId()); + } + } + // tests no shards are assigned to nodes in zone c + for (String nodeID : selectedNodes) { + // No shards are assigned to nodes in zone c since its weight is 0 + assertFalse(nodeID.contains("c")); + } + + selectedNodes = new HashSet<>(); + setting = Settings.builder().put("cluster.routing.allocation.awareness.attributes", "zone").build(); + + // Updating weighted round robin weights in cluster state + weights = Map.of("a", 1.0, "b", 0.0, "c", 1.0); + state = setWeightedRoutingWeights(state, weights); + + builder = ClusterState.builder(state); + ClusterServiceUtils.setState(clusterService, builder); + + opRouting = new OperationRouting(setting, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + + // search shards call + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + + for (ShardIterator it : groupIterator) { + List shardRoutings = Collections.singletonList(it.nextOrNull()); + for (ShardRouting shardRouting : shardRoutings) { + selectedNodes.add(shardRouting.currentNodeId()); + } + } + // tests that no shards are assigned to zone with weight zero + for (String nodeID : selectedNodes) { + // No shards are assigned to nodes in zone b since its weight is 0 + assertFalse(nodeID.contains("b")); + } + } finally { + IOUtils.close(clusterService); + terminate(threadPool); + } + } + + public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Exception { + final int numIndices = 2; + final int numShards = 3; + final int numReplicas = 2; + // setting up indices + final String[] indexNames = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indexNames[i] = "test" + i; + } + + ClusterService clusterService = null; + TestThreadPool threadPool = null; + try { + ClusterState state = clusterStateForWeightedRouting(indexNames, numShards, numReplicas); + + Settings setting = 
Settings.builder().put("cluster.routing.allocation.awareness.attributes", "zone").build(); + + threadPool = new TestThreadPool("testThatOnlyNodesSupport"); + clusterService = ClusterServiceUtils.createClusterService(threadPool); + + OperationRouting opRouting = new OperationRouting( + setting, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + assertTrue(opRouting.ignoreAwarenessAttributes()); + Set selectedNodes = new HashSet<>(); + ResponseCollectorService collector = new ResponseCollectorService(clusterService); + Map outstandingRequests = new HashMap<>(); + + // Setting up weights for weighted round-robin in cluster state, weight for nodes in zone b is not set + Map weights = Map.of("a", 1.0, "c", 0.0); + state = setWeightedRoutingWeights(state, weights); + ClusterServiceUtils.setState(clusterService, ClusterState.builder(state)); + + // search shards call + GroupShardsIterator groupIterator = opRouting.searchShards( + state, + indexNames, + null, + null, + collector, + outstandingRequests + + ); + + for (ShardIterator it : groupIterator) { + List shardRoutings = Collections.singletonList(it.nextOrNull()); + for (ShardRouting shardRouting : shardRoutings) { + selectedNodes.add(shardRouting.currentNodeId()); + } + } + boolean weighAwayNodesInUndefinedZone = true; + // tests no shards are assigned to nodes in zone c + // tests shards are assigned to nodes in zone b + for (String nodeID : selectedNodes) { + // shard from nodes in zone c is not selected since its weight is 0 + assertFalse(nodeID.contains("c")); + if (nodeID.contains("b")) { + weighAwayNodesInUndefinedZone = false; + } + } + assertFalse(weighAwayNodesInUndefinedZone); + + selectedNodes = new HashSet<>(); + setting = Settings.builder().put("cluster.routing.allocation.awareness.attributes", "zone").build(); + + // Updating weighted round robin weights in cluster state + weights = Map.of("a", 0.0, "b", 1.0); + + state = setWeightedRoutingWeights(state, weights); + ClusterServiceUtils.setState(clusterService, ClusterState.builder(state)); + + opRouting = new OperationRouting(setting, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + + // search shards call + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + + for (ShardIterator it : groupIterator) { + List shardRoutings = Collections.singletonList(it.nextOrNull()); + for (ShardRouting shardRouting : shardRoutings) { + selectedNodes.add(shardRouting.currentNodeId()); + } + } + // tests that no shards are assigned to zone with weight zero + // tests shards are assigned to nodes in zone c + weighAwayNodesInUndefinedZone = true; + for (String nodeID : selectedNodes) { + // shard from nodes in zone a is not selected since its weight is 0 + assertFalse(nodeID.contains("a")); + if (nodeID.contains("c")) { + weighAwayNodesInUndefinedZone = false; + } + } + assertFalse(weighAwayNodesInUndefinedZone); + } finally { + IOUtils.close(clusterService); + terminate(threadPool); + } + } + private DiscoveryNode[] setupNodes() { // Sets up two data nodes in zone-a and one data node in zone-b List zones = Arrays.asList("a", "a", "b"); @@ -785,6 +1012,32 @@ private DiscoveryNode[] setupNodes() { return allNodes; } + private DiscoveryNode[] setUpNodesForWeightedRouting() { + List zones = Arrays.asList("a", "a", "b", "b", "c", "c"); + DiscoveryNode[] allNodes = new DiscoveryNode[7]; + int i = 0; + for (String zone : zones) { + DiscoveryNode node = new DiscoveryNode( + "node_" + 
zone + "_" + i, + buildNewFakeTransportAddress(), + singletonMap("zone", zone), + Collections.singleton(DiscoveryNodeRole.DATA_ROLE), + Version.CURRENT + ); + allNodes[i++] = node; + } + + DiscoveryNode clusterManager = new DiscoveryNode( + "cluster-manager", + buildNewFakeTransportAddress(), + Collections.emptyMap(), + Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), + Version.CURRENT + ); + allNodes[i] = clusterManager; + return allNodes; + } + public void testAllocationAwarenessDeprecation() { OperationRouting routing = new OperationRouting( Settings.builder() @@ -841,4 +1094,69 @@ private ClusterState updateStatetoTestARS( clusterState.routingTable(routingTableBuilder.build()); return clusterState.build(); } + + private ClusterState updateStatetoTestWeightedRouting( + String[] indices, + int numberOfShards, + int numberOfReplicas, + ClusterState state, + Map> discoveryNodeMap + ) { + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + Metadata.Builder metadataBuilder = Metadata.builder(); + ClusterState.Builder clusterState = ClusterState.builder(state); + List nodesZoneA = discoveryNodeMap.get("a"); + List nodesZoneB = discoveryNodeMap.get("b"); + List nodesZoneC = discoveryNodeMap.get("c"); + for (String index : indices) { + IndexMetadata indexMetadata = IndexMetadata.builder(index) + .settings( + Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, numberOfShards) + .put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) + .put(SETTING_CREATION_DATE, System.currentTimeMillis()) + ) + .build(); + metadataBuilder.put(indexMetadata, false).generateClusterUuidIfNeeded(); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetadata.getIndex()); + for (int i = 0; i < numberOfShards; i++) { + final ShardId shardId = new ShardId(index, "_na_", i); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + // Assign all the primary shards on nodes in zone-a (node_a0 or node_a1) + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRouting( + index, + i, + nodesZoneA.get(randomInt(nodesZoneA.size() - 1)).getId(), + null, + true, + ShardRoutingState.STARTED + ) + ); + for (int replica = 0; replica < numberOfReplicas; replica++) { + // Assign all the replicas on nodes in zone-b (node_b2) + String nodeId = ""; + if (replica == 0) { + nodeId = nodesZoneB.get(randomInt(nodesZoneB.size() - 1)).getId(); + } else { + nodeId = nodesZoneC.get(randomInt(nodesZoneC.size() - 1)).getId(); + } + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRouting(index, i, nodeId, null, false, ShardRoutingState.STARTED) + ); + } + indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build()); + } + routingTableBuilder.add(indexRoutingTableBuilder.build()); + } + // add weighted routing weights in metadata + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting); + metadataBuilder.putCustom(WeightedRoutingMetadata.TYPE, weightedRoutingMetadata); + clusterState.metadata(metadataBuilder); + clusterState.routingTable(routingTableBuilder.build()); + return clusterState.build(); + } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoundRobinTests.java b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoundRobinTests.java new file mode 
100644 index 0000000000000..5f62d30486e86 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoundRobinTests.java @@ -0,0 +1,151 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class WeightedRoundRobinTests extends OpenSearchTestCase { + + public void testWeightedRoundRobinOrder() { + // weights set as A:4, B:3, C:2 + List> entity = new ArrayList>(); + entity.add(new WeightedRoundRobin.Entity<>(4, "A")); + entity.add(new WeightedRoundRobin.Entity<>(3, "B")); + entity.add(new WeightedRoundRobin.Entity<>(2, "C")); + WeightedRoundRobin weightedRoundRobin = new WeightedRoundRobin(entity); + List> orderedEntities = weightedRoundRobin.orderEntities(); + List expectedOrdering = Arrays.asList("A", "A", "B", "A", "B", "C", "A", "B", "C"); + List actualOrdering = new ArrayList<>(); + for (WeightedRoundRobin.Entity en : orderedEntities) { + actualOrdering.add(en.getTarget()); + } + assertEquals(expectedOrdering, actualOrdering); + + // weights set as A:1, B:1, C:0 + entity = new ArrayList>(); + entity.add(new WeightedRoundRobin.Entity<>(1, "A")); + entity.add(new WeightedRoundRobin.Entity<>(1, "B")); + entity.add(new WeightedRoundRobin.Entity<>(0, "C")); + weightedRoundRobin = new WeightedRoundRobin(entity); + orderedEntities = weightedRoundRobin.orderEntities(); + expectedOrdering = Arrays.asList("A", "B"); + actualOrdering = new ArrayList<>(); + for (WeightedRoundRobin.Entity en : orderedEntities) { + actualOrdering.add(en.getTarget()); + } + assertEquals(expectedOrdering, actualOrdering); + + // weights set as A:0, B:0, C:0 + entity = new ArrayList>(); + entity.add(new WeightedRoundRobin.Entity<>(0, "A")); + entity.add(new WeightedRoundRobin.Entity<>(0, "B")); + entity.add(new WeightedRoundRobin.Entity<>(0, "C")); + weightedRoundRobin = new WeightedRoundRobin(entity); + orderedEntities = weightedRoundRobin.orderEntities(); + expectedOrdering = Arrays.asList(); + actualOrdering = new ArrayList<>(); + for (WeightedRoundRobin.Entity en : orderedEntities) { + actualOrdering.add(en.getTarget()); + } + assertEquals(expectedOrdering, actualOrdering); + + // weights set as A:-1, B:0, C:1 + entity = new ArrayList>(); + entity.add(new WeightedRoundRobin.Entity<>(-1, "A")); + entity.add(new WeightedRoundRobin.Entity<>(0, "B")); + entity.add(new WeightedRoundRobin.Entity<>(1, "C")); + weightedRoundRobin = new WeightedRoundRobin(entity); + orderedEntities = weightedRoundRobin.orderEntities(); + expectedOrdering = Arrays.asList("C"); + actualOrdering = new ArrayList<>(); + for (WeightedRoundRobin.Entity en : orderedEntities) { + actualOrdering.add(en.getTarget()); + } + assertEquals(expectedOrdering, actualOrdering); + + // weights set as A:-1, B:3, C:0, D:10 + entity = new ArrayList>(); + entity.add(new WeightedRoundRobin.Entity<>(-1, "A")); + entity.add(new WeightedRoundRobin.Entity<>(3, "B")); + entity.add(new WeightedRoundRobin.Entity<>(0, "C")); + entity.add(new WeightedRoundRobin.Entity<>(10, "D")); + weightedRoundRobin = new WeightedRoundRobin(entity); + orderedEntities = weightedRoundRobin.orderEntities(); + expectedOrdering = Arrays.asList("B", "D", "B", "D", "B", "D", "D", "D", "D", "D", "D", "D", "D"); + actualOrdering = new 
ArrayList<>(); + for (WeightedRoundRobin.Entity en : orderedEntities) { + actualOrdering.add(en.getTarget()); + } + assertEquals(expectedOrdering, actualOrdering); + + // weights set as A:-1, B:3, C:0, D:10000 + entity = new ArrayList>(); + entity.add(new WeightedRoundRobin.Entity<>(-1, "A")); + entity.add(new WeightedRoundRobin.Entity<>(3, "B")); + entity.add(new WeightedRoundRobin.Entity<>(0, "C")); + entity.add(new WeightedRoundRobin.Entity<>(10000, "D")); + weightedRoundRobin = new WeightedRoundRobin(entity); + orderedEntities = weightedRoundRobin.orderEntities(); + assertEquals(10003, orderedEntities.size()); + // Count of D's + int countD = 0; + // Count of B's + int countB = 0; + for (WeightedRoundRobin.Entity en : orderedEntities) { + if (en.getTarget().equals("D")) { + countD++; + } else if (en.getTarget().equals("B")) { + countB++; + } + } + assertEquals(3, countB); + assertEquals(10000, countD); + + // weights set C:0 + entity = new ArrayList>(); + entity.add(new WeightedRoundRobin.Entity<>(0, "C")); + weightedRoundRobin = new WeightedRoundRobin(entity); + orderedEntities = weightedRoundRobin.orderEntities(); + expectedOrdering = Arrays.asList(); + actualOrdering = new ArrayList<>(); + for (WeightedRoundRobin.Entity en : orderedEntities) { + actualOrdering.add(en.getTarget()); + } + assertEquals(expectedOrdering, actualOrdering); + + // weights set C:1 + entity = new ArrayList>(); + entity.add(new WeightedRoundRobin.Entity<>(1, "C")); + weightedRoundRobin = new WeightedRoundRobin(entity); + orderedEntities = weightedRoundRobin.orderEntities(); + expectedOrdering = Arrays.asList("C"); + actualOrdering = new ArrayList<>(); + for (WeightedRoundRobin.Entity en : orderedEntities) { + actualOrdering.add(en.getTarget()); + } + assertEquals(expectedOrdering, actualOrdering); + + // weights set C:2 + entity = new ArrayList>(); + entity.add(new WeightedRoundRobin.Entity<>(2, "C")); + weightedRoundRobin = new WeightedRoundRobin(entity); + orderedEntities = weightedRoundRobin.orderEntities(); + expectedOrdering = Arrays.asList("C", "C"); + actualOrdering = new ArrayList<>(); + for (WeightedRoundRobin.Entity en : orderedEntities) { + actualOrdering.add(en.getTarget()); + } + assertEquals(expectedOrdering, actualOrdering); + } + +} diff --git a/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java b/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java index 68ad47fa1bbc9..8f5aa1b764551 100644 --- a/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java +++ b/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.OperationRouting; import org.opensearch.cluster.routing.PlainShardIterator; import org.opensearch.cluster.routing.RotationShardShuffler; @@ -48,11 +49,15 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardShuffler; import org.opensearch.cluster.routing.ShardsIterator; +import org.opensearch.cluster.routing.WeightedRouting; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.opensearch.cluster.service.ClusterService; import 
org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.ShardId; +import org.opensearch.test.ClusterServiceUtils; +import org.opensearch.threadpool.TestThreadPool; import java.util.Arrays; import java.util.Collections; @@ -497,4 +502,209 @@ public void testReplicaShardPreferenceIters() throws Exception { } } + public void testWeightedRoutingWithDifferentWeights() { + TestThreadPool threadPool = null; + try { + Settings.Builder settings = Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10) + .put("cluster.routing.allocation.awareness.attributes", "zone"); + AllocationService strategy = createAllocationService(settings.build()); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) + .build(); + + RoutingTable routingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .build(); + + threadPool = new TestThreadPool("testThatOnlyNodesSupport"); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + + Map node1Attributes = new HashMap<>(); + node1Attributes.put("zone", "zone1"); + Map node2Attributes = new HashMap<>(); + node2Attributes.put("zone", "zone2"); + Map node3Attributes = new HashMap<>(); + node3Attributes.put("zone", "zone3"); + clusterState = ClusterState.builder(clusterState) + .nodes( + DiscoveryNodes.builder() + .add(newNode("node1", unmodifiableMap(node1Attributes))) + .add(newNode("node2", unmodifiableMap(node2Attributes))) + .add(newNode("node3", unmodifiableMap(node3Attributes))) + .localNodeId("node1") + ) + .build(); + clusterState = strategy.reroute(clusterState, "reroute"); + + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + Map weights = Map.of("zone1", 1.0, "zone2", 1.0, "zone3", 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ShardIterator shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1); + + assertEquals(2, shardIterator.size()); + ShardRouting shardRouting; + while (shardIterator.remaining() > 0) { + shardRouting = shardIterator.nextOrNull(); + assertNotNull(shardRouting); + assertFalse(Arrays.asList("node3").contains(shardRouting.currentNodeId())); + } + + weights = Map.of("zone1", 1.0, "zone2", 1.0, "zone3", 1.0); + weightedRouting = new WeightedRouting("zone", weights); + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1); + assertEquals(3, shardIterator.size()); + + weights = Map.of("zone1", -1.0, "zone2", 0.0, "zone3", 1.0); + weightedRouting = new WeightedRouting("zone", weights); + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1); + assertEquals(1, shardIterator.size()); + while (shardIterator.remaining() > 0) { + shardRouting = shardIterator.nextOrNull(); + assertNotNull(shardRouting); + assertFalse(Arrays.asList("node2", "node1").contains(shardRouting.currentNodeId())); + } + + weights = 
Map.of("zone1", 0.0, "zone2", 0.0, "zone3", 0.0); + weightedRouting = new WeightedRouting("zone", weights); + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1); + assertEquals(0, shardIterator.size()); + } finally { + terminate(threadPool); + } + } + + public void testWeightedRoutingInMemoryStore() { + TestThreadPool threadPool = null; + try { + Settings.Builder settings = Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10) + .put("cluster.routing.allocation.awareness.attributes", "zone"); + AllocationService strategy = createAllocationService(settings.build()); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) + .build(); + + RoutingTable routingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .build(); + + threadPool = new TestThreadPool("testThatOnlyNodesSupport"); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + + Map node1Attributes = new HashMap<>(); + node1Attributes.put("zone", "zone1"); + Map node2Attributes = new HashMap<>(); + node2Attributes.put("zone", "zone2"); + Map node3Attributes = new HashMap<>(); + node3Attributes.put("zone", "zone3"); + clusterState = ClusterState.builder(clusterState) + .nodes( + DiscoveryNodes.builder() + .add(newNode("node1", unmodifiableMap(node1Attributes))) + .add(newNode("node2", unmodifiableMap(node2Attributes))) + .add(newNode("node3", unmodifiableMap(node3Attributes))) + .localNodeId("node1") + ) + .build(); + clusterState = strategy.reroute(clusterState, "reroute"); + + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + Map weights = Map.of("zone1", 1.0, "zone2", 1.0, "zone3", 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + IndexShardRoutingTable indexShardRoutingTable = clusterState.routingTable().index("test").shard(0); + + assertNull( + indexShardRoutingTable.getActiveShardsByWeight().get(new IndexShardRoutingTable.WeightedRoutingKey(weightedRouting)) + ); + ShardIterator shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1); + assertEquals(2, shardIterator.size()); + ShardRouting shardRouting; + while (shardIterator.remaining() > 0) { + shardRouting = shardIterator.nextOrNull(); + assertNotNull(shardRouting); + assertFalse(Arrays.asList("node3").contains(shardRouting.currentNodeId())); + } + + // Make iterator call with same WeightedRouting instance + assertNotNull( + indexShardRoutingTable.getActiveShardsByWeight().get(new IndexShardRoutingTable.WeightedRoutingKey(weightedRouting)) + ); + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1); + assertEquals(2, shardIterator.size()); + while (shardIterator.remaining() > 0) { + shardRouting = shardIterator.nextOrNull(); + assertNotNull(shardRouting); + assertFalse(Arrays.asList("node3").contains(shardRouting.currentNodeId())); + } + + // Make iterator call with new instance of WeightedRouting 
but same weights + Map weights1 = Map.of("zone1", 1.0, "zone2", 1.0, "zone3", 0.0); + weightedRouting = new WeightedRouting("zone", weights1); + assertNotNull( + indexShardRoutingTable.getActiveShardsByWeight().get(new IndexShardRoutingTable.WeightedRoutingKey(weightedRouting)) + ); + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1); + assertEquals(2, shardIterator.size()); + while (shardIterator.remaining() > 0) { + shardRouting = shardIterator.nextOrNull(); + assertNotNull(shardRouting); + assertFalse(Arrays.asList("node3").contains(shardRouting.currentNodeId())); + } + + // Make iterator call with different weights + Map weights2 = Map.of("zone1", 1.0, "zone2", 0.0, "zone3", 1.0); + weightedRouting = new WeightedRouting("zone", weights2); + assertNull( + indexShardRoutingTable.getActiveShardsByWeight().get(new IndexShardRoutingTable.WeightedRoutingKey(weightedRouting)) + ); + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1); + assertEquals(2, shardIterator.size()); + while (shardIterator.remaining() > 0) { + shardRouting = shardIterator.nextOrNull(); + assertNotNull(shardRouting); + assertFalse(Arrays.asList("node2").contains(shardRouting.currentNodeId())); + } + } finally { + terminate(threadPool); + } + } } From fa07cd9335ecd45d197b99097d5b04a61d4522de Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Mon, 19 Sep 2022 07:50:33 -0700 Subject: [PATCH 59/78] [Javadoc] add missing javadocs for :example-plugin modules (#4540) * Add javadocs for example-plugins:custom-settings module Signed-off-by: Daniel Widdis * Add javadocs for example-plugins:custom-significance-heuristic Signed-off-by: Daniel Widdis * Add javadocs for example-plugins:custom-suggester Signed-off-by: Daniel Widdis * Add javadocs to example-plugins:painless-allowlist Signed-off-by: Daniel Widdis * Add javadocs to example-plugins:rescore Signed-off-by: Daniel Widdis * Add javadocs to example-plugins:rest-handler Signed-off-by: Daniel Widdis * Add javadocs to example-plugins:script-expert-scoring Signed-off-by: Daniel Widdis * Remove exclusions for module which aren't giving javadoc errors Signed-off-by: Daniel Widdis * CI testing apparently needs to instantiate a no-op class Signed-off-by: Daniel Widdis Signed-off-by: Daniel Widdis --- CHANGELOG.md | 1 + gradle/missing-javadoc.gradle | 10 --- .../ExampleCustomSettingsConfig.java | 32 ++++++- .../ExampleCustomSettingsPlugin.java | 12 ++- .../example/customsettings/package-info.java | 12 +++ .../CustomSignificanceHeuristicPlugin.java | 6 ++ .../customsigheuristic/SimpleHeuristic.java | 12 +++ .../customsigheuristic/package-info.java | 12 +++ .../customsuggester/CustomSuggester.java | 8 ++ .../CustomSuggesterPlugin.java | 9 ++ .../customsuggester/CustomSuggestion.java | 85 +++++++++++++++++++ .../CustomSuggestionBuilder.java | 26 +++++- .../CustomSuggestionContext.java | 11 +++ .../example/customsuggester/package-info.java | 12 +++ .../ExampleAllowlistAnnotationParser.java | 10 ++- .../ExampleAllowlistExtension.java | 5 ++ .../ExampleAllowlistedClass.java | 36 +++++++- .../ExampleAllowlistedInstance.java | 21 +++++ .../ExamplePainlessAnnotation.java | 31 +++++++ .../ExampleStaticMethodClass.java | 16 ++++ .../painlessallowlist/MyAllowlistPlugin.java | 12 ++- .../painlessallowlist/package-info.java | 12 +++ .../rescore/ExampleRescoreBuilder.java | 22 ++++- 
.../example/rescore/ExampleRescorePlugin.java | 9 ++ .../example/rescore/package-info.java | 12 +++ .../example/resthandler/ExampleCatAction.java | 12 ++- .../resthandler/ExampleRestHandlerPlugin.java | 8 ++ .../example/resthandler/package-info.java | 12 +++ .../expertscript/ExpertScriptPlugin.java | 5 ++ .../example/expertscript/package-info.java | 12 +++ 30 files changed, 460 insertions(+), 23 deletions(-) create mode 100644 plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/package-info.java create mode 100644 plugins/examples/custom-significance-heuristic/src/main/java/org/opensearch/example/customsigheuristic/package-info.java create mode 100644 plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/package-info.java create mode 100644 plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/package-info.java create mode 100644 plugins/examples/rescore/src/main/java/org/opensearch/example/rescore/package-info.java create mode 100644 plugins/examples/rest-handler/src/main/java/org/opensearch/example/resthandler/package-info.java create mode 100644 plugins/examples/script-expert-scoring/src/main/java/org/opensearch/example/expertscript/package-info.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c610c89eeaf3..a00822872a84f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added missing javadocs for `:distribution:tools` modules ([#4483](https://github.com/opensearch-project/OpenSearch/pull/4483)) - Add BWC version 2.3.1 ([#4513](https://github.com/opensearch-project/OpenSearch/pull/4513)) - [Segment Replication] Add snapshot and restore tests for segment replication feature ([#3993](https://github.com/opensearch-project/OpenSearch/pull/3993)) +- Added missing javadocs for `:example-plugins` modules ([#4540](https://github.com/opensearch-project/OpenSearch/pull/4540)) ### Dependencies - Bumps `reactive-streams` from 1.0.3 to 1.0.4 diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 248a714f4f3e3..a1fde7637796c 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -95,14 +95,6 @@ configure([ project(":client:client-benchmark-noop-api-plugin"), project(":client:rest-high-level"), project(":client:test"), - project(":doc-tools"), - project(":example-plugins:custom-settings"), - project(":example-plugins:custom-significance-heuristic"), - project(":example-plugins:custom-suggester"), - project(":example-plugins:painless-allowlist"), - project(":example-plugins:rescore"), - project(":example-plugins:rest-handler"), - project(":example-plugins:script-expert-scoring"), project(":libs:opensearch-cli"), project(":libs:opensearch-core"), project(":libs:opensearch-dissect"), @@ -155,9 +147,7 @@ configure([ project(":plugins:store-smb"), project(":plugins:transport-nio"), project(":qa:die-with-dignity"), - project(":qa:os"), project(":qa:wildfly"), - project(":rest-api-spec"), project(":test:external-modules:test-delayed-aggs"), project(":test:fixtures:azure-fixture"), project(":test:fixtures:gcs-fixture"), diff --git a/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsConfig.java b/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsConfig.java index 8413a750e2741..cb2e28210faf1 100644 --- 
a/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsConfig.java +++ b/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsConfig.java @@ -94,8 +94,13 @@ public class ExampleCustomSettingsConfig { private final List list; private final String filtered; + /** + * Instantiate this object based on the specified environment. + * + * @param environment The environment including paths to custom setting configuration files + */ public ExampleCustomSettingsConfig(final Environment environment) { - // Elasticsearch config directory + // OpenSearch config directory final Path configDir = environment.configDir(); // Resolve the plugin's custom settings file @@ -121,22 +126,47 @@ public ExampleCustomSettingsConfig(final Environment environment) { assert secured != null; } + /** + * Gets the value of the custom.simple String setting. + * + * @return the custom.simple value + */ public String getSimple() { return simple; } + /** + * Gets the value of the custom.bool boolean setting. + * + * @return the custom.bool value + */ public Boolean getBool() { return bool; } + /** + * Gets the value of the custom.validated String setting. + * + * @return the custom.validated value + */ public String getValidated() { return validated; } + /** + * Gets the value of the custom.filtered String setting. + * + * @return the custom.filtered value + */ public String getFiltered() { return filtered; } + /** + * Gets the value of the custom.list list of integers setting. + * + * @return the custom.list value + */ public List getList() { return list; } diff --git a/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsPlugin.java b/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsPlugin.java index aa22938c72a01..0b619102c667f 100644 --- a/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsPlugin.java +++ b/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsPlugin.java @@ -42,10 +42,19 @@ import static java.util.stream.Collectors.toList; +/** + * An example plugin that includes custom settings. + */ public class ExampleCustomSettingsPlugin extends Plugin { private final ExampleCustomSettingsConfig config; + /** + * Instantiate this plugin with the specified settings and config path. + * + * @param settings The settings for this plugin. + * @param configPath The path to this plugin's configuration files. 
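+ * <p> + * As an illustrative example (file name and values assumed, not taken from this patch): a custom-settings.yml containing custom.simple: foo and custom.bool: true would be surfaced through the config's getSimple() and getBool() accessors shown above.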
+ */ public ExampleCustomSettingsPlugin(final Settings settings, final Path configPath) { this.config = new ExampleCustomSettingsConfig(new Environment(settings, configPath)); @@ -53,9 +62,6 @@ public ExampleCustomSettingsPlugin(final Settings settings, final Path configPat assert "secret".equals(config.getFiltered()); } - /** - * @return the plugin's custom settings - */ @Override public List> getSettings() { return Arrays.asList( diff --git a/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/package-info.java b/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/package-info.java new file mode 100644 index 0000000000000..5af8654201da2 --- /dev/null +++ b/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Example classes demonstrating the use of custom settings in a plugin. + */ +package org.opensearch.example.customsettings; diff --git a/plugins/examples/custom-significance-heuristic/src/main/java/org/opensearch/example/customsigheuristic/CustomSignificanceHeuristicPlugin.java b/plugins/examples/custom-significance-heuristic/src/main/java/org/opensearch/example/customsigheuristic/CustomSignificanceHeuristicPlugin.java index 49098ae36e30f..c646592af63cb 100644 --- a/plugins/examples/custom-significance-heuristic/src/main/java/org/opensearch/example/customsigheuristic/CustomSignificanceHeuristicPlugin.java +++ b/plugins/examples/custom-significance-heuristic/src/main/java/org/opensearch/example/customsigheuristic/CustomSignificanceHeuristicPlugin.java @@ -44,6 +44,12 @@ * Plugin declaring a custom {@link SignificanceHeuristic}. */ public class CustomSignificanceHeuristicPlugin extends Plugin implements SearchPlugin { + + /** + * Instantiate this plugin. + */ + public CustomSignificanceHeuristicPlugin() {}; + @Override public List> getSignificanceHeuristics() { return singletonList(new SignificanceHeuristicSpec<>(SimpleHeuristic.NAME, SimpleHeuristic::new, SimpleHeuristic.PARSER)); diff --git a/plugins/examples/custom-significance-heuristic/src/main/java/org/opensearch/example/customsigheuristic/SimpleHeuristic.java b/plugins/examples/custom-significance-heuristic/src/main/java/org/opensearch/example/customsigheuristic/SimpleHeuristic.java index 8365a56bcfe4e..9458bf5b75feb 100644 --- a/plugins/examples/custom-significance-heuristic/src/main/java/org/opensearch/example/customsigheuristic/SimpleHeuristic.java +++ b/plugins/examples/custom-significance-heuristic/src/main/java/org/opensearch/example/customsigheuristic/SimpleHeuristic.java @@ -44,13 +44,25 @@ * A simple {@linkplain SignificanceHeuristic} used an example of declaring a custom heuristic. */ public class SimpleHeuristic extends SignificanceHeuristic { + /** + * The name of this NamedWriteable heuristic. + */ public static final String NAME = "simple"; + + /** + * The parser with which to deserialize this object from XContent. + */ public static final ObjectParser PARSER = new ObjectParser<>(NAME, SimpleHeuristic::new); + /** + * Instantiates this object. + */ public SimpleHeuristic() {} /** * Read from a stream. 
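+ * Nothing is actually read from the stream; the heuristic is stateless, as the empty constructor body below notes.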
+ * + * @param in Input to read the value from */ public SimpleHeuristic(StreamInput in) throws IOException { // Nothing to read diff --git a/plugins/examples/custom-significance-heuristic/src/main/java/org/opensearch/example/customsigheuristic/package-info.java b/plugins/examples/custom-significance-heuristic/src/main/java/org/opensearch/example/customsigheuristic/package-info.java new file mode 100644 index 0000000000000..20809857273c4 --- /dev/null +++ b/plugins/examples/custom-significance-heuristic/src/main/java/org/opensearch/example/customsigheuristic/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Example classes demonstrating the use of a custom significance heuristic. + */ +package org.opensearch.example.customsigheuristic; diff --git a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggester.java b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggester.java index da154609e5f2f..05f26a8e401e1 100644 --- a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggester.java +++ b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggester.java @@ -41,8 +41,16 @@ import java.io.IOException; import java.util.Locale; +/** + * A custom suggester supporting suggestion-based search. + */ public class CustomSuggester extends Suggester { + /** + * Instantiate this object. + */ + public CustomSuggester() {} + // This is a pretty dumb implementation which returns the original text + fieldName + custom config option + 12 or 123 @Override public Suggest.Suggestion> innerExecute( diff --git a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggesterPlugin.java b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggesterPlugin.java index 5706b654ffbde..b71a90e700d21 100644 --- a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggesterPlugin.java +++ b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggesterPlugin.java @@ -38,7 +38,16 @@ import java.util.Collections; import java.util.List; +/** + * Plugin demonstrating custom suggestion-based search. + */ public class CustomSuggesterPlugin extends Plugin implements SearchPlugin { + + /** + * Instantiate this class. + */ + public CustomSuggesterPlugin() {} + @Override public List> getSuggesters() { return Collections.singletonList( diff --git a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java index 50ee700c3a253..f35fde03d261f 100644 --- a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java +++ b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java @@ -46,19 +46,43 @@ import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; +/** + * The suggestion responses corresponding to the suggestions in the request.
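+ * As the comment in CustomSuggester notes, each returned option simply echoes the original suggest text combined with the field name and the suggester's custom configuration option.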
+ */ public class CustomSuggestion extends Suggest.Suggestion { + /** + * An integer representing the type of the suggestion, formerly used for internal serialization over the network. + * + * This class is now serialized as a NamedWriteable and this value only remains for backwards compatibility. + */ public static final int TYPE = 999; + /** + * A meaningless value used to test that plugin suggesters can add fields to their Suggestion types. + */ public static final ParseField DUMMY = new ParseField("dummy"); private String dummy; + /** + * Instantiate this object with the specified name, size, and value for the configured field. + * + * @param name The name of the suggestion as defined in the request. + * @param size The suggested term size specified in the request, only used for merging shard responses. + * @param dummy The value for the custom dummy field. + */ public CustomSuggestion(String name, int size, String dummy) { super(name, size); this.dummy = dummy; } + /** + * Instantiate this object from a stream. + * + * @param in Input to read the value from + * @throws IOException on failure to read the value. + */ public CustomSuggestion(StreamInput in) throws IOException { super(in); dummy = in.readString(); @@ -85,6 +109,8 @@ public int getWriteableType() { * * This can't be serialized to xcontent because Suggestions appear in xcontent as an array of entries, so there is no place * to add a custom field. But we can still use a custom field internally and use it to define a Suggestion's behavior + * + * @return the value. */ public String getDummy() { return dummy; @@ -95,12 +121,23 @@ protected Entry newEntry(StreamInput in) throws IOException { return new Entry(in); } + /** + * Instantiate a CustomSuggestion from XContent. + * + * @param parser The XContent parser to use + * @param name The name of the suggestion + * @return A new CustomSuggestion instance for the specified name. + * @throws IOException on deserialization error. + */ public static CustomSuggestion fromXContent(XContentParser parser, String name) throws IOException { CustomSuggestion suggestion = new CustomSuggestion(name, -1, null); parseEntries(parser, suggestion, Entry::fromXContent); return suggestion; } + /** + * Represents a part of the suggest text with its suggested options. + */ public static class Entry extends Suggest.Suggestion.Entry { private static final ObjectParser PARSER = new ObjectParser<>("CustomSuggestionEntryParser", true, Entry::new); @@ -117,13 +154,30 @@ public static class Entry extends Suggest.Suggestion.Entry otherEntry) { @@ -150,6 +206,8 @@ protected void merge(Suggest.Suggestion.Entry