@@ -187,6 +187,7 @@ public void testRestartPrimary_NoReplicas() throws Exception {
} else {
refresh(INDEX_NAME);
}
Thread.sleep(1000);
Long initialSize = internalCluster().getInstance(Node.class, primary).fileCache().size();
internalCluster().restartNode(primary);
ensureYellow(INDEX_NAME);
@@ -79,7 +79,7 @@ public void testMetadataResponseFromAllNodes() {
for (Map<String, Object> fileMeta : segmentFiles.values()) {
Map<String, Object> files = (Map<String, Object>) fileMeta.get("files");
assertNotNull(files);
assertFalse(files.isEmpty());
if (files.isEmpty()) continue;
for (Object value : files.values()) {
Map<String, Object> meta = (Map<String, Object>) value;
assertThat(meta, allOf(hasKey("original_name"), hasKey("checksum"), hasKey("length")));
@@ -150,7 +150,7 @@ public void testMetadataResponseAllShards() throws Exception {
for (Map<String, Object> fileMeta : segmentFiles.values()) {
Map<String, Object> files = (Map<String, Object>) fileMeta.get("files");
assertNotNull(files);
assertFalse(files.isEmpty());
if (files.isEmpty()) continue;
for (Object value : files.values()) {
Map<String, Object> meta = (Map<String, Object>) value;
assertThat(meta, allOf(hasKey("original_name"), hasKey("checksum"), hasKey("length")));
@@ -13,10 +13,13 @@
import org.opensearch.action.admin.indices.stats.CommonStatsFlags;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.remote.RemoteSegmentStats;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.translog.RemoteTranslogStats;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.junit.Before;

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
@@ -98,12 +101,16 @@ public void testNodesStatsParityWithReplicaShards() throws Exception {
* Ensures that node stats show 0 values for dedicated cluster manager nodes
* since cluster manager nodes do not participate in indexing
*/
public void testZeroRemoteStatsOnNodesStatsForClusterManager() {
public void testZeroRemoteStatsOnNodesStatsForClusterManager() throws IOException, ExecutionException, InterruptedException {
createIndex(INDEX_NAME, remoteStoreIndexSettings(0));
ensureGreen(INDEX_NAME);
indexSingleDoc(INDEX_NAME);
refresh(INDEX_NAME);

String primaryNode = primaryNodeName(INDEX_NAME);
IndexShard indexShard = getIndexShard(primaryNode, INDEX_NAME);
indexShard.awaitRemoteStoreSync();

NodesStatsResponse nodesStatsResponseForClusterManager = client().admin()
.cluster()
.prepareNodesStats(internalCluster().getClusterManagerName())
@@ -378,8 +378,8 @@ public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr
// Assertions
assertEquals(0, uploadBytesFailed);
for (int j = 0; j < response.getSuccessfulShards() - 1; j++) {
assertTrue(uploadBytesStarted - zeroStatePrimaryStats.get().uploadBytesStarted > downloadBytesStarted.get(j));
assertTrue(uploadBytesSucceeded - zeroStatePrimaryStats.get().uploadBytesSucceeded > downloadBytesSucceeded.get(j));
assertTrue(uploadBytesStarted - zeroStatePrimaryStats.get().uploadBytesStarted == downloadBytesStarted.get(j));
assertTrue(uploadBytesSucceeded - zeroStatePrimaryStats.get().uploadBytesSucceeded == downloadBytesSucceeded.get(j));
assertEquals(0, (long) downloadBytesFailed.get(j));
}
});
@@ -392,7 +392,7 @@ private void validateZeroStatePrimaryStats(RemoteSegmentTransferTracker.Stats pr
assertEquals(primaryStats.totalUploadsStarted, primaryStats.totalUploadsSucceeded);
assertTrue(primaryStats.totalUploadsSucceeded >= 1);
assertEquals(primaryStats.uploadBytesStarted, primaryStats.uploadBytesSucceeded);
assertTrue(primaryStats.uploadBytesSucceeded > 0);
assertEquals(0, primaryStats.uploadBytesSucceeded);
assertEquals(0, primaryStats.totalUploadsFailed);
assertEquals(0, primaryStats.uploadBytesFailed);
}
@@ -163,6 +163,7 @@ public void testWritableWarmBasic() throws Exception {
Set<String> filesBeforeMerge = new HashSet<>(Arrays.asList(directory.listAll()));
client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).get();
flushAndRefresh(INDEX_NAME);
Thread.sleep(1000);
Set<String> filesAfterMerge = new HashSet<>(Arrays.asList(directory.listAll()));

Set<String> filesFromPreviousGenStillPresent = filesBeforeMerge.stream()
@@ -789,6 +789,7 @@ public void apply(Settings value, Settings current, Settings previous) {
RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING,
RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS,
RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA,
RemoteStoreSettings.CLUSTER_REMOTE_STORE_REFRESH_SEGMENT_UPLOAD_DECOUPLE,
RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL,
RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL,
RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED,
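For reference, settings registered in this list are declared as Setting constants in RemoteStoreSettings. A minimal sketch of how a boolean, dynamically updatable cluster setting of this kind is typically declared; the key string and default value below are assumptions, not the actual definition:

// Hypothetical declaration; the real constant lives in RemoteStoreSettings and may differ.
public static final Setting<Boolean> CLUSTER_REMOTE_STORE_REFRESH_SEGMENT_UPLOAD_DECOUPLE = Setting.boolSetting(
    "cluster.remote_store.refresh_segment_upload_decouple.enabled", // assumed key
    false,                                                          // assumed default: decoupling disabled
    Setting.Property.NodeScope,
    Setting.Property.Dynamic
);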
@@ -32,7 +32,7 @@
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;

import static org.opensearch.index.shard.RemoteStoreRefreshListener.EXCLUDE_FILES;
import static org.opensearch.index.shard.RemoteStoreRefreshListener.isFileExcluded;

/**
* Keeps track of remote refresh which happens in {@link org.opensearch.index.shard.RemoteStoreRefreshListener}. This consists of multiple critical metrics.
@@ -300,6 +300,13 @@ public Map<String, Long> getLatestLocalFileNameLengthMap() {
return Collections.unmodifiableMap(latestLocalFileNameLengthMap);
}

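/**
 * Convenience overload of {@link #updateLatestLocalFileNameLengthMap(Collection, CheckedFunction, boolean)}
 * that keeps refresh/segment-upload decoupling disabled (isRefreshSegmentUploadDecouplingEnabled = false).
 */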
public Map<String, Long> updateLatestLocalFileNameLengthMap(
Collection<String> segmentFiles,
CheckedFunction<String, Long, IOException> fileSizeFunction
) {
return updateLatestLocalFileNameLengthMap(segmentFiles, fileSizeFunction, false);
}

/**
* Updates the latestLocalFileNameLengthMap by adding the file name and its size to the map.
* The method is given a function as an argument, which is used to determine the file size (length in bytes).
@@ -313,7 +320,8 @@ public Map<String, Long> getLatestLocalFileNameLengthMap() {
*/
public Map<String, Long> updateLatestLocalFileNameLengthMap(
Collection<String> segmentFiles,
CheckedFunction<String, Long, IOException> fileSizeFunction
CheckedFunction<String, Long, IOException> fileSizeFunction,
boolean isRefreshSegmentUploadDecouplingEnabled
) {
logger.debug(
"segmentFilesPostRefresh={} latestLocalFileNamesBeforeMapUpdate={}",
@@ -322,7 +330,7 @@ public Map<String, Long> updateLatestLocalFileNameLengthMap(
);
// Update the map
segmentFiles.stream()
.filter(file -> EXCLUDE_FILES.contains(file) == false)
.filter(file -> isFileExcluded(file, isRefreshSegmentUploadDecouplingEnabled) == false)
.filter(file -> latestLocalFileNameLengthMap.containsKey(file) == false || latestLocalFileNameLengthMap.get(file) == 0)
.forEach(file -> {
long fileSize = 0;
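A minimal usage sketch of the two overloads above, assuming a hypothetical caller that holds the tracker and the shard's local Lucene Directory; Directory::fileLength satisfies the CheckedFunction<String, Long, IOException> parameter:

import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import org.apache.lucene.store.Directory;
import org.opensearch.index.remote.RemoteSegmentTransferTracker;

class TrackerUsageSketch {
    // Hypothetical helper: refresh the tracker's local file-name/length map from the shard directory.
    static Map<String, Long> refreshLocalFileMap(
        RemoteSegmentTransferTracker tracker,
        Directory directory,
        boolean isRefreshSegmentUploadDecouplingEnabled
    ) throws IOException {
        // Directory::fileLength supplies each file's size; excluded files are filtered out inside the tracker.
        return tracker.updateLatestLocalFileNameLengthMap(
            Arrays.asList(directory.listAll()),
            directory::fileLength,
            isRefreshSegmentUploadDecouplingEnabled
        );
    }
}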
@@ -21,7 +21,7 @@
*
* @opensearch.internal
*/
public class CheckpointRefreshListener extends ReleasableRetryableRefreshListener {
public class CheckpointRefreshListener implements ReferenceManager.RefreshListener {

protected static Logger logger = LogManager.getLogger(CheckpointRefreshListener.class);

@@ -39,18 +39,12 @@ public void beforeRefresh() throws IOException {
}

@Override
protected boolean performAfterRefreshWithPermit(boolean didRefresh) {
public void afterRefresh(boolean didRefresh) throws IOException {
if (didRefresh
&& shard.state() == IndexShardState.STARTED
&& shard.getReplicationTracker().isPrimaryMode()
&& shard.indexSettings.isAssignedOnRemoteNode() == false) {
publisher.publish(shard, shard.getLatestReplicationCheckpoint());
}
return true;
}

@Override
protected Logger getLogger() {
return logger;
}
}
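With this change the class is a plain Lucene ReferenceManager.RefreshListener instead of a retryable, permit-acquiring listener. A minimal standalone sketch of that Lucene contract and of how such a listener is attached to a ReferenceManager; this is a hypothetical example, not the OpenSearch wiring:

import java.io.IOException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.ReferenceManager;
import org.apache.lucene.search.SearcherManager;

class RefreshListenerSketch implements ReferenceManager.RefreshListener {
    @Override
    public void beforeRefresh() throws IOException {
        // called before the manager attempts to refresh its reference
    }

    @Override
    public void afterRefresh(boolean didRefresh) throws IOException {
        // didRefresh is true only when the refresh actually installed a new reference
        if (didRefresh) {
            // e.g. publish a checkpoint or update stats here
        }
    }

    // Listeners are registered on a ReferenceManager, for example a SearcherManager:
    static void register(IndexWriter writer) throws IOException {
        SearcherManager manager = new SearcherManager(writer, null);
        manager.addListener(new RefreshListenerSketch());
    }
}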