Commit 6de3633

Author: Divyansh Pandey (committed)
fixed flaky integration tests
Signed-off-by: Divyansh Pandey <dpaandey@amazon.com>
1 parent fb77cc7 commit 6de3633

File tree

1 file changed: +23 −2 lines


server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDeciderRemoteStoreEnabledIT.java

Lines changed: 23 additions & 2 deletions
@@ -8,7 +8,7 @@
 
 package org.opensearch.cluster.routing.allocation.decider;
 
-import org.apache.lucene.tests.util.LuceneTestCase;
+import org.opensearch.action.admin.indices.flush.FlushRequest;
 import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest;
 import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
@@ -19,14 +19,14 @@
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.junit.Before;
 
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.CLUSTER_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING;
 import static org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_PRIMARY_SHARDS_PER_NODE_SETTING;
 
-@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/17693")
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
 public class ShardsLimitAllocationDeciderRemoteStoreEnabledIT extends RemoteStoreBaseIntegTestCase {
     @Before
@@ -101,6 +101,7 @@ public void testIndexPrimaryShardLimit() throws Exception {
                 assertTrue("No node should have more than 1 primary shard of test1", count <= 1);
             }
         });
+        cleanUp(new String[] { "test1", "test2" });
     }
 
     public void testUpdatingIndexPrimaryShardLimit() throws Exception {
@@ -170,6 +171,7 @@ public void testUpdatingIndexPrimaryShardLimit() throws Exception {
                 assertTrue("No node should have more than 1 primary shard of test1", count <= 1);
             }
         });
+        cleanUp(new String[] { "test1" });
     }
 
     public void testClusterPrimaryShardLimitss() throws Exception {
@@ -224,6 +226,7 @@ public void testClusterPrimaryShardLimitss() throws Exception {
                 assertTrue("No node should have more than 1 primary shard", count <= 1);
             }
         });
+        cleanUp(new String[] { "test1" });
     }
 
     public void testCombinedIndexAndClusterPrimaryShardLimits() throws Exception {
@@ -313,9 +316,27 @@ public void testCombinedIndexAndClusterPrimaryShardLimits() throws Exception {
                 assertTrue("No node should have more than 3 primary shards total", count <= 3);
             }
         });
+        cleanUp(new String[] { "test1", "test2" });
     }
 
     private void updateClusterSetting(String setting, int value) {
         client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(setting, value)).get();
     }
+
+    private void cleanUp(String[] indices) throws Exception {
+        logger.info(">>> Starting custom tearDown in ShardsLimitAllocationDeciderRemoteStoreEnabledIT");
+        try {
+            // Synchronization: Force flush relevant indices.
+            logger.info("Attempting to flush indices {} to help sync remote store before cleanup...", Arrays.toString(indices));
+
+            FlushRequest flushRequest = new FlushRequest(indices);
+            flushRequest.force(true); // Force even if no changes detected
+            flushRequest.waitIfOngoing(true); // Wait if flush already in progress
+            client().admin().indices().flush(flushRequest).actionGet(); // Use actionGet() or get() to wait
+            logger.info("Flush request for {} completed.", Arrays.toString(indices));
+
+        } catch (Exception e) {
+            logger.error("Exception during pre-teardown synchronization flush: {} - {}", e.getClass().getName(), e.getMessage(), e);
+        }
+    }
 }
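
For reference, a minimal standalone sketch (not part of the commit) of the forced-flush pattern the new cleanUp helper relies on. The test class name and index name here are illustrative assumptions; only FlushRequest, force(), waitIfOngoing(), and the flush admin call are taken from the diff above:

import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.test.OpenSearchIntegTestCase;

public class FlushBeforeCleanupSketchIT extends OpenSearchIntegTestCase {
    public void testForcedFlushBeforeCleanup() {
        // Create a throwaway index via the integration-test harness (illustrative name).
        createIndex("demo-index");

        // Same flush settings as the commit's cleanUp helper.
        FlushRequest flushRequest = new FlushRequest("demo-index");
        flushRequest.force(true);          // flush even if no changes are detected
        flushRequest.waitIfOngoing(true);  // wait rather than fail if a flush is already running
        client().admin().indices().flush(flushRequest).actionGet(); // block until the flush completes
    }
}

Per the commit's own log messages, the forced flush helps sync the remote store before the test's indices are cleaned up, which is what was making these integration tests flaky.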
