Replace internal usages of 'master' term in 'server/src/internalClusterTest' directory #2521

Merged
@@ -91,7 +91,7 @@ public void testPendingTasksWithClusterNotRecoveredBlock() throws Exception {
}

// restart the cluster but prevent it from performing state recovery
final int nodeCount = client().admin().cluster().prepareNodesInfo("data:true", "master:true").get().getNodes().size();
final int nodeCount = client().admin().cluster().prepareNodesInfo("data:true", "cluster_manager:true").get().getNodes().size();
internalCluster().fullRestart(new InternalTestCluster.RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) {
@@ -107,7 +107,7 @@ public boolean validateClusterForming() {
assertNotNull(client().admin().cluster().preparePendingClusterTasks().get().getPendingTasks());

// starting one more node allows the cluster to recover
internalCluster().startDataOnlyNode(); // cannot update minimum_master_nodes before the cluster has formed
internalCluster().startDataOnlyNode(); // cannot update minimum_cluster_manager_nodes before the cluster has formed
ensureGreen();
}
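For context, the hunk above restarts the whole cluster while blocking state recovery. A minimal sketch of that restart-callback pattern, assuming the surrounding OpenSearchIntegTestCase context and imports; the recovery setting shown (GatewayService.RECOVER_AFTER_NODES_SETTING) is an assumed choice for illustration, not taken from this diff:

// Sketch: block state recovery by requiring more nodes than will come back up.
internalCluster().fullRestart(new InternalTestCluster.RestartCallback() {
    @Override
    public Settings onNodeStopped(String nodeName) {
        // Assumed setting for illustration; the real test may use a different one.
        return Settings.builder()
            .put(GatewayService.RECOVER_AFTER_NODES_SETTING.getKey(), nodeCount + 1)
            .build();
    }

    @Override
    public boolean validateClusterForming() {
        return false; // the cluster is not expected to fully form while recovery is blocked
    }
});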

@@ -48,7 +48,7 @@
import static org.hamcrest.Matchers.equalTo;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false)
public class IndexingMasterFailoverIT extends OpenSearchIntegTestCase {
public class IndexingClusterManagerFailoverIT extends OpenSearchIntegTestCase {

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
@@ -58,12 +58,12 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
}

/**
* Indexing operations which entail mapping changes require a blocking request to the master node to update the mapping.
* If the master node is being disrupted or if it cannot commit cluster state changes, it needs to retry within timeout limits.
* This retry logic is implemented in TransportMasterNodeAction and tested by the following master failover scenario.
* Indexing operations which entail mapping changes require a blocking request to the cluster-manager node to update the mapping.
* If the cluster-manager node is being disrupted or if it cannot commit cluster state changes, it needs to retry within timeout limits.
* This retry logic is implemented in TransportMasterNodeAction and tested by the following cluster-manager failover scenario.
*/
public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwable {
logger.info("--> start 4 nodes, 3 master, 1 data");
public void testClusterManagerFailoverDuringIndexingWithMappingChanges() throws Throwable {
logger.info("--> start 4 nodes, 3 cluster-manager, 1 data");

internalCluster().setBootstrapClusterManagerNodeIndex(2);

@@ -74,7 +74,7 @@ public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwable {
logger.info("--> wait for all nodes to join the cluster");
ensureStableCluster(4);

// We index data with mapping changes into cluster and have master failover at same time
// We index data with mapping changes into cluster and have cluster-manager failover at same time
client().admin()
.indices()
.prepareCreate("myindex")
@@ -108,14 +108,14 @@ public void run() {

barrier.await();

// interrupt communication between master and other nodes in cluster
NetworkDisruption partition = isolateMasterDisruption(NetworkDisruption.DISCONNECT);
// interrupt communication between cluster-manager and other nodes in cluster
NetworkDisruption partition = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT);
internalCluster().setDisruptionScheme(partition);

logger.info("--> disrupting network");
partition.startDisrupting();

logger.info("--> waiting for new master to be elected");
logger.info("--> waiting for new cluster-manager to be elected");
ensureStableCluster(3, dataNode);

partition.stopDisrupting();
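The javadoc above describes the retry path in TransportMasterNodeAction that this test exercises. Stripped of the concurrent indexing threads, the disruption sequence it relies on looks roughly like this (illustrative sketch only, reusing the calls visible in the diff; dataNode is the data-only node started earlier):

// Isolate the elected cluster-manager from every other node.
NetworkDisruption partition = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT);
internalCluster().setDisruptionScheme(partition);
partition.startDisrupting();

// The remaining three nodes elect a new cluster-manager; pending mapping updates are
// retried against it by TransportMasterNodeAction within their timeout.
ensureStableCluster(3, dataNode);

// Heal the partition; the old cluster-manager rejoins as a follower.
partition.stopDisrupting();
ensureStableCluster(4);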
@@ -60,7 +60,7 @@ public class ClusterHealthIT extends OpenSearchIntegTestCase {

public void testSimpleLocalHealth() {
createIndex("test");
ensureGreen(); // master should think it's green now.
ensureGreen(); // cluster-manager should think it's green now.

for (final String node : internalCluster().getNodeNames()) {
// a very high time out, which should never fire due to the local flag
@@ -336,7 +336,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
assertFalse(client().admin().cluster().prepareHealth("index").setWaitForGreenStatus().get().isTimedOut());

// at this point the original health response should not have returned: there was never a point where the index was green AND
// the master had processed all pending tasks above LANGUID priority.
// the cluster-manager had processed all pending tasks above LANGUID priority.
assertFalse(healthResponseFuture.isDone());
keepSubmittingTasks.set(false);
assertFalse(healthResponseFuture.actionGet(TimeValue.timeValueSeconds(30)).isTimedOut());
@@ -346,14 +346,14 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
}

public void testHealthOnMasterFailover() throws Exception {
public void testHealthOnClusterManagerFailover() throws Exception {
final String node = internalCluster().startDataOnlyNode();
final boolean withIndex = randomBoolean();
if (withIndex) {
// Create index with many shards to provoke the health request to wait (for green) while master is being shut down.
// Notice that this is set to 0 after the test completed starting a number of health requests and master restarts.
// Create index with many shards to provoke the health request to wait (for green) while cluster-manager is being shut down.
// Notice that this is set to 0 after the test completed starting a number of health requests and cluster-manager restarts.
// This ensures that the cluster is yellow when the health request is made, making the health request wait on the observer,
// triggering a call to observer.onClusterServiceClose when master is shutdown.
// triggering a call to observer.onClusterServiceClose when cluster-manager is shutdown.
createIndex(
"test",
Settings.builder()
Expand All @@ -364,8 +364,8 @@ public void testHealthOnMasterFailover() throws Exception {
);
}
final List<ActionFuture<ClusterHealthResponse>> responseFutures = new ArrayList<>();
// Run a few health requests concurrent to master fail-overs against a data-node to make sure master failover is handled
// without exceptions
// Run a few health requests concurrent to cluster-manager fail-overs against a data-node
// to make sure cluster-manager failover is handled without exceptions
final int iterations = withIndex ? 10 : 20;
for (int i = 0; i < iterations; ++i) {
responseFutures.add(
@@ -394,7 +394,7 @@ public void testHealthOnMasterFailover() throws Exception {
}
}

public void testWaitForEventsTimesOutIfMasterBusy() {
public void testWaitForEventsTimesOutIfClusterManagerBusy() {
final AtomicBoolean keepSubmittingTasks = new AtomicBoolean(true);
final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName());
final PlainActionFuture<Void> completionFuture = new PlainActionFuture<>();
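The comments in testHealthOnClusterManagerFailover above explain the intent: keep health requests in flight against a data node while the cluster-manager is restarted, and require that the futures complete without throwing. One iteration of that pattern might look like the following sketch (the restartNode call is an assumption; the diff does not show how the test restarts the cluster-manager):

// Fire a wait-for-green health request at the data node, then bounce the current
// cluster-manager; the future must complete (possibly timed out) rather than throw.
ActionFuture<ClusterHealthResponse> future = client(node).admin()
    .cluster()
    .prepareHealth()
    .setWaitForGreenStatus()
    .execute();

internalCluster().restartNode(internalCluster().getMasterName(), new InternalTestCluster.RestartCallback());
future.actionGet(TimeValue.timeValueSeconds(30)); // must return even though the cluster-manager changed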
@@ -166,7 +166,7 @@ public void testClusterInfoServiceCollectsInformation() {
}
ensureGreen(indexName);
InternalTestCluster internalTestCluster = internalCluster();
// Get the cluster info service on the master node
// Get the cluster info service on the cluster-manager node
final InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(
ClusterInfoService.class,
internalTestCluster.getMasterName()
@@ -93,7 +93,7 @@
public class ClusterStateDiffIT extends OpenSearchIntegTestCase {
public void testClusterStateDiffSerialization() throws Exception {
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
DiscoveryNode clusterManagerNode = randomNode("master");
DiscoveryNode clusterManagerNode = randomNode("cluster-manager");
DiscoveryNode otherNode = randomNode("other");
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder()
.add(clusterManagerNode)
@@ -74,7 +74,7 @@
import static org.hamcrest.Matchers.nullValue;

@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false)
public class MinimumMasterNodesIT extends OpenSearchIntegTestCase {
public class MinimumClusterManagerNodesIT extends OpenSearchIntegTestCase {

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
@@ -83,7 +83,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
return classes;
}

public void testTwoNodesNoMasterBlock() throws Exception {
public void testTwoNodesNoClusterManagerBlock() throws Exception {
internalCluster().setBootstrapClusterManagerNodeIndex(1);

Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build();
@@ -151,13 +151,13 @@ public void testTwoNodesNoMasterBlock() throws Exception {
);
}

String masterNode = internalCluster().getMasterName();
String otherNode = node1Name.equals(masterNode) ? node2Name : node1Name;
logger.info("--> add voting config exclusion for non-master node, to be sure it's not elected");
String clusterManagerNode = internalCluster().getMasterName();
String otherNode = node1Name.equals(clusterManagerNode) ? node2Name : node1Name;
logger.info("--> add voting config exclusion for non-cluster-manager node, to be sure it's not elected");
client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(otherNode)).get();
logger.info("--> stop master node, no cluster-manager block should appear");
Settings masterDataPathSettings = internalCluster().dataPathSettings(masterNode);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNode));
logger.info("--> stop cluster-manager node, no cluster-manager block should appear");
Settings clusterManagerDataPathSettings = internalCluster().dataPathSettings(clusterManagerNode);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(clusterManagerNode));

assertBusy(() -> {
ClusterState clusterState = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
@@ -170,8 +170,8 @@ public void testTwoNodesNoMasterBlock() throws Exception {
assertThat(state.nodes().getSize(), equalTo(2));
assertThat(state.nodes().getMasterNode(), equalTo(null));

logger.info("--> starting the previous master node again...");
node2Name = internalCluster().startNode(Settings.builder().put(settings).put(masterDataPathSettings).build());
logger.info("--> starting the previous cluster-manager node again...");
node2Name = internalCluster().startNode(Settings.builder().put(settings).put(clusterManagerDataPathSettings).build());

clusterHealthResponse = client().admin()
.cluster()
@@ -204,11 +204,11 @@ public void testTwoNodesNoMasterBlock() throws Exception {
clearRequest.setWaitForRemoval(false);
client().execute(ClearVotingConfigExclusionsAction.INSTANCE, clearRequest).get();

masterNode = internalCluster().getMasterName();
otherNode = node1Name.equals(masterNode) ? node2Name : node1Name;
logger.info("--> add voting config exclusion for master node, to be sure it's not elected");
client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(masterNode)).get();
logger.info("--> stop non-master node, no cluster-manager block should appear");
clusterManagerNode = internalCluster().getMasterName();
otherNode = node1Name.equals(clusterManagerNode) ? node2Name : node1Name;
logger.info("--> add voting config exclusion for cluster-manager node, to be sure it's not elected");
client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(clusterManagerNode)).get();
logger.info("--> stop non-cluster-manager node, no cluster-manager block should appear");
Settings otherNodeDataPathSettings = internalCluster().dataPathSettings(otherNode);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(otherNode));

@@ -217,7 +217,7 @@ public void testTwoNodesNoMasterBlock() throws Exception {
assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true));
});

logger.info("--> starting the previous master node again...");
logger.info("--> starting the previous cluster-manager node again...");
internalCluster().startNode(Settings.builder().put(settings).put(otherNodeDataPathSettings).build());

ensureGreen();
@@ -249,7 +249,7 @@ public void testTwoNodesNoMasterBlock() throws Exception {
}
}

public void testThreeNodesNoMasterBlock() throws Exception {
public void testThreeNodesNoClusterManagerBlock() throws Exception {
internalCluster().setBootstrapClusterManagerNodeIndex(2);

Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build();
@@ -312,8 +312,8 @@ public void testThreeNodesNoMasterBlock() throws Exception {
List<String> nonClusterManagerNodes = new ArrayList<>(
Sets.difference(Sets.newHashSet(internalCluster().getNodeNames()), Collections.singleton(internalCluster().getMasterName()))
);
Settings nonMasterDataPathSettings1 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(0));
Settings nonMasterDataPathSettings2 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(1));
Settings nonClusterManagerDataPathSettings1 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(0));
Settings nonClusterManagerDataPathSettings2 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(1));
internalCluster().stopRandomNonMasterNode();
internalCluster().stopRandomNonMasterNode();

@@ -325,7 +325,7 @@ public void testThreeNodesNoMasterBlock() throws Exception {
});

logger.info("--> start back the 2 nodes ");
internalCluster().startNodes(nonMasterDataPathSettings1, nonMasterDataPathSettings2);
internalCluster().startNodes(nonClusterManagerDataPathSettings1, nonClusterManagerDataPathSettings2);

internalCluster().validateClusterFormed();
ensureGreen();
@@ -347,17 +347,17 @@ public void testCannotCommitStateThreeNodes() throws Exception {
internalCluster().startNodes(3, settings);
ensureStableCluster(3);

final String master = internalCluster().getMasterName();
final String clusterManager = internalCluster().getMasterName();
Set<String> otherNodes = new HashSet<>(Arrays.asList(internalCluster().getNodeNames()));
otherNodes.remove(master);
NetworkDisruption partition = isolateMasterDisruption(NetworkDisruption.DISCONNECT);
otherNodes.remove(clusterManager);
NetworkDisruption partition = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT);
internalCluster().setDisruptionScheme(partition);

final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<Exception> failure = new AtomicReference<>();
logger.debug("--> submitting for cluster state to be rejected");
final ClusterService masterClusterService = internalCluster().clusterService(master);
masterClusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
final ClusterService clusterManagerClusterService = internalCluster().clusterService(clusterManager);
clusterManagerClusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
@@ -387,11 +387,11 @@ public void onFailure(String source, Exception e) {
assertThat(failure.get(), instanceOf(FailedToCommitClusterStateException.class));

logger.debug("--> check that there is no cluster-manager in minor partition");
assertBusy(() -> assertThat(masterClusterService.state().nodes().getMasterNode(), nullValue()));
assertBusy(() -> assertThat(clusterManagerClusterService.state().nodes().getMasterNode(), nullValue()));

// let major partition to elect new master, to ensure that old master is not elected once partition is restored,
// otherwise persistent setting (which is a part of accepted state on old master) will be propagated to other nodes
logger.debug("--> wait for master to be elected in major partition");
// let major partition to elect new cluster-manager, to ensure that old cluster-manager is not elected once partition is restored,
// otherwise persistent setting (which is a part of accepted state on old cluster-manager) will be propagated to other nodes
logger.debug("--> wait for cluster-manager to be elected in major partition");
assertBusy(() -> {
DiscoveryNode clusterManagerNode = internalCluster().client(randomFrom(otherNodes))
.admin()
@@ -403,7 +403,7 @@ public void onFailure(String source, Exception e) {
.nodes()
.getMasterNode();
assertThat(clusterManagerNode, notNullValue());
assertThat(clusterManagerNode.getName(), not(equalTo(master)));
assertThat(clusterManagerNode.getName(), not(equalTo(clusterManager)));
});

partition.stopDisrupting();
@@ -414,7 +414,7 @@ public void onFailure(String source, Exception e) {
for (String node : internalCluster().getNodeNames()) {
Settings nodeSetting = internalCluster().clusterService(node).state().metadata().settings();
assertThat(
node + " processed the cluster state despite of a min master node violation",
node + " processed the cluster state despite of a min cluster-manager node violation",
nodeSetting.get("_SHOULD_NOT_BE_THERE_"),
nullValue()
);
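Several hunks in this file rely on voting configuration exclusions to control which node can be elected cluster-manager. In isolation, that pattern is roughly as follows (sketch assembled from the request types already used above; nodeName is a placeholder):

// Keep nodeName out of the voting configuration so it cannot win an election.
client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(nodeName)).get();

// Later, drop all exclusions; setWaitForRemoval(false) returns without waiting for the
// excluded node to actually leave the cluster.
ClearVotingConfigExclusionsRequest clearRequest = new ClearVotingConfigExclusionsRequest();
clearRequest.setWaitForRemoval(false);
client().execute(ClearVotingConfigExclusionsAction.INSTANCE, clearRequest).get();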