diff --git a/go/test/endtoend/reparent/main_test.go b/go/test/endtoend/reparent/main_test.go
index c3600e95ad4..87a8a95abae 100644
--- a/go/test/endtoend/reparent/main_test.go
+++ b/go/test/endtoend/reparent/main_test.go
@@ -111,6 +111,7 @@ func TestMain(m *testing.M) {
 		clusterInstance.VtTabletExtraArgs = []string{
 			"-lock_tables_timeout", "5s",
 			"-enable_semi_sync",
+			"-track_schema_versions=false", // remove this line once https://github.com/vitessio/vitess/issues/6474 is fixed
 		}
 
 		// Initialize Cluster
diff --git a/go/test/endtoend/reparent/reparent_test.go b/go/test/endtoend/reparent/reparent_test.go
index bc5933e0636..a2a52fbf0c1 100644
--- a/go/test/endtoend/reparent/reparent_test.go
+++ b/go/test/endtoend/reparent/reparent_test.go
@@ -39,31 +39,36 @@ import (
 
 func TestMasterToSpareStateChangeImpossible(t *testing.T) {
 	defer cluster.PanicHandler(t)
-	args := []string{"InitTablet", "-hostname", hostname,
-		"-port", fmt.Sprintf("%d", tablet62344.HTTPPort), "-allow_update", "-parent",
-		"-keyspace", keyspaceName,
-		"-shard", shardName,
-		"-mysql_port", fmt.Sprintf("%d", tablet62344.MySQLPort),
-		"-grpc_port", fmt.Sprintf("%d", tablet62344.GrpcPort)}
-	args = append(args, fmt.Sprintf("%s-%010d", tablet62344.Cell, tablet62344.TabletUID), "master")
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand(args...)
-	require.NoError(t, err)
+	// need at least one replica because of semi-sync
+	for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044} {
 
-	// Start the tablet
-	err = tablet62344.VttabletProcess.Setup()
-	require.NoError(t, err)
+		// Start the tablet
+		err := tablet.VttabletProcess.Setup()
+		require.NoError(t, err)
 
-	// Create Database
-	err = tablet62344.VttabletProcess.CreateDB(keyspaceName)
-	require.NoError(t, err)
+		// Create Database
+		err = tablet.VttabletProcess.CreateDB(keyspaceName)
+		require.NoError(t, err)
+	}
+	for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044} {
+		err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
+		require.NoError(t, err)
+	}
+	// Init Shard Master
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
+		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
+	require.NoError(t, err, out)
 
 	// We cannot change a master to spare
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeReplicaType", tablet62344.Alias, "spare")
-	require.Error(t, err)
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ChangeTabletType", tablet62344.Alias, "spare")
+	require.Error(t, err, out)
+	require.Contains(t, out, "type change MASTER -> SPARE is not an allowed transition for ChangeTabletType")
 
-	//kill Tablet
+	//kill Tablets
 	err = tablet62344.VttabletProcess.TearDown()
 	require.NoError(t, err)
+	err = tablet62044.VttabletProcess.TearDown()
+	require.NoError(t, err)
 }
 
 func TestReparentDownMaster(t *testing.T) {
@@ -89,9 +94,9 @@ func TestReparentDownMaster(t *testing.T) {
 	}
 
 	// Init Shard Master
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -115,7 +120,7 @@ func TestReparentDownMaster(t *testing.T) {
 	require.Error(t, err)
 
 	// Run forced reparent operation, this should now proceed unimpeded.
-	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
 		"EmergencyReparentShard",
 		"-keyspace_shard", keyspaceShard,
 		"-new_master", tablet62044.Alias,
@@ -192,9 +197,9 @@ func TestReparentNoChoiceDownMaster(t *testing.T) {
 	}
 
 	// Init Shard Master
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -218,14 +223,14 @@ func TestReparentNoChoiceDownMaster(t *testing.T) {
 	require.NoError(t, err)
 
 	// Run forced reparent operation, this should now proceed unimpeded.
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand(
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
 		"EmergencyReparentShard",
 		"-keyspace_shard", keyspaceShard,
 		"-wait_replicas_timeout", "30s")
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	// Check that old master tablet is left around for human intervention.
-	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate")
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate")
 	require.Error(t, err)
 	require.Contains(t, out, "already has master")
 
@@ -300,9 +305,9 @@ func TestReparentIgnoreReplicas(t *testing.T) {
 	}
 
 	// Init Shard Master.
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.Nil(t, err)
+	require.Nil(t, err, out)
 
 	validateTopology(t, true)
 
@@ -332,19 +337,19 @@ func TestReparentIgnoreReplicas(t *testing.T) {
 	require.Nil(t, err)
 
 	// We expect this one to fail because we have an unreachable replica
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand(
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
 		"EmergencyReparentShard",
 		"-keyspace_shard", keyspaceShard,
 		"-wait_replicas_timeout", "30s")
-	require.NotNil(t, err)
+	require.NotNil(t, err, out)
 
 	// Now let's run it again, but set the command to ignore the unreachable replica.
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand(
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
 		"EmergencyReparentShard",
 		"-keyspace_shard", keyspaceShard,
 		"-ignore_replicas", tablet41983.Alias,
 		"-wait_replicas_timeout", "30s")
-	require.Nil(t, err)
+	require.Nil(t, err, out)
 
 	// We'll bring back the replica we took down.
 	tablet41983.MysqlctlProcess.InitMysql = false
@@ -419,20 +424,20 @@ func TestReparentCrossCell(t *testing.T) {
 	}
 
 	// Force the replica to reparent assuming that all the datasets are identical.
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
 	checkMasterTablet(t, tablet62344)
 
 	// Perform a graceful reparent operation to another cell.
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand(
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
 		"PlannedReparentShard",
 		"-keyspace_shard", keyspaceShard,
 		"-new_master", tablet31981.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, false)
 
@@ -463,9 +468,9 @@ func TestReparentGraceful(t *testing.T) {
 	}
 
 	// Force the replica to reparent assuming that all the datasets are identical.
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -540,9 +545,9 @@ func TestReparentReplicaOffline(t *testing.T) {
 	}
 
 	// Force the replica to reparent assuming that all the datasets are identical.
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", keyspaceShard, tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -553,7 +558,7 @@ func TestReparentReplicaOffline(t *testing.T) {
 	require.NoError(t, err)
 
 	// Perform a graceful reparent operation.
-	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
 		"PlannedReparentShard",
 		"-keyspace_shard", keyspaceShard,
 		"-new_master", tablet62044.Alias,
@@ -569,9 +574,11 @@ func TestReparentReplicaOffline(t *testing.T) {
 
 func TestReparentAvoid(t *testing.T) {
 	defer cluster.PanicHandler(t)
+
 	// Remove tablet41983 from topology as that tablet is not required for this test
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet41983.Alias)
-	require.NoError(t, err)
+	// Ignore error. Depending on previous tests this topo entry may or may not exist
+	// TODO: fix inter-test dependencies
+	_ = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet41983.Alias)
 
 	for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet31981} {
 		// create database
@@ -589,9 +596,9 @@ func TestReparentAvoid(t *testing.T) {
 	}
 
 	// Force the replica to reparent assuming that all the dataset's are identical.
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", keyspaceShard, tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -686,9 +693,9 @@ func reparentFromOutside(t *testing.T, downMaster bool) {
 	}
 
 	// Reparent as a starting point
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -774,9 +781,9 @@ func TestReparentWithDownReplica(t *testing.T) {
 	}
 
 	// Init Shard Master
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -848,9 +855,9 @@ func TestChangeTypeSemiSync(t *testing.T) {
 	}
 
 	// Init Shard Master
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), master.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	// Updated rdonly tablet and set tablet type to rdonly
 	// TODO: replace with ChangeTabletType once ChangeSlaveType is removed
@@ -927,9 +934,9 @@ func TestReparentDoesntHangIfMasterFails(t *testing.T) {
 	}
 
 	// Init Shard Master
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -941,7 +948,7 @@ func TestReparentDoesntHangIfMasterFails(t *testing.T) {
 
 	// Perform a planned reparent operation, the master will fail the
 	// insert. The replicas should then abort right away.
-	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
 		"PlannedReparentShard",
 		"-keyspace_shard", keyspaceShard,
 		"-new_master", tablet62044.Alias)