diff --git a/go/test/endtoend/reparent/main_test.go b/go/test/endtoend/reparent/main_test.go
index c3600e95ad4..e4fa644e5f2 100644
--- a/go/test/endtoend/reparent/main_test.go
+++ b/go/test/endtoend/reparent/main_test.go
@@ -111,6 +111,7 @@ func TestMain(m *testing.M) {
 		clusterInstance.VtTabletExtraArgs = []string{
 			"-lock_tables_timeout", "5s",
 			"-enable_semi_sync",
+			"-track_schema_versions=false", // remove this line once https://github.com/vitessio/vitess/issues/6474 is fixed
 		}
 
 		// Initialize Cluster
@@ -124,12 +125,11 @@ func TestMain(m *testing.M) {
 		for _, shard := range clusterInstance.Keyspaces[0].Shards {
 			for _, tablet := range shard.Vttablets {
 				log.Infof("Starting MySql for tablet %v", tablet.Alias)
-				if proc, err := tablet.MysqlctlProcess.StartProcess(); err != nil {
+				proc, err := tablet.MysqlctlProcess.StartProcess()
+				if err != nil {
 					return 1
-				} else {
-					// ignore golint warning, we need the else block to use proc
-					mysqlCtlProcessList = append(mysqlCtlProcessList, proc)
 				}
+				mysqlCtlProcessList = append(mysqlCtlProcessList, proc)
 			}
 		}
 
diff --git a/go/test/endtoend/reparent/reparent_test.go b/go/test/endtoend/reparent/reparent_test.go
index 33d3bd8a9bb..e94960ad2c9 100644
--- a/go/test/endtoend/reparent/reparent_test.go
+++ b/go/test/endtoend/reparent/reparent_test.go
@@ -40,31 +40,36 @@ import (
 func TestMasterToSpareStateChangeImpossible(t *testing.T) {
 	defer cluster.PanicHandler(t)
 
-	args := []string{"InitTablet", "-hostname", hostname,
-		"-port", fmt.Sprintf("%d", tablet62344.HTTPPort), "-allow_update", "-parent",
-		"-keyspace", keyspaceName,
-		"-shard", shardName,
-		"-mysql_port", fmt.Sprintf("%d", tablet62344.MySQLPort),
-		"-grpc_port", fmt.Sprintf("%d", tablet62344.GrpcPort)}
-	args = append(args, fmt.Sprintf("%s-%010d", tablet62344.Cell, tablet62344.TabletUID), "master")
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand(args...)
-	require.NoError(t, err)
+	// need at least one replica because of semi-sync
+	for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044} {
 
-	// Start the tablet
-	err = tablet62344.VttabletProcess.Setup()
-	require.NoError(t, err)
+		// Start the tablet
+		err := tablet.VttabletProcess.Setup()
+		require.NoError(t, err)
 
-	// Create Database
-	err = tablet62344.VttabletProcess.CreateDB(keyspaceName)
-	require.NoError(t, err)
+		// Create Database
+		err = tablet.VttabletProcess.CreateDB(keyspaceName)
+		require.NoError(t, err)
+	}
+	for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044} {
+		err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
+		require.NoError(t, err)
+	}
+	// Init Shard Master
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
+		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
+	require.NoError(t, err, out)
 
 	// We cannot change a master to spare
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeReplicaType", tablet62344.Alias, "spare")
-	require.Error(t, err)
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ChangeTabletType", tablet62344.Alias, "spare")
+	require.Error(t, err, out)
+	require.Contains(t, out, "type change MASTER -> SPARE is not an allowed transition for ChangeTabletType")
 
-	//kill Tablet
+	//kill Tablets
 	err = tablet62344.VttabletProcess.TearDown()
 	require.NoError(t, err)
+	err = tablet62044.VttabletProcess.TearDown()
+	require.NoError(t, err)
 }
 
 func TestReparentDownMaster(t *testing.T) {
@@ -90,9 +95,9 @@ func TestReparentDownMaster(t *testing.T) {
 	}
 
 	// Init Shard Master
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -171,20 +176,20 @@ func TestReparentCrossCell(t *testing.T) {
 	}
 
 	// Force the replica to reparent assuming that all the datasets are identical.
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
 	checkMasterTablet(t, tablet62344)
 
 	// Perform a graceful reparent operation to another cell.
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand(
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
 		"PlannedReparentShard",
 		"-keyspace_shard", keyspaceShard,
 		"-new_master", tablet31981.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, false)
 
@@ -215,9 +220,9 @@ func TestReparentGraceful(t *testing.T) {
 	}
 
 	// Force the replica to reparent assuming that all the datasets are identical.
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -292,9 +297,9 @@ func TestReparentReplicaOffline(t *testing.T) {
 	}
 
 	// Force the replica to reparent assuming that all the datasets are identical.
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", keyspaceShard, tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -305,7 +310,7 @@ func TestReparentReplicaOffline(t *testing.T) {
 	require.NoError(t, err)
 
 	// Perform a graceful reparent operation.
-	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
 		"PlannedReparentShard",
 		"-keyspace_shard", keyspaceShard,
 		"-new_master", tablet62044.Alias,
@@ -321,9 +326,11 @@ func TestReparentAvoid(t *testing.T) {
 	defer cluster.PanicHandler(t)
+
 	// Remove tablet41983 from topology as that tablet is not required for this test
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet41983.Alias)
-	require.NoError(t, err)
+	// Ignore error. Depending on previous tests this topo entry may or may not exist
+	// TODO: fix inter-test dependencies
+	_ = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet41983.Alias)
 
 	for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet31981} {
 		// create database
@@ -341,9 +348,9 @@ func TestReparentAvoid(t *testing.T) {
 	}
 
 	// Force the replica to reparent assuming that all the dataset's are identical.
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", keyspaceShard, tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -438,9 +445,9 @@ func reparentFromOutside(t *testing.T, downMaster bool) {
 	}
 
 	// Reparent as a starting point
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -526,9 +533,9 @@ func TestReparentWithDownReplica(t *testing.T) {
 	}
 
 	// Init Shard Master
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -600,9 +607,9 @@ func TestChangeTypeSemiSync(t *testing.T) {
 	}
 
 	// Init Shard Master
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), master.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	// Updated rdonly tablet and set tablet type to rdonly
 	// TODO: replace with ChangeTabletType once ChangeSlaveType is removed
@@ -679,9 +686,9 @@ func TestReparentDoesntHangIfMasterFails(t *testing.T) {
 	}
 
 	// Init Shard Master
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
+	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
 		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
-	require.NoError(t, err)
+	require.NoError(t, err, out)
 
 	validateTopology(t, true)
 
@@ -693,7 +700,7 @@ func TestReparentDoesntHangIfMasterFails(t *testing.T) {
 
 	// Perform a planned reparent operation, the master will fail the
 	// insert. The replicas should then abort right away.
-	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
+	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
 		"PlannedReparentShard",
 		"-keyspace_shard", keyspaceShard,
 		"-new_master", tablet62044.Alias)
diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go
index aade6fc0ab2..be60d7683f6 100644
--- a/go/vt/mysqlctl/backup.go
+++ b/go/vt/mysqlctl/backup.go
@@ -27,7 +27,6 @@ import (
 	"golang.org/x/net/context"
 
 	"vitess.io/vitess/go/mysql"
-	"vitess.io/vitess/go/sqlescape"
 	"vitess.io/vitess/go/vt/log"
 	"vitess.io/vitess/go/vt/mysqlctl/backupstorage"
 	"vitess.io/vitess/go/vt/proto/vtrpc"
@@ -136,33 +135,23 @@ func Backup(ctx context.Context, params BackupParams) error {
 // checkNoDB makes sure there is no user data already there.
 // Used by Restore, as we do not want to destroy an existing DB.
 // The user's database name must be given since we ignore all others.
-// Returns true if the specified DB either doesn't exist, or has no tables.
+// Returns (true, nil) if the specified DB doesn't exist.
 // Returns (false, nil) if the check succeeds but the condition is not
-// satisfied (there is a DB with tables).
-// Returns non-nil error if one occurs while trying to perform the check.
+// satisfied (there is a DB).
+// Returns (false, non-nil error) if one occurs while trying to perform the check.
 func checkNoDB(ctx context.Context, mysqld MysqlDaemon, dbName string) (bool, error) {
 	qr, err := mysqld.FetchSuperQuery(ctx, "SHOW DATABASES")
 	if err != nil {
 		return false, vterrors.Wrap(err, "checkNoDB failed")
 	}
 
-	backtickDBName := sqlescape.EscapeID(dbName)
 	for _, row := range qr.Rows {
 		if row[0].ToString() == dbName {
-			tableQr, err := mysqld.FetchSuperQuery(ctx, "SHOW TABLES FROM "+backtickDBName)
-			if err != nil {
-				return false, vterrors.Wrap(err, "checkNoDB failed")
-			}
-			if len(tableQr.Rows) == 0 {
-				// no tables == empty db, all is well
-				continue
-			}
 			// found active db
 			log.Warningf("checkNoDB failed, found active db %v", dbName)
 			return false, nil
 		}
 	}
-
 	return true, nil
 }