Merge pull request #6695 from planetscale/ds-check-no-db
restore: checkNoDB should not require tables to be present
deepthi authored Sep 12, 2020
2 parents 56a3312 + c8d73fd commit 13b895a
Showing 6 changed files with 71 additions and 74 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/docker_test_1.yml
@@ -16,6 +16,6 @@ jobs:
- name: Check out code
uses: actions/checkout@v2

- name: Run tests which requires docker 1
- name: Run tests which require docker - 1
run: |
go run test.go -docker=true --follow -shard 10
4 changes: 2 additions & 2 deletions .github/workflows/docker_test_2.yml
@@ -1,4 +1,4 @@
name: docker test 2
name: docker_test_2
on: [push, pull_request]
jobs:

@@ -16,6 +16,6 @@ jobs:
- name: Check out code
uses: actions/checkout@v2

- name: Run tests which requires docker - 2
- name: Run tests which require docker - 2
run: |
go run test.go -docker=true --follow -shard 25
1 change: 1 addition & 0 deletions go/test/endtoend/reparent/main_test.go
@@ -111,6 +111,7 @@ func TestMain(m *testing.M) {
clusterInstance.VtTabletExtraArgs = []string{
"-lock_tables_timeout", "5s",
"-enable_semi_sync",
"-track_schema_versions=false", // remove this line once https://github.com/vitessio/vitess/issues/6474 is fixed
}

// Initialize Cluster
115 changes: 61 additions & 54 deletions go/test/endtoend/reparent/reparent_test.go
@@ -39,31 +39,36 @@ import (
func TestMasterToSpareStateChangeImpossible(t *testing.T) {
defer cluster.PanicHandler(t)

args := []string{"InitTablet", "-hostname", hostname,
"-port", fmt.Sprintf("%d", tablet62344.HTTPPort), "-allow_update", "-parent",
"-keyspace", keyspaceName,
"-shard", shardName,
"-mysql_port", fmt.Sprintf("%d", tablet62344.MySQLPort),
"-grpc_port", fmt.Sprintf("%d", tablet62344.GrpcPort)}
args = append(args, fmt.Sprintf("%s-%010d", tablet62344.Cell, tablet62344.TabletUID), "master")
err := clusterInstance.VtctlclientProcess.ExecuteCommand(args...)
require.NoError(t, err)
// need at least one replica because of semi-sync
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044} {

// Start the tablet
err = tablet62344.VttabletProcess.Setup()
require.NoError(t, err)
// Start the tablet
err := tablet.VttabletProcess.Setup()
require.NoError(t, err)

// Create Database
err = tablet62344.VttabletProcess.CreateDB(keyspaceName)
require.NoError(t, err)
// Create Database
err = tablet.VttabletProcess.CreateDB(keyspaceName)
require.NoError(t, err)
}
for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044} {
err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
require.NoError(t, err)
}

// Init Shard Master
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
require.NoError(t, err, out)
// We cannot change a master to spare
err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeReplicaType", tablet62344.Alias, "spare")
require.Error(t, err)
out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ChangeTabletType", tablet62344.Alias, "spare")
require.Error(t, err, out)
require.Contains(t, out, "type change MASTER -> SPARE is not an allowed transition for ChangeTabletType")

//kill Tablet
//kill Tablets
err = tablet62344.VttabletProcess.TearDown()
require.NoError(t, err)
err = tablet62044.VttabletProcess.TearDown()
require.NoError(t, err)
}

func TestReparentDownMaster(t *testing.T) {
@@ -89,9 +94,9 @@ func TestReparentDownMaster(t *testing.T) {
}

// Init Shard Master
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
require.NoError(t, err)
require.NoError(t, err, out)

validateTopology(t, true)

@@ -115,7 +120,7 @@ func TestReparentDownMaster(t *testing.T) {
require.Error(t, err)

// Run forced reparent operation, this should now proceed unimpeded.
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
"EmergencyReparentShard",
"-keyspace_shard", keyspaceShard,
"-new_master", tablet62044.Alias,
@@ -192,9 +197,9 @@ func TestReparentNoChoiceDownMaster(t *testing.T) {
}

// Init Shard Master
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
require.NoError(t, err)
require.NoError(t, err, out)

validateTopology(t, true)

@@ -218,14 +223,14 @@ func TestReparentNoChoiceDownMaster(t *testing.T) {
require.NoError(t, err)

// Run forced reparent operation, this should now proceed unimpeded.
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
"EmergencyReparentShard",
"-keyspace_shard", keyspaceShard,
"-wait_replicas_timeout", "30s")
require.NoError(t, err)
require.NoError(t, err, out)

// Check that old master tablet is left around for human intervention.
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate")
out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate")
require.Error(t, err)
require.Contains(t, out, "already has master")

@@ -300,9 +305,9 @@ func TestReparentIgnoreReplicas(t *testing.T) {
}

// Init Shard Master.
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
require.Nil(t, err)
require.Nil(t, err, out)

validateTopology(t, true)

@@ -332,19 +337,19 @@ func TestReparentIgnoreReplicas(t *testing.T) {
require.Nil(t, err)

// We expect this one to fail because we have an unreachable replica
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
"EmergencyReparentShard",
"-keyspace_shard", keyspaceShard,
"-wait_replicas_timeout", "30s")
require.NotNil(t, err)
require.NotNil(t, err, out)

// Now let's run it again, but set the command to ignore the unreachable replica.
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
"EmergencyReparentShard",
"-keyspace_shard", keyspaceShard,
"-ignore_replicas", tablet41983.Alias,
"-wait_replicas_timeout", "30s")
require.Nil(t, err)
require.Nil(t, err, out)

// We'll bring back the replica we took down.
tablet41983.MysqlctlProcess.InitMysql = false
@@ -419,20 +424,20 @@ func TestReparentCrossCell(t *testing.T) {
}

// Force the replica to reparent assuming that all the datasets are identical.
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
require.NoError(t, err)
require.NoError(t, err, out)

validateTopology(t, true)

checkMasterTablet(t, tablet62344)

// Perform a graceful reparent operation to another cell.
err = clusterInstance.VtctlclientProcess.ExecuteCommand(
out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
"PlannedReparentShard",
"-keyspace_shard", keyspaceShard,
"-new_master", tablet31981.Alias)
require.NoError(t, err)
require.NoError(t, err, out)

validateTopology(t, false)

@@ -463,9 +468,9 @@ func TestReparentGraceful(t *testing.T) {
}

// Force the replica to reparent assuming that all the datasets are identical.
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
require.NoError(t, err)
require.NoError(t, err, out)

validateTopology(t, true)

@@ -540,9 +545,9 @@ func TestReparentReplicaOffline(t *testing.T) {
}

// Force the replica to reparent assuming that all the datasets are identical.
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
"-force", keyspaceShard, tablet62344.Alias)
require.NoError(t, err)
require.NoError(t, err, out)

validateTopology(t, true)

@@ -553,7 +558,7 @@ func TestReparentReplicaOffline(t *testing.T) {
require.NoError(t, err)

// Perform a graceful reparent operation.
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
"PlannedReparentShard",
"-keyspace_shard", keyspaceShard,
"-new_master", tablet62044.Alias,
@@ -569,9 +574,11 @@ func TestReparentReplicaOffline(t *testing.T) {

func TestReparentAvoid(t *testing.T) {
defer cluster.PanicHandler(t)

// Remove tablet41983 from topology as that tablet is not required for this test
err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet41983.Alias)
require.NoError(t, err)
// Ignore error. Depending on previous tests this topo entry may or may not exist
// TODO: fix inter-test dependencies
_ = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet41983.Alias)

for _, tablet := range []cluster.Vttablet{*tablet62344, *tablet62044, *tablet31981} {
// create database
@@ -589,9 +596,9 @@ func TestReparentAvoid(t *testing.T) {
}

// Force the replica to reparent assuming that all the dataset's are identical.
err = clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
"-force", keyspaceShard, tablet62344.Alias)
require.NoError(t, err)
require.NoError(t, err, out)

validateTopology(t, true)

@@ -686,9 +693,9 @@ func reparentFromOutside(t *testing.T, downMaster bool) {
}

// Reparent as a starting point
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
require.NoError(t, err)
require.NoError(t, err, out)

validateTopology(t, true)

@@ -774,9 +781,9 @@ func TestReparentWithDownReplica(t *testing.T) {
}

// Init Shard Master
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
require.NoError(t, err)
require.NoError(t, err, out)

validateTopology(t, true)

@@ -848,9 +855,9 @@ func TestChangeTypeSemiSync(t *testing.T) {
}

// Init Shard Master
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), master.Alias)
require.NoError(t, err)
require.NoError(t, err, out)

// Updated rdonly tablet and set tablet type to rdonly
// TODO: replace with ChangeTabletType once ChangeSlaveType is removed
@@ -927,9 +934,9 @@ func TestReparentDoesntHangIfMasterFails(t *testing.T) {
}

// Init Shard Master
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
require.NoError(t, err)
require.NoError(t, err, out)

validateTopology(t, true)

@@ -941,7 +948,7 @@ func TestReparentDoesntHangIfMasterFails(t *testing.T) {

// Perform a planned reparent operation, the master will fail the
// insert. The replicas should then abort right away.
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
"PlannedReparentShard",
"-keyspace_shard", keyspaceShard,
"-new_master", tablet62044.Alias)
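A note on the recurring pattern in this file: most hunks above swap ExecuteCommand for ExecuteCommandWithOutput and pass the captured output as the extra message argument to the require assertion, so a failing vtctlclient call prints what the command actually reported. Below is a minimal sketch of that pattern, not part of the diff; it assumes the clusterInstance, keyspaceName, shardName and tablet62344 fixtures that main_test.go sets up for this package.

package reparent

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// exampleInitShardMaster illustrates the assertion style this diff converges on:
// capture the vtctlclient output and hand it to require, so the failure message
// includes the command output rather than just a non-nil error.
func exampleInitShardMaster(t *testing.T) {
	out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("InitShardMaster",
		"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablet62344.Alias)
	require.NoError(t, err, out) // `out` is appended to the failure message on error
}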
17 changes: 3 additions & 14 deletions go/vt/mysqlctl/backup.go
@@ -27,7 +27,6 @@ import (
"golang.org/x/net/context"

"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqlescape"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/mysqlctl/backupstorage"
"vitess.io/vitess/go/vt/proto/vtrpc"
@@ -136,33 +135,23 @@ func Backup(ctx context.Context, params BackupParams) error {
// checkNoDB makes sure there is no user data already there.
// Used by Restore, as we do not want to destroy an existing DB.
// The user's database name must be given since we ignore all others.
// Returns true if the specified DB either doesn't exist, or has no tables.
// Returns (true, nil) if the specified DB doesn't exist.
// Returns (false, nil) if the check succeeds but the condition is not
// satisfied (there is a DB with tables).
// Returns non-nil error if one occurs while trying to perform the check.
// satisfied (there is a DB).
// Returns (false, non-nil error) if one occurs while trying to perform the check.
func checkNoDB(ctx context.Context, mysqld MysqlDaemon, dbName string) (bool, error) {
qr, err := mysqld.FetchSuperQuery(ctx, "SHOW DATABASES")
if err != nil {
return false, vterrors.Wrap(err, "checkNoDB failed")
}

backtickDBName := sqlescape.EscapeID(dbName)
for _, row := range qr.Rows {
if row[0].ToString() == dbName {
tableQr, err := mysqld.FetchSuperQuery(ctx, "SHOW TABLES FROM "+backtickDBName)
if err != nil {
return false, vterrors.Wrap(err, "checkNoDB failed")
}
if len(tableQr.Rows) == 0 {
// no tables == empty db, all is well
continue
}
// found active db
log.Warningf("checkNoDB failed, found active db %v", dbName)
return false, nil
}
}

return true, nil
}
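
Read together, the kept lines of this hunk leave checkNoDB looking roughly like the sketch below: the mere presence of a database named dbName is enough to fail the check, and the SHOW TABLES probe (along with the sqlescape import) is gone. This is a reconstruction from the diff above, assuming the package's existing MysqlDaemon, vterrors and log imports, not a verbatim copy of the file.

// checkNoDB after this change (reconstructed from the hunk above): a DB with
// the given name counts as user data even if it contains no tables.
func checkNoDB(ctx context.Context, mysqld MysqlDaemon, dbName string) (bool, error) {
	qr, err := mysqld.FetchSuperQuery(ctx, "SHOW DATABASES")
	if err != nil {
		return false, vterrors.Wrap(err, "checkNoDB failed")
	}

	for _, row := range qr.Rows {
		if row[0].ToString() == dbName {
			// found active db
			log.Warningf("checkNoDB failed, found active db %v", dbName)
			return false, nil
		}
	}
	return true, nil
}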

6 changes: 3 additions & 3 deletions test/config.json
@@ -340,7 +340,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/tabletgateway/buffer"],
"Command": [],
"Manual": false,
"Shard": 14,
"Shard": 13,
"RetryMax": 0,
"Tags": []
},
@@ -349,7 +349,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/tabletgateway/cellalias"],
"Command": [],
"Manual": false,
"Shard": 14,
"Shard": 13,
"RetryMax": 0,
"Tags": []
},
@@ -358,7 +358,7 @@
"Args": ["vitess.io/vitess/go/test/endtoend/tabletgateway"],
"Command": [],
"Manual": false,
"Shard": 14,
"Shard": 15,
"RetryMax": 0,
"Tags": []
},