diff --git a/clientv3/integration/kv_test.go b/clientv3/integration/kv_test.go index 20589d015fe0..69948a856bb9 100644 --- a/clientv3/integration/kv_test.go +++ b/clientv3/integration/kv_test.go @@ -938,29 +938,3 @@ func TestKVPutAtMostOnce(t *testing.T) { t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0]) } } - -func TestKVSwitchUnavailable(t *testing.T) { - defer testutil.AfterTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true}) - defer clus.Terminate(t) - - clus.Members[0].InjectPartition(t, clus.Members[1:]...) - // try to connect with dead node in the endpoint list - cfg := clientv3.Config{ - Endpoints: []string{ - clus.Members[0].GRPCAddr(), - clus.Members[1].GRPCAddr(), - }, - DialTimeout: 1 * time.Second} - cli, err := clientv3.New(cfg) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - timeout := 3 * clus.Members[0].ServerConfig.ReqTimeout() - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - if _, err := cli.Get(ctx, "abc"); err != nil { - t.Fatal(err) - } - cancel() -} diff --git a/clientv3/integration/network_partition_test.go b/clientv3/integration/network_partition_test.go index 50d9d418c46f..a71c4cea3e8b 100644 --- a/clientv3/integration/network_partition_test.go +++ b/clientv3/integration/network_partition_test.go @@ -146,6 +146,51 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c } } +// TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection ensures balancer +// switches endpoint when leader fails and linearizable get request returns +// "etcdserver: request timed out".
+func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + Size: 3, + SkipCreatingClient: true, + }) + defer clus.Terminate(t) + eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()} + + lead := clus.WaitLeader(t) + + timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout() + + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{eps[(lead+1)%2]}, + DialTimeout: 1 * time.Second, + }) + if err != nil { + t.Fatal(err) + } + defer cli.Close() + + // wait for non-leader to be pinned + mustWaitPinReady(t, cli) + + // add all eps to the list, so that when the original pinned one fails + // the client can switch to other available eps + cli.SetEndpoints(eps[lead], eps[(lead+1)%2]) + + // isolate leader + clus.Members[lead].InjectPartition(t, clus.Members[(lead+1)%3], clus.Members[(lead+2)%3]) + + // expects balancer endpoint switch while ongoing leader election + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + _, err = cli.Get(ctx, "a") + cancel() + if err != nil { + t.Fatal(err) + } +} + func TestBalancerUnderNetworkPartitionWatchLeader(t *testing.T) { testBalancerUnderNetworkPartitionWatch(t, true) }