diff --git a/client/http/api.go b/client/http/api.go
index 2fae562dd20..1826e2231ee 100644
--- a/client/http/api.go
+++ b/client/http/api.go
@@ -32,7 +32,7 @@ const (
 	regionsByKey           = "/pd/api/v1/regions/key"
 	RegionsByStoreIDPrefix = "/pd/api/v1/regions/store"
 	EmptyRegions           = "/pd/api/v1/regions/check/empty-region"
-	accelerateSchedule     = "/pd/api/v1/regions/accelerate-schedule"
+	AccelerateSchedule     = "/pd/api/v1/regions/accelerate-schedule"
 	store                  = "/pd/api/v1/store"
 	Stores                 = "/pd/api/v1/stores"
 	StatsRegion            = "/pd/api/v1/stats/region"
@@ -45,7 +45,10 @@ const (
 	PlacementRule         = "/pd/api/v1/config/rule"
 	PlacementRules        = "/pd/api/v1/config/rules"
 	placementRulesByGroup = "/pd/api/v1/config/rules/group"
+	PlacementRuleBundle   = "/pd/api/v1/config/placement-rule"
 	RegionLabelRule       = "/pd/api/v1/config/region-label/rule"
+	RegionLabelRules      = "/pd/api/v1/config/region-label/rules"
+	RegionLabelRulesByIDs = "/pd/api/v1/config/region-label/rules/ids"
 	// Scheduler
 	Schedulers            = "/pd/api/v1/schedulers"
 	scatterRangeScheduler = "/pd/api/v1/schedulers/scatter-range-"
@@ -123,6 +126,16 @@ func PlacementRuleByGroupAndID(group, id string) string {
 	return fmt.Sprintf("%s/%s/%s", PlacementRule, group, id)
 }
 
+// PlacementRuleBundleByGroup returns the path of PD HTTP API to get placement rule bundle by group.
+func PlacementRuleBundleByGroup(group string) string {
+	return fmt.Sprintf("%s/%s", PlacementRuleBundle, group)
+}
+
+// PlacementRuleBundleWithPartialParameter returns the path of PD HTTP API to get placement rule bundle with partial parameter.
+func PlacementRuleBundleWithPartialParameter(partial bool) string {
+	return fmt.Sprintf("%s?partial=%t", PlacementRuleBundle, partial)
+}
+
 // SchedulerByName returns the scheduler API with the given scheduler name.
 func SchedulerByName(name string) string {
 	return fmt.Sprintf("%s/%s", Schedulers, name)
diff --git a/client/http/client.go b/client/http/client.go
index 6fa2dd8cdfd..d7eb1b1b801 100644
--- a/client/http/client.go
+++ b/client/http/client.go
@@ -42,6 +42,7 @@ const (
 
 // Client is a PD (Placement Driver) HTTP client.
 type Client interface {
+	/* Meta-related interfaces */
 	GetRegionByID(context.Context, uint64) (*RegionInfo, error)
 	GetRegionByKey(context.Context, []byte) (*RegionInfo, error)
 	GetRegions(context.Context) (*RegionsInfo, error)
@@ -51,11 +52,28 @@ type Client interface {
 	GetHotWriteRegions(context.Context) (*StoreHotPeersInfos, error)
 	GetRegionStatusByKeyRange(context.Context, []byte, []byte) (*RegionStats, error)
 	GetStores(context.Context) (*StoresInfo, error)
+	/* Rule-related interfaces */
+	GetAllPlacementRuleBundles(context.Context) ([]*GroupBundle, error)
+	GetPlacementRuleBundleByGroup(context.Context, string) (*GroupBundle, error)
 	GetPlacementRulesByGroup(context.Context, string) ([]*Rule, error)
 	SetPlacementRule(context.Context, *Rule) error
+	SetPlacementRuleBundles(context.Context, []*GroupBundle, bool) error
 	DeletePlacementRule(context.Context, string, string) error
-	GetMinResolvedTSByStoresIDs(context.Context, []uint64) (uint64, map[uint64]uint64, error)
+	GetAllRegionLabelRules(context.Context) ([]*LabelRule, error)
+	GetRegionLabelRulesByIDs(context.Context, []string) ([]*LabelRule, error)
+	SetRegionLabelRule(context.Context, *LabelRule) error
+	PatchRegionLabelRules(context.Context, *LabelRulePatch) error
+	/* Scheduling-related interfaces */
 	AccelerateSchedule(context.Context, []byte, []byte) error
+	/* Other interfaces */
+	GetMinResolvedTSByStoresIDs(context.Context, []uint64) (uint64, map[uint64]uint64, error)
+
+	/* Client-related methods */
+	// WithRespHandler sets and returns a new client with the given HTTP response handler.
+	// This allows the caller to customize how the response is handled, including error handling logic.
+	// Additionally, it is important for the caller to handle the content of the response body properly
+	// in order to ensure that it can be read and marshaled correctly into `res`.
+	WithRespHandler(func(resp *http.Response, res interface{}) error) Client
 	Close()
 }
 
@@ -66,6 +84,8 @@ type client struct {
 	tlsConf *tls.Config
 	cli     *http.Client
 
+	respHandler func(resp *http.Response, res interface{}) error
+
 	requestCounter    *prometheus.CounterVec
 	executionDuration *prometheus.HistogramVec
 }
@@ -143,6 +163,15 @@ func (c *client) Close() {
 	log.Info("[pd] http client closed")
 }
 
+// WithRespHandler sets and returns a new client with the given HTTP response handler.
+func (c *client) WithRespHandler(
+	handler func(resp *http.Response, res interface{}) error,
+) Client {
+	newClient := *c
+	newClient.respHandler = handler
+	return &newClient
+}
+
 func (c *client) reqCounter(name, status string) {
 	if c.requestCounter == nil {
 		return
@@ -204,6 +233,12 @@ func (c *client) request(
 	}
 	c.execDuration(name, time.Since(start))
 	c.reqCounter(name, resp.Status)
+
+	// Give away the response handling to the caller if the handler is set.
+	if c.respHandler != nil {
+		return c.respHandler(resp, res)
+	}
+
 	defer func() {
 		err = resp.Body.Close()
 		if err != nil {
@@ -345,6 +380,30 @@ func (c *client) GetStores(ctx context.Context) (*StoresInfo, error) {
 	return &stores, nil
 }
 
+// GetAllPlacementRuleBundles gets all placement rules bundles.
+func (c *client) GetAllPlacementRuleBundles(ctx context.Context) ([]*GroupBundle, error) {
+	var bundles []*GroupBundle
+	err := c.requestWithRetry(ctx,
+		"GetPlacementRuleBundle", PlacementRuleBundle,
+		http.MethodGet, nil, &bundles)
+	if err != nil {
+		return nil, err
+	}
+	return bundles, nil
+}
+
+// GetPlacementRuleBundleByGroup gets the placement rules bundle by group.
+func (c *client) GetPlacementRuleBundleByGroup(ctx context.Context, group string) (*GroupBundle, error) {
+	var bundle GroupBundle
+	err := c.requestWithRetry(ctx,
+		"GetPlacementRuleBundleByGroup", PlacementRuleBundleByGroup(group),
+		http.MethodGet, nil, &bundle)
+	if err != nil {
+		return nil, err
+	}
+	return &bundle, nil
+}
+
 // GetPlacementRulesByGroup gets the placement rules by group.
 func (c *client) GetPlacementRulesByGroup(ctx context.Context, group string) ([]*Rule, error) {
 	var rules []*Rule
@@ -368,6 +427,18 @@ func (c *client) SetPlacementRule(ctx context.Context, rule *Rule) error {
 		http.MethodPost, bytes.NewBuffer(ruleJSON), nil)
 }
 
+// SetPlacementRuleBundles sets the placement rule bundles.
+// If `partial` is false, all old configurations will be over-written and dropped.
+func (c *client) SetPlacementRuleBundles(ctx context.Context, bundles []*GroupBundle, partial bool) error {
+	bundlesJSON, err := json.Marshal(bundles)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return c.requestWithRetry(ctx,
+		"SetPlacementRuleBundles", PlacementRuleBundleWithPartialParameter(partial),
+		http.MethodPost, bytes.NewBuffer(bundlesJSON), nil)
+}
+
 // DeletePlacementRule deletes the placement rule.
 func (c *client) DeletePlacementRule(ctx context.Context, group, id string) error {
 	return c.requestWithRetry(ctx,
@@ -375,6 +446,71 @@ func (c *client) DeletePlacementRule(ctx context.Context, group, id string) erro
 		http.MethodDelete, nil, nil)
 }
 
+// GetAllRegionLabelRules gets all region label rules.
+func (c *client) GetAllRegionLabelRules(ctx context.Context) ([]*LabelRule, error) {
+	var labelRules []*LabelRule
+	err := c.requestWithRetry(ctx,
+		"GetAllRegionLabelRules", RegionLabelRules,
+		http.MethodGet, nil, &labelRules)
+	if err != nil {
+		return nil, err
+	}
+	return labelRules, nil
+}
+
+// GetRegionLabelRulesByIDs gets the region label rules by IDs.
+func (c *client) GetRegionLabelRulesByIDs(ctx context.Context, ruleIDs []string) ([]*LabelRule, error) {
+	idsJSON, err := json.Marshal(ruleIDs)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var labelRules []*LabelRule
+	err = c.requestWithRetry(ctx,
+		"GetRegionLabelRulesByIDs", RegionLabelRulesByIDs,
+		http.MethodGet, bytes.NewBuffer(idsJSON), &labelRules)
+	if err != nil {
+		return nil, err
+	}
+	return labelRules, nil
+}
+
+// SetRegionLabelRule sets the region label rule.
+func (c *client) SetRegionLabelRule(ctx context.Context, labelRule *LabelRule) error {
+	labelRuleJSON, err := json.Marshal(labelRule)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return c.requestWithRetry(ctx,
+		"SetRegionLabelRule", RegionLabelRule,
+		http.MethodPost, bytes.NewBuffer(labelRuleJSON), nil)
+}
+
+// PatchRegionLabelRules patches the region label rules.
+func (c *client) PatchRegionLabelRules(ctx context.Context, labelRulePatch *LabelRulePatch) error {
+	labelRulePatchJSON, err := json.Marshal(labelRulePatch)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return c.requestWithRetry(ctx,
+		"PatchRegionLabelRules", RegionLabelRules,
+		http.MethodPatch, bytes.NewBuffer(labelRulePatchJSON), nil)
+}
+
+// AccelerateSchedule accelerates the scheduling of the regions within the given key range.
+func (c *client) AccelerateSchedule(ctx context.Context, startKey, endKey []byte) error {
+	input := map[string]string{
+		"start_key": url.QueryEscape(string(startKey)),
+		"end_key":   url.QueryEscape(string(endKey)),
+	}
+	inputJSON, err := json.Marshal(input)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return c.requestWithRetry(ctx,
+		"AccelerateSchedule", AccelerateSchedule,
+		http.MethodPost, bytes.NewBuffer(inputJSON), nil)
+}
+
 // GetMinResolvedTSByStoresIDs get min-resolved-ts by stores IDs.
 func (c *client) GetMinResolvedTSByStoresIDs(ctx context.Context, storeIDs []uint64) (uint64, map[uint64]uint64, error) {
 	uri := MinResolvedTSPrefix
@@ -406,18 +542,3 @@ func (c *client) GetMinResolvedTSByStoresIDs(ctx context.Context, storeIDs []uin
 	}
 	return resp.MinResolvedTS, resp.StoresMinResolvedTS, nil
 }
-
-// AccelerateSchedule accelerates the scheduling of the regions within the given key range.
-func (c *client) AccelerateSchedule(ctx context.Context, startKey, endKey []byte) error {
-	input := map[string]string{
-		"start_key": url.QueryEscape(string(startKey)),
-		"end_key":   url.QueryEscape(string(endKey)),
-	}
-	inputJSON, err := json.Marshal(input)
-	if err != nil {
-		return errors.Trace(err)
-	}
-	return c.requestWithRetry(ctx,
-		"AccelerateSchedule", accelerateSchedule,
-		http.MethodPost, bytes.NewBuffer(inputJSON), nil)
-}
diff --git a/client/http/types.go b/client/http/types.go
index c6bb0256c14..f948286c2b5 100644
--- a/client/http/types.go
+++ b/client/http/types.go
@@ -246,3 +246,34 @@ type Rule struct {
 	Version         uint64 `json:"version,omitempty"`          // only set at runtime, add 1 each time rules updated, begin from 0.
 	CreateTimestamp uint64 `json:"create_timestamp,omitempty"` // only set at runtime, recorded rule create timestamp
 }
+
+// GroupBundle represents a rule group and all rules belong to the group.
+type GroupBundle struct {
+	ID       string  `json:"group_id"`
+	Index    int     `json:"group_index"`
+	Override bool    `json:"group_override"`
+	Rules    []*Rule `json:"rules"`
+}
+
+// RegionLabel is the label of a region.
+type RegionLabel struct {
+	Key     string `json:"key"`
+	Value   string `json:"value"`
+	TTL     string `json:"ttl,omitempty"`
+	StartAt string `json:"start_at,omitempty"`
+}
+
+// LabelRule is the rule to assign labels to a region.
+type LabelRule struct {
+	ID       string        `json:"id"`
+	Index    int           `json:"index"`
+	Labels   []RegionLabel `json:"labels"`
+	RuleType string        `json:"rule_type"`
+	Data     interface{}   `json:"data"`
+}
+
+// LabelRulePatch is the patch to update the label rules.
+type LabelRulePatch struct {
+	SetRules    []*LabelRule `json:"sets"`
+	DeleteRules []string     `json:"deletes"`
+}
diff --git a/client/retry/backoff.go b/client/retry/backoff.go
index e2ca9ab3972..b47a39d8eaa 100644
--- a/client/retry/backoff.go
+++ b/client/retry/backoff.go
@@ -34,9 +34,11 @@ func (bo *BackOffer) Exec(
 	fn func() error,
 ) error {
 	if err := fn(); err != nil {
+		after := time.NewTimer(bo.nextInterval())
+		defer after.Stop()
 		select {
 		case <-ctx.Done():
-		case <-time.After(bo.nextInterval()):
+		case <-after.C:
 			failpoint.Inject("backOffExecute", func() {
 				testBackOffExecuteFlag = true
 			})
diff --git a/pkg/replication/replication_mode.go b/pkg/replication/replication_mode.go
index 9093f911901..9776a36a8f3 100644
--- a/pkg/replication/replication_mode.go
+++ b/pkg/replication/replication_mode.go
@@ -442,15 +442,6 @@ func (m *ModeManager) tickUpdateState() {
 	canSync := primaryHasVoter && drHasVoter
 	hasMajority := totalUpVoter*2 > totalVoter
 
-	log.Debug("replication store status",
-		zap.Uint64s("up-primary", storeIDs[primaryUp]),
-		zap.Uint64s("up-dr", storeIDs[drUp]),
-		zap.Uint64s("down-primary", storeIDs[primaryDown]),
-		zap.Uint64s("down-dr", storeIDs[drDown]),
-		zap.Bool("can-sync", canSync),
-		zap.Bool("has-majority", hasMajority),
-	)
-
 	/*           +----+      all region sync     +------------+
@@ -469,7 +460,8 @@
 	*/
 
-	switch m.drGetState() {
+	state := m.drGetState()
+	switch state {
 	case drStateSync:
 		// If hasMajority is false, the cluster is always unavailable. Switch to async won't help.
 		if !canSync && hasMajority {
@@ -511,6 +503,19 @@ func (m *ModeManager) tickUpdateState() {
 			}
 		}
 	}
+
+	logFunc := log.Debug
+	if state != m.drGetState() {
+		logFunc = log.Info
+	}
+	logFunc("replication store status",
+		zap.Uint64s("up-primary", storeIDs[primaryUp]),
+		zap.Uint64s("up-dr", storeIDs[drUp]),
+		zap.Uint64s("down-primary", storeIDs[primaryDown]),
+		zap.Uint64s("down-dr", storeIDs[drDown]),
+		zap.Bool("can-sync", canSync),
+		zap.Bool("has-majority", hasMajority),
+	)
 }
 
 func (m *ModeManager) tickReplicateStatus() {
diff --git a/pkg/schedule/operator/operator_test.go b/pkg/schedule/operator/operator_test.go
index 9d924738543..c16d929f379 100644
--- a/pkg/schedule/operator/operator_test.go
+++ b/pkg/schedule/operator/operator_test.go
@@ -17,7 +17,6 @@ package operator
 import (
 	"context"
 	"encoding/json"
-	"fmt"
 	"sync/atomic"
 	"testing"
 	"time"
@@ -514,7 +513,7 @@ func (suite *operatorTestSuite) TestOpStepTimeout() {
 		},
 	}
 	for i, v := range testData {
-		fmt.Printf("case:%d\n", i)
+		suite.T().Logf("case: %d", i)
 		for _, step := range v.step {
 			suite.Equal(v.expect, step.Timeout(v.regionSize))
 		}
diff --git a/tests/integrations/client/http_client_test.go b/tests/integrations/client/http_client_test.go
index d2c88d01f09..213aa57de46 100644
--- a/tests/integrations/client/http_client_test.go
+++ b/tests/integrations/client/http_client_test.go
@@ -17,10 +17,12 @@ package client_test
 import (
 	"context"
 	"math"
+	"sort"
 	"testing"
 
 	"github.com/stretchr/testify/suite"
 	pd "github.com/tikv/pd/client/http"
+	"github.com/tikv/pd/pkg/schedule/labeler"
 	"github.com/tikv/pd/pkg/schedule/placement"
 	"github.com/tikv/pd/tests"
 )
@@ -89,6 +91,13 @@ func (suite *httpClientTestSuite) TestGetMinResolvedTSByStoresIDs() {
 
 func (suite *httpClientTestSuite) TestRule() {
 	re := suite.Require()
+	bundles, err := suite.client.GetAllPlacementRuleBundles(suite.ctx)
+	re.NoError(err)
+	re.Len(bundles, 1)
+	re.Equal(bundles[0].ID, placement.DefaultGroupID)
+	bundle, err := suite.client.GetPlacementRuleBundleByGroup(suite.ctx, placement.DefaultGroupID)
+	re.NoError(err)
+	re.Equal(bundles[0], bundle)
 	rules, err := suite.client.GetPlacementRulesByGroup(suite.ctx, placement.DefaultGroupID)
 	re.NoError(err)
 	re.Len(rules, 1)
@@ -96,19 +105,22 @@ func (suite *httpClientTestSuite) TestRule() {
 	re.Equal(placement.DefaultRuleID, rules[0].ID)
 	re.Equal(pd.Voter, rules[0].Role)
 	re.Equal(3, rules[0].Count)
-	err = suite.client.SetPlacementRule(suite.ctx, &pd.Rule{
+	// Should be the same as the rules in the bundle.
+	re.Equal(bundle.Rules, rules)
+	testRule := &pd.Rule{
 		GroupID: placement.DefaultGroupID,
 		ID:      "test",
-		Role:    pd.Learner,
+		Role:    pd.Voter,
 		Count:   3,
-	})
+	}
+	err = suite.client.SetPlacementRule(suite.ctx, testRule)
 	re.NoError(err)
 	rules, err = suite.client.GetPlacementRulesByGroup(suite.ctx, placement.DefaultGroupID)
 	re.NoError(err)
 	re.Len(rules, 2)
 	re.Equal(placement.DefaultGroupID, rules[1].GroupID)
 	re.Equal("test", rules[1].ID)
-	re.Equal(pd.Learner, rules[1].Role)
+	re.Equal(pd.Voter, rules[1].Role)
 	re.Equal(3, rules[1].Count)
 	err = suite.client.DeletePlacementRule(suite.ctx, placement.DefaultGroupID, "test")
 	re.NoError(err)
@@ -117,6 +129,75 @@ func (suite *httpClientTestSuite) TestRule() {
 	re.Len(rules, 1)
 	re.Equal(placement.DefaultGroupID, rules[0].GroupID)
 	re.Equal(placement.DefaultRuleID, rules[0].ID)
+	err = suite.client.SetPlacementRuleBundles(suite.ctx, []*pd.GroupBundle{
+		{
+			ID:    placement.DefaultGroupID,
+			Rules: []*pd.Rule{testRule},
+		},
+	}, true)
+	re.NoError(err)
+	bundles, err = suite.client.GetAllPlacementRuleBundles(suite.ctx)
+	re.NoError(err)
+	re.Len(bundles, 1)
+	re.Equal(placement.DefaultGroupID, bundles[0].ID)
+	re.Len(bundles[0].Rules, 1)
+	// Make sure the create timestamp is not zero to pass the later assertion.
+	testRule.CreateTimestamp = bundles[0].Rules[0].CreateTimestamp
+	re.Equal(testRule, bundles[0].Rules[0])
+}
+
+func (suite *httpClientTestSuite) TestRegionLabel() {
+	re := suite.Require()
+	labelRules, err := suite.client.GetAllRegionLabelRules(suite.ctx)
+	re.NoError(err)
+	re.Len(labelRules, 1)
+	re.Equal("keyspaces/0", labelRules[0].ID)
+	// Set a new region label rule.
+	labelRule := &pd.LabelRule{
+		ID:       "rule1",
+		Labels:   []pd.RegionLabel{{Key: "k1", Value: "v1"}},
+		RuleType: "key-range",
+		Data:     labeler.MakeKeyRanges("1234", "5678"),
+	}
+	err = suite.client.SetRegionLabelRule(suite.ctx, labelRule)
+	re.NoError(err)
+	labelRules, err = suite.client.GetAllRegionLabelRules(suite.ctx)
+	re.NoError(err)
+	re.Len(labelRules, 2)
+	sort.Slice(labelRules, func(i, j int) bool {
+		return labelRules[i].ID < labelRules[j].ID
+	})
+	re.Equal(labelRule.ID, labelRules[1].ID)
+	re.Equal(labelRule.Labels, labelRules[1].Labels)
+	re.Equal(labelRule.RuleType, labelRules[1].RuleType)
+	// Patch the region label rule.
+	labelRule = &pd.LabelRule{
+		ID:       "rule2",
+		Labels:   []pd.RegionLabel{{Key: "k2", Value: "v2"}},
+		RuleType: "key-range",
+		Data:     labeler.MakeKeyRanges("ab12", "cd12"),
+	}
+	patch := &pd.LabelRulePatch{
+		SetRules:    []*pd.LabelRule{labelRule},
+		DeleteRules: []string{"rule1"},
+	}
+	err = suite.client.PatchRegionLabelRules(suite.ctx, patch)
+	re.NoError(err)
+	allLabelRules, err := suite.client.GetAllRegionLabelRules(suite.ctx)
+	re.NoError(err)
+	re.Len(labelRules, 2)
+	sort.Slice(allLabelRules, func(i, j int) bool {
+		return allLabelRules[i].ID < allLabelRules[j].ID
+	})
+	re.Equal(labelRule.ID, allLabelRules[1].ID)
+	re.Equal(labelRule.Labels, allLabelRules[1].Labels)
+	re.Equal(labelRule.RuleType, allLabelRules[1].RuleType)
+	labelRules, err = suite.client.GetRegionLabelRulesByIDs(suite.ctx, []string{"keyspaces/0", "rule2"})
+	re.NoError(err)
+	sort.Slice(labelRules, func(i, j int) bool {
+		return labelRules[i].ID < labelRules[j].ID
+	})
+	re.Equal(allLabelRules, labelRules)
 }
 
 func (suite *httpClientTestSuite) TestAccelerateSchedule() {
diff --git a/tests/integrations/mcs/tso/server_test.go b/tests/integrations/mcs/tso/server_test.go
index 58006b87eeb..643fb3c7911 100644
--- a/tests/integrations/mcs/tso/server_test.go
+++ b/tests/integrations/mcs/tso/server_test.go
@@ -89,7 +89,7 @@ func (suite *tsoServerTestSuite) TearDownSuite() {
 func (suite *tsoServerTestSuite) TestTSOServerStartAndStopNormally() {
 	defer func() {
 		if r := recover(); r != nil {
-			fmt.Println("Recovered from an unexpected panic", r)
+			suite.T().Log("Recovered from an unexpected panic", r)
 			suite.T().Errorf("Expected no panic, but something bad occurred with")
 		}
 	}()
diff --git a/tests/pdctl/config/config_test.go b/tests/pdctl/config/config_test.go
index 91d6723c2ac..badccd9becc 100644
--- a/tests/pdctl/config/config_test.go
+++ b/tests/pdctl/config/config_test.go
@@ -832,7 +832,6 @@ func (suite *configTestSuite) checkPDServerConfig(cluster *tests.TestCluster) {
 		LastHeartbeat: time.Now().UnixNano(),
 	}
 	tests.MustPutStore(re, cluster, store)
-	defer cluster.Destroy()
 
 	output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "show", "server")
 	re.NoError(err)
@@ -844,7 +843,9 @@ func (suite *configTestSuite) checkPDServerConfig(cluster *tests.TestCluster) {
 	re.Equal("table", conf.KeyType)
 	re.Equal(typeutil.StringSlice([]string{}), conf.RuntimeServices)
 	re.Equal("", conf.MetricStorage)
-	re.Equal("auto", conf.DashboardAddress)
+	if conf.DashboardAddress != "auto" { // dashboard has been assigned
+		re.Equal(leaderServer.GetAddr(), conf.DashboardAddress)
+	}
 	re.Equal(int(3), conf.FlowRoundByDigit)
 }
 
diff --git a/tests/server/api/rule_test.go b/tests/server/api/rule_test.go
index 9176a00e66d..ffdf56b6567 100644
--- a/tests/server/api/rule_test.go
+++ b/tests/server/api/rule_test.go
@@ -1097,7 +1097,6 @@ func (suite *regionRuleTestSuite) checkRegionPlacementRule(cluster *tests.TestCl
 	var label labeler.LabelRule
 	escapedID := url.PathEscape("keyspaces/0")
 	u = fmt.Sprintf("%s/config/region-label/rule/%s", urlPrefix, escapedID)
-	fmt.Println("u====", u)
 	err = tu.ReadGetJSON(re, testDialClient, u, &label)
 	suite.NoError(err)
 	suite.Equal(label.ID, "keyspaces/0")
diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go
index a5861f1ba43..18a82bcf0fe 100644
--- a/tests/server/cluster/cluster_test.go
+++ b/tests/server/cluster/cluster_test.go
@@ -1288,7 +1288,7 @@ func TestTransferLeaderForScheduler(t *testing.T) {
 	re.NoError(err)
 	tc.WaitLeader()
 	// start
-	leaderServer := tc.GetServer(tc.GetLeader())
+	leaderServer := tc.GetLeaderServer()
 	re.NoError(leaderServer.BootstrapCluster())
 	rc := leaderServer.GetServer().GetRaftCluster()
 	re.NotNil(rc)
@@ -1327,7 +1327,7 @@ func TestTransferLeaderForScheduler(t *testing.T) {
 	tc.ResignLeader()
 	rc.Stop()
 	tc.WaitLeader()
-	leaderServer = tc.GetServer(tc.GetLeader())
+	leaderServer = tc.GetLeaderServer()
 	rc1 := leaderServer.GetServer().GetRaftCluster()
 	rc1.Start(leaderServer.GetServer())
 	re.NoError(err)
@@ -1347,7 +1347,7 @@ func TestTransferLeaderForScheduler(t *testing.T) {
 	tc.ResignLeader()
 	rc1.Stop()
 	tc.WaitLeader()
-	leaderServer = tc.GetServer(tc.GetLeader())
+	leaderServer = tc.GetLeaderServer()
 	rc = leaderServer.GetServer().GetRaftCluster()
 	rc.Start(leaderServer.GetServer())
 	re.NotNil(rc)
diff --git a/tests/server/member/member_test.go b/tests/server/member/member_test.go
index 5965f9e22a6..e6657ffd223 100644
--- a/tests/server/member/member_test.go
+++ b/tests/server/member/member_test.go
@@ -260,7 +260,7 @@ func TestPDLeaderLostWhileEtcdLeaderIntact(t *testing.T) {
 	re.NoError(err)
 
 	leader1 := cluster.WaitLeader()
-	memberID := cluster.GetServer(leader1).GetLeader().GetMemberId()
+	memberID := cluster.GetLeaderServer().GetLeader().GetMemberId()
 
 	re.NoError(failpoint.Enable("github.com/tikv/pd/server/leaderLoopCheckAgain", fmt.Sprintf("return(\"%d\")", memberID)))
 	re.NoError(failpoint.Enable("github.com/tikv/pd/server/exitCampaignLeader", fmt.Sprintf("return(\"%d\")", memberID)))
@@ -338,7 +338,7 @@ func TestCampaignLeaderFrequently(t *testing.T) {
 	re.NotEmpty(cluster.GetLeader())
 
 	for i := 0; i < 3; i++ {
-		cluster.GetServers()[cluster.GetLeader()].ResetPDLeader()
+		cluster.GetLeaderServer().ResetPDLeader()
 		cluster.WaitLeader()
 	}
 	// leader should be changed when campaign leader frequently
diff --git a/tests/server/region_syncer/region_syncer_test.go b/tests/server/region_syncer/region_syncer_test.go
index b73d4abb9b5..6521432c0dc 100644
--- a/tests/server/region_syncer/region_syncer_test.go
+++ b/tests/server/region_syncer/region_syncer_test.go
@@ -259,7 +259,7 @@ func TestPrepareCheckerWithTransferLeader(t *testing.T) {
 	err = cluster.RunInitialServers()
 	re.NoError(err)
 	cluster.WaitLeader()
-	leaderServer := cluster.GetServer(cluster.GetLeader())
+	leaderServer := cluster.GetLeaderServer()
 	re.NoError(leaderServer.BootstrapCluster())
 	rc := leaderServer.GetServer().GetRaftCluster()
 	re.NotNil(rc)
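
For reference, below is a minimal usage sketch (not part of the diff) for the rule and region-label interfaces introduced above. It assumes the client/http package's NewClient constructor accepts a list of PD endpoints, as the integration test setup suggests, and that a PD server is reachable at the hypothetical address used here; it also borrows labeler.MakeKeyRanges from the test file. Treat it as an illustration under those assumptions rather than canonical API usage.

package main

import (
	"context"
	"log"

	pd "github.com/tikv/pd/client/http"
	"github.com/tikv/pd/pkg/schedule/labeler"
)

func main() {
	// Hypothetical endpoint; NewClient is assumed to take a list of PD addresses.
	client := pd.NewClient([]string{"http://127.0.0.1:2379"})
	defer client.Close()
	ctx := context.Background()

	// Read every placement rule bundle, then write the same set back.
	// With partial=false, configurations not present in the payload are dropped.
	bundles, err := client.GetAllPlacementRuleBundles(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := client.SetPlacementRuleBundles(ctx, bundles, false); err != nil {
		log.Fatal(err)
	}

	// Attach a label to a key range through the new region-label interface,
	// mirroring what TestRegionLabel does in the integration test.
	rule := &pd.LabelRule{
		ID:       "example/rule",
		Labels:   []pd.RegionLabel{{Key: "k1", Value: "v1"}},
		RuleType: "key-range",
		Data:     labeler.MakeKeyRanges("1234", "5678"),
	}
	if err := client.SetRegionLabelRule(ctx, rule); err != nil {
		log.Fatal(err)
	}
}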