diff --git a/client/http/api.go b/client/http/api.go index 5326919561d..2fae562dd20 100644 --- a/client/http/api.go +++ b/client/http/api.go @@ -17,24 +17,58 @@ package http import ( "fmt" "net/url" + "time" ) // The following constants are the paths of PD HTTP APIs. const ( - HotRead = "/pd/api/v1/hotspot/regions/read" - HotWrite = "/pd/api/v1/hotspot/regions/write" - Regions = "/pd/api/v1/regions" - regionByID = "/pd/api/v1/region/id" - regionByKey = "/pd/api/v1/region/key" - regionsByKey = "/pd/api/v1/regions/key" - regionsByStoreID = "/pd/api/v1/regions/store" - Stores = "/pd/api/v1/stores" + // Metadata + HotRead = "/pd/api/v1/hotspot/regions/read" + HotWrite = "/pd/api/v1/hotspot/regions/write" + HotHistory = "/pd/api/v1/hotspot/regions/history" + RegionByIDPrefix = "/pd/api/v1/region/id" + regionByKey = "/pd/api/v1/region/key" + Regions = "/pd/api/v1/regions" + regionsByKey = "/pd/api/v1/regions/key" + RegionsByStoreIDPrefix = "/pd/api/v1/regions/store" + EmptyRegions = "/pd/api/v1/regions/check/empty-region" + accelerateSchedule = "/pd/api/v1/regions/accelerate-schedule" + store = "/pd/api/v1/store" + Stores = "/pd/api/v1/stores" + StatsRegion = "/pd/api/v1/stats/region" + // Config + Config = "/pd/api/v1/config" + ClusterVersion = "/pd/api/v1/config/cluster-version" + ScheduleConfig = "/pd/api/v1/config/schedule" + ReplicateConfig = "/pd/api/v1/config/replicate" + // Rule + PlacementRule = "/pd/api/v1/config/rule" + PlacementRules = "/pd/api/v1/config/rules" + placementRulesByGroup = "/pd/api/v1/config/rules/group" + RegionLabelRule = "/pd/api/v1/config/region-label/rule" + // Scheduler + Schedulers = "/pd/api/v1/schedulers" + scatterRangeScheduler = "/pd/api/v1/schedulers/scatter-range-" + // Admin + ResetTS = "/pd/api/v1/admin/reset-ts" + BaseAllocID = "/pd/api/v1/admin/base-alloc-id" + SnapshotRecoveringMark = "/pd/api/v1/admin/cluster/markers/snapshot-recovering" + // Debug + PProfProfile = "/pd/api/v1/debug/pprof/profile" + PProfHeap = "/pd/api/v1/debug/pprof/heap" + PProfMutex = "/pd/api/v1/debug/pprof/mutex" + PProfAllocs = "/pd/api/v1/debug/pprof/allocs" + PProfBlock = "/pd/api/v1/debug/pprof/block" + PProfGoroutine = "/pd/api/v1/debug/pprof/goroutine" + // Others MinResolvedTSPrefix = "/pd/api/v1/min-resolved-ts" + Status = "/pd/api/v1/status" + Version = "/pd/api/v1/version" ) // RegionByID returns the path of PD HTTP API to get region by ID. func RegionByID(regionID uint64) string { - return fmt.Sprintf("%s/%d", regionByID, regionID) + return fmt.Sprintf("%s/%d", RegionByIDPrefix, regionID) } // RegionByKey returns the path of PD HTTP API to get region by key. @@ -45,10 +79,66 @@ func RegionByKey(key []byte) string { // RegionsByKey returns the path of PD HTTP API to scan regions with given start key, end key and limit parameters. func RegionsByKey(startKey, endKey []byte, limit int) string { return fmt.Sprintf("%s?start_key=%s&end_key=%s&limit=%d", - regionsByKey, url.QueryEscape(string(startKey)), url.QueryEscape(string(endKey)), limit) + regionsByKey, + url.QueryEscape(string(startKey)), + url.QueryEscape(string(endKey)), + limit) } // RegionsByStoreID returns the path of PD HTTP API to get regions by store ID. func RegionsByStoreID(storeID uint64) string { - return fmt.Sprintf("%s/%d", regionsByStoreID, storeID) + return fmt.Sprintf("%s/%d", RegionsByStoreIDPrefix, storeID) +} + +// RegionStatsByKeyRange returns the path of PD HTTP API to get region stats by start key and end key. 
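For a concrete sense of the strings these path helpers produce, here is a small sketch; it assumes the package is imported as pdhttp from github.com/tikv/pd/client/http, and the IDs and keys are made up:

package main

import (
	"fmt"

	pdhttp "github.com/tikv/pd/client/http"
)

func main() {
	// ID-based paths.
	fmt.Println(pdhttp.RegionByID(23))      // /pd/api/v1/region/id/23
	fmt.Println(pdhttp.RegionsByStoreID(1)) // /pd/api/v1/regions/store/1

	// Key-range paths: keys are query-escaped before being embedded.
	fmt.Println(pdhttp.RegionsByKey([]byte("a"), []byte("b"), 10))
	// /pd/api/v1/regions/key?start_key=a&end_key=b&limit=10
	fmt.Println(pdhttp.RegionStatsByKeyRange([]byte("a"), []byte("b")))
	// /pd/api/v1/stats/region?start_key=a&end_key=b
}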
+func RegionStatsByKeyRange(startKey, endKey []byte) string { + return fmt.Sprintf("%s?start_key=%s&end_key=%s", + StatsRegion, + url.QueryEscape(string(startKey)), + url.QueryEscape(string(endKey))) +} + +// StoreByID returns the store API with store ID parameter. +func StoreByID(id uint64) string { + return fmt.Sprintf("%s/%d", store, id) +} + +// StoreLabelByID returns the store label API with store ID parameter. +func StoreLabelByID(id uint64) string { + return fmt.Sprintf("%s/%d/label", store, id) +} + +// ConfigWithTTLSeconds returns the config API with the TTL seconds parameter. +func ConfigWithTTLSeconds(ttlSeconds float64) string { + return fmt.Sprintf("%s?ttlSecond=%.0f", Config, ttlSeconds) +} + +// PlacementRulesByGroup returns the path of PD HTTP API to get placement rules by group. +func PlacementRulesByGroup(group string) string { + return fmt.Sprintf("%s/%s", placementRulesByGroup, group) +} + +// PlacementRuleByGroupAndID returns the path of PD HTTP API to get placement rule by group and ID. +func PlacementRuleByGroupAndID(group, id string) string { + return fmt.Sprintf("%s/%s/%s", PlacementRule, group, id) +} + +// SchedulerByName returns the scheduler API with the given scheduler name. +func SchedulerByName(name string) string { + return fmt.Sprintf("%s/%s", Schedulers, name) +} + +// ScatterRangeSchedulerWithName returns the scatter range scheduler API with name parameter. +func ScatterRangeSchedulerWithName(name string) string { + return fmt.Sprintf("%s%s", scatterRangeScheduler, name) +} + +// PProfProfileAPIWithInterval returns the pprof profile API with interval parameter. +func PProfProfileAPIWithInterval(interval time.Duration) string { + return fmt.Sprintf("%s?seconds=%d", PProfProfile, interval/time.Second) +} + +// PProfGoroutineWithDebugLevel returns the pprof goroutine API with debug level parameter. +func PProfGoroutineWithDebugLevel(level int) string { + return fmt.Sprintf("%s?debug=%d", PProfGoroutine, level) } diff --git a/client/http/client.go b/client/http/client.go index 6cb1277dfcb..6fa2dd8cdfd 100644 --- a/client/http/client.go +++ b/client/http/client.go @@ -15,12 +15,14 @@ package http import ( + "bytes" "context" "crypto/tls" "encoding/json" "fmt" "io" "net/http" + "net/url" "strings" "time" @@ -43,12 +45,17 @@ type Client interface { GetRegionByID(context.Context, uint64) (*RegionInfo, error) GetRegionByKey(context.Context, []byte) (*RegionInfo, error) GetRegions(context.Context) (*RegionsInfo, error) - GetRegionsByKey(context.Context, []byte, []byte, int) (*RegionsInfo, error) + GetRegionsByKeyRange(context.Context, []byte, []byte, int) (*RegionsInfo, error) GetRegionsByStoreID(context.Context, uint64) (*RegionsInfo, error) GetHotReadRegions(context.Context) (*StoreHotPeersInfos, error) GetHotWriteRegions(context.Context) (*StoreHotPeersInfos, error) + GetRegionStatusByKeyRange(context.Context, []byte, []byte) (*RegionStats, error) GetStores(context.Context) (*StoresInfo, error) + GetPlacementRulesByGroup(context.Context, string) ([]*Rule, error) + SetPlacementRule(context.Context, *Rule) error + DeletePlacementRule(context.Context, string, string) error GetMinResolvedTSByStoresIDs(context.Context, []uint64) (uint64, map[uint64]uint64, error) + AccelerateSchedule(context.Context, []byte, []byte) error Close() } @@ -154,8 +161,8 @@ func (c *client) execDuration(name string, duration time.Duration) { // it consistent with the current implementation of some clients (e.g. TiDB). 
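In other words, the retry strategy below is a linear walk over the configured PD addresses that stops at the first success and otherwise returns the last error. A standalone sketch of that shape (illustrative names, not the client's actual fields):

package main

import (
	"errors"
	"fmt"
)

// tryEachAddr walks the address list in order and returns on the first
// success, otherwise it keeps and returns the last error seen.
func tryEachAddr(addrs []string, do func(addr string) error) error {
	var err error
	for _, addr := range addrs {
		if err = do(addr); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	addrs := []string{"http://pd-0:2379", "http://pd-1:2379"}
	err := tryEachAddr(addrs, func(addr string) error {
		if addr == "http://pd-1:2379" {
			return nil // pretend only the second PD answers
		}
		return errors.New("connection refused")
	})
	fmt.Println(err) // <nil>
}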
func (c *client) requestWithRetry( ctx context.Context, - name, uri string, - res interface{}, + name, uri, method string, + body io.Reader, res interface{}, ) error { var ( err error @@ -163,7 +170,7 @@ func (c *client) requestWithRetry( ) for idx := 0; idx < len(c.pdAddrs); idx++ { addr = c.pdAddrs[idx] - err = c.request(ctx, name, addr, uri, res) + err = c.request(ctx, name, fmt.Sprintf("%s%s", addr, uri), method, body, res) if err == nil { break } @@ -175,16 +182,15 @@ func (c *client) requestWithRetry( func (c *client) request( ctx context.Context, - name, addr, uri string, - res interface{}, + name, url, method string, + body io.Reader, res interface{}, ) error { - reqURL := fmt.Sprintf("%s%s", addr, uri) logFields := []zap.Field{ zap.String("name", name), - zap.String("url", reqURL), + zap.String("url", url), } log.Debug("[pd] request the http url", logFields...) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, reqURL, nil) + req, err := http.NewRequestWithContext(ctx, method, url, body) if err != nil { log.Error("[pd] create http request failed", append(logFields, zap.Error(err))...) return errors.Trace(err) @@ -219,6 +225,10 @@ func (c *client) request( return errors.Errorf("request pd http api failed with status: '%s'", resp.Status) } + if res == nil { + return nil + } + err = json.NewDecoder(resp.Body).Decode(res) if err != nil { return errors.Trace(err) @@ -229,7 +239,9 @@ func (c *client) request( // GetRegionByID gets the region info by ID. func (c *client) GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) { var region RegionInfo - err := c.requestWithRetry(ctx, "GetRegionByID", RegionByID(regionID), ®ion) + err := c.requestWithRetry(ctx, + "GetRegionByID", RegionByID(regionID), + http.MethodGet, nil, ®ion) if err != nil { return nil, err } @@ -239,7 +251,9 @@ func (c *client) GetRegionByID(ctx context.Context, regionID uint64) (*RegionInf // GetRegionByKey gets the region info by key. func (c *client) GetRegionByKey(ctx context.Context, key []byte) (*RegionInfo, error) { var region RegionInfo - err := c.requestWithRetry(ctx, "GetRegionByKey", RegionByKey(key), ®ion) + err := c.requestWithRetry(ctx, + "GetRegionByKey", RegionByKey(key), + http.MethodGet, nil, ®ion) if err != nil { return nil, err } @@ -249,17 +263,21 @@ func (c *client) GetRegionByKey(ctx context.Context, key []byte) (*RegionInfo, e // GetRegions gets the regions info. func (c *client) GetRegions(ctx context.Context) (*RegionsInfo, error) { var regions RegionsInfo - err := c.requestWithRetry(ctx, "GetRegions", Regions, ®ions) + err := c.requestWithRetry(ctx, + "GetRegions", Regions, + http.MethodGet, nil, ®ions) if err != nil { return nil, err } return ®ions, nil } -// GetRegionsByKey gets the regions info by key range. If the limit is -1, it will return all regions within the range. -func (c *client) GetRegionsByKey(ctx context.Context, startKey, endKey []byte, limit int) (*RegionsInfo, error) { +// GetRegionsByKeyRange gets the regions info by key range. If the limit is -1, it will return all regions within the range. 
+func (c *client) GetRegionsByKeyRange(ctx context.Context, startKey, endKey []byte, limit int) (*RegionsInfo, error) { var regions RegionsInfo - err := c.requestWithRetry(ctx, "GetRegionsByKey", RegionsByKey(startKey, endKey, limit), ®ions) + err := c.requestWithRetry(ctx, + "GetRegionsByKeyRange", RegionsByKey(startKey, endKey, limit), + http.MethodGet, nil, ®ions) if err != nil { return nil, err } @@ -269,7 +287,9 @@ func (c *client) GetRegionsByKey(ctx context.Context, startKey, endKey []byte, l // GetRegionsByStoreID gets the regions info by store ID. func (c *client) GetRegionsByStoreID(ctx context.Context, storeID uint64) (*RegionsInfo, error) { var regions RegionsInfo - err := c.requestWithRetry(ctx, "GetRegionsByStoreID", RegionsByStoreID(storeID), ®ions) + err := c.requestWithRetry(ctx, + "GetRegionsByStoreID", RegionsByStoreID(storeID), + http.MethodGet, nil, ®ions) if err != nil { return nil, err } @@ -279,7 +299,9 @@ func (c *client) GetRegionsByStoreID(ctx context.Context, storeID uint64) (*Regi // GetHotReadRegions gets the hot read region statistics info. func (c *client) GetHotReadRegions(ctx context.Context) (*StoreHotPeersInfos, error) { var hotReadRegions StoreHotPeersInfos - err := c.requestWithRetry(ctx, "GetHotReadRegions", HotRead, &hotReadRegions) + err := c.requestWithRetry(ctx, + "GetHotReadRegions", HotRead, + http.MethodGet, nil, &hotReadRegions) if err != nil { return nil, err } @@ -289,23 +311,70 @@ func (c *client) GetHotReadRegions(ctx context.Context) (*StoreHotPeersInfos, er // GetHotWriteRegions gets the hot write region statistics info. func (c *client) GetHotWriteRegions(ctx context.Context) (*StoreHotPeersInfos, error) { var hotWriteRegions StoreHotPeersInfos - err := c.requestWithRetry(ctx, "GetHotWriteRegions", HotWrite, &hotWriteRegions) + err := c.requestWithRetry(ctx, + "GetHotWriteRegions", HotWrite, + http.MethodGet, nil, &hotWriteRegions) if err != nil { return nil, err } return &hotWriteRegions, nil } +// GetRegionStatusByKeyRange gets the region status by key range. +func (c *client) GetRegionStatusByKeyRange(ctx context.Context, startKey, endKey []byte) (*RegionStats, error) { + var regionStats RegionStats + err := c.requestWithRetry(ctx, + "GetRegionStatusByKeyRange", RegionStatsByKeyRange(startKey, endKey), + http.MethodGet, nil, ®ionStats, + ) + if err != nil { + return nil, err + } + return ®ionStats, nil +} + // GetStores gets the stores info. func (c *client) GetStores(ctx context.Context) (*StoresInfo, error) { var stores StoresInfo - err := c.requestWithRetry(ctx, "GetStores", Stores, &stores) + err := c.requestWithRetry(ctx, + "GetStores", Stores, + http.MethodGet, nil, &stores) if err != nil { return nil, err } return &stores, nil } +// GetPlacementRulesByGroup gets the placement rules by group. +func (c *client) GetPlacementRulesByGroup(ctx context.Context, group string) ([]*Rule, error) { + var rules []*Rule + err := c.requestWithRetry(ctx, + "GetPlacementRulesByGroup", PlacementRulesByGroup(group), + http.MethodGet, nil, &rules) + if err != nil { + return nil, err + } + return rules, nil +} + +// SetPlacementRule sets the placement rule. +func (c *client) SetPlacementRule(ctx context.Context, rule *Rule) error { + ruleJSON, err := json.Marshal(rule) + if err != nil { + return errors.Trace(err) + } + return c.requestWithRetry(ctx, + "SetPlacementRule", PlacementRule, + http.MethodPost, bytes.NewBuffer(ruleJSON), nil) +} + +// DeletePlacementRule deletes the placement rule. 
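Taken together with the GET helpers above, the new write methods make the client usable for rule management and scheduling hints end to end. A usage sketch follows; the NewClient constructor shown here is assumed rather than part of this diff, so adjust it to the real signature:

package main

import (
	"context"
	"log"

	pdhttp "github.com/tikv/pd/client/http"
)

func main() {
	ctx := context.Background()
	// Assumed constructor: takes the PD endpoints and returns a Client.
	c := pdhttp.NewClient([]string{"http://127.0.0.1:2379"})
	defer c.Close()

	// Upsert a rule, read the group back, then delete the rule.
	rule := &pdhttp.Rule{GroupID: "demo", ID: "one-voter", Role: pdhttp.Voter, Count: 1}
	if err := c.SetPlacementRule(ctx, rule); err != nil {
		log.Fatal(err)
	}
	rules, err := c.GetPlacementRulesByGroup(ctx, "demo")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("rules in group demo: %d", len(rules))
	if err := c.DeletePlacementRule(ctx, "demo", "one-voter"); err != nil {
		log.Fatal(err)
	}

	// Region statistics for a key range, plus a scheduling hint for it.
	stats, err := c.GetRegionStatusByKeyRange(ctx, []byte("a"), []byte("z"))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("regions: %d, storage size: %d", stats.Count, stats.StorageSize)
	if err := c.AccelerateSchedule(ctx, []byte("a"), []byte("z")); err != nil {
		log.Fatal(err)
	}
}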
+func (c *client) DeletePlacementRule(ctx context.Context, group, id string) error { + return c.requestWithRetry(ctx, + "DeletePlacementRule", PlacementRuleByGroupAndID(group, id), + http.MethodDelete, nil, nil) +} + // GetMinResolvedTSByStoresIDs get min-resolved-ts by stores IDs. func (c *client) GetMinResolvedTSByStoresIDs(ctx context.Context, storeIDs []uint64) (uint64, map[uint64]uint64, error) { uri := MinResolvedTSPrefix @@ -326,7 +395,9 @@ func (c *client) GetMinResolvedTSByStoresIDs(ctx context.Context, storeIDs []uin IsRealTime bool `json:"is_real_time,omitempty"` StoresMinResolvedTS map[uint64]uint64 `json:"stores_min_resolved_ts"` }{} - err := c.requestWithRetry(ctx, "GetMinResolvedTSByStoresIDs", uri, &resp) + err := c.requestWithRetry(ctx, + "GetMinResolvedTSByStoresIDs", uri, + http.MethodGet, nil, &resp) if err != nil { return 0, nil, err } @@ -335,3 +406,18 @@ func (c *client) GetMinResolvedTSByStoresIDs(ctx context.Context, storeIDs []uin } return resp.MinResolvedTS, resp.StoresMinResolvedTS, nil } + +// AccelerateSchedule accelerates the scheduling of the regions within the given key range. +func (c *client) AccelerateSchedule(ctx context.Context, startKey, endKey []byte) error { + input := map[string]string{ + "start_key": url.QueryEscape(string(startKey)), + "end_key": url.QueryEscape(string(endKey)), + } + inputJSON, err := json.Marshal(input) + if err != nil { + return errors.Trace(err) + } + return c.requestWithRetry(ctx, + "AccelerateSchedule", accelerateSchedule, + http.MethodPost, bytes.NewBuffer(inputJSON), nil) +} diff --git a/client/http/types.go b/client/http/types.go index 66eb31ec3a1..c6bb0256c14 100644 --- a/client/http/types.go +++ b/client/http/types.go @@ -176,3 +176,73 @@ type StoreStatus struct { LastHeartbeatTS time.Time `json:"last_heartbeat_ts"` Uptime string `json:"uptime"` } + +// RegionStats stores the statistics of regions. +type RegionStats struct { + Count int `json:"count"` + EmptyCount int `json:"empty_count"` + StorageSize int64 `json:"storage_size"` + StorageKeys int64 `json:"storage_keys"` + StoreLeaderCount map[uint64]int `json:"store_leader_count"` + StorePeerCount map[uint64]int `json:"store_peer_count"` +} + +// PeerRoleType is the expected peer type of the placement rule. +type PeerRoleType string + +const ( + // Voter can either match a leader peer or follower peer + Voter PeerRoleType = "voter" + // Leader matches a leader. + Leader PeerRoleType = "leader" + // Follower matches a follower. + Follower PeerRoleType = "follower" + // Learner matches a learner. + Learner PeerRoleType = "learner" +) + +// LabelConstraint is used to filter store when trying to place peer of a region. +type LabelConstraint struct { + Key string `json:"key,omitempty"` + Op LabelConstraintOp `json:"op,omitempty"` + Values []string `json:"values,omitempty"` +} + +// LabelConstraintOp defines how a LabelConstraint matches a store. It can be one of +// 'in', 'notIn', 'exists', or 'notExists'. +type LabelConstraintOp string + +const ( + // In restricts the store label value should in the value list. + // If label does not exist, `in` is always false. + In LabelConstraintOp = "in" + // NotIn restricts the store label value should not in the value list. + // If label does not exist, `notIn` is always true. + NotIn LabelConstraintOp = "notIn" + // Exists restricts the store should have the label. + Exists LabelConstraintOp = "exists" + // NotExists restricts the store should not have the label. 
+ NotExists LabelConstraintOp = "notExists" +) + +// Rule is the placement rule that can be checked against a region. When +// applying rules (apply means schedule regions to match selected rules), the +// apply order is defined by the tuple [GroupIndex, GroupID, Index, ID]. +type Rule struct { + GroupID string `json:"group_id"` // mark the source that add the rule + ID string `json:"id"` // unique ID within a group + Index int `json:"index,omitempty"` // rule apply order in a group, rule with less ID is applied first when indexes are equal + Override bool `json:"override,omitempty"` // when it is true, all rules with less indexes are disabled + StartKey []byte `json:"-"` // range start key + StartKeyHex string `json:"start_key"` // hex format start key, for marshal/unmarshal + EndKey []byte `json:"-"` // range end key + EndKeyHex string `json:"end_key"` // hex format end key, for marshal/unmarshal + Role PeerRoleType `json:"role"` // expected role of the peers + IsWitness bool `json:"is_witness"` // when it is true, it means the role is also a witness + Count int `json:"count"` // expected count of the peers + LabelConstraints []LabelConstraint `json:"label_constraints,omitempty"` // used to select stores to place peers + LocationLabels []string `json:"location_labels,omitempty"` // used to make peers isolated physically + IsolationLevel string `json:"isolation_level,omitempty"` // used to isolate replicas explicitly and forcibly + Version uint64 `json:"version,omitempty"` // only set at runtime, add 1 each time rules updated, begin from 0. + CreateTimestamp uint64 `json:"create_timestamp,omitempty"` // only set at runtime, recorded rule create timestamp +} diff --git a/pkg/mock/mockcluster/config.go b/pkg/mock/mockcluster/config.go index 6febba026e8..a2e11b43deb 100644 --- a/pkg/mock/mockcluster/config.go +++ b/pkg/mock/mockcluster/config.go @@ -154,8 +154,8 @@ func (mc *Cluster) SetMaxReplicasWithLabel(enablePlacementRules bool, num int, l } if enablePlacementRules { rule := &placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Index: 1, StartKey: []byte(""), EndKey: []byte(""), diff --git a/pkg/schedule/checker/merge_checker_test.go b/pkg/schedule/checker/merge_checker_test.go index 6478eb0b2c4..5e9311c76cd 100644 --- a/pkg/schedule/checker/merge_checker_test.go +++ b/pkg/schedule/checker/merge_checker_test.go @@ -188,7 +188,7 @@ func (suite *mergeCheckerTestSuite) TestBasic() { // merge cannot across rule key. 
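The check being exercised here can be stated simply: two neighboring regions may merge only if the resulting span does not cross a placement rule's start or end key. A minimal illustration of that predicate, kept separate from the checker's real implementation:

package main

import "fmt"

// crossesRuleKey reports whether a merged span [start, end) would straddle
// the given rule boundary key. An empty end means "unbounded".
func crossesRuleKey(start, end, ruleKey string) bool {
	if ruleKey == "" {
		return false // an empty boundary never splits the keyspace
	}
	return start < ruleKey && (end == "" || ruleKey < end)
}

func main() {
	// A rule boundary at "t" splits ["a", "z"): the merged span would cross it.
	fmt.Println(crossesRuleKey("a", "z", "t")) // true
	// Both regions on the same side of the boundary are fine.
	fmt.Println(crossesRuleKey("a", "m", "t")) // false
}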
suite.cluster.SetEnablePlacementRules(true) suite.cluster.RuleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test", Index: 1, Override: true, @@ -202,7 +202,7 @@ func (suite *mergeCheckerTestSuite) TestBasic() { suite.NotNil(ops) suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) - suite.cluster.RuleManager.DeleteRule("pd", "test") + suite.cluster.RuleManager.DeleteRule(placement.DefaultGroupID, "test") // check 'merge_option' label suite.cluster.GetRegionLabeler().SetLabelRule(&labeler.LabelRule{ diff --git a/pkg/schedule/checker/rule_checker_test.go b/pkg/schedule/checker/rule_checker_test.go index 4185ce6c167..e77830fac49 100644 --- a/pkg/schedule/checker/rule_checker_test.go +++ b/pkg/schedule/checker/rule_checker_test.go @@ -88,7 +88,7 @@ func (suite *ruleCheckerTestSuite) TestAddRulePeerWithIsolationLevel() { suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z1", "rack": "r3", "host": "h1"}) suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test", Index: 100, Override: true, @@ -101,7 +101,7 @@ func (suite *ruleCheckerTestSuite) TestAddRulePeerWithIsolationLevel() { suite.Nil(op) suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test", Index: 100, Override: true, @@ -125,9 +125,9 @@ func (suite *ruleCheckerTestSuite) TestReplaceDownPeerWithIsolationLevel() { suite.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3", "host": "h5"}) suite.cluster.AddLabelsStore(6, 1, map[string]string{"zone": "z3", "host": "h6"}) suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3, 5) - suite.ruleManager.DeleteRule("pd", "default") + suite.ruleManager.DeleteRule(placement.DefaultGroupID, placement.DefaultRuleID) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test", Index: 100, Override: true, @@ -331,7 +331,7 @@ func (suite *ruleCheckerTestSuite) TestFixOrphanPeers2() { suite.cluster.AddLabelsStore(3, 1, map[string]string{"foo": "baz"}) suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Index: 100, Override: true, @@ -367,7 +367,7 @@ func (suite *ruleCheckerTestSuite) TestFixRoleLeader() { suite.cluster.AddLabelsStore(3, 1, map[string]string{"role": "voter"}) suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Index: 100, Override: true, @@ -378,7 +378,7 @@ func (suite *ruleCheckerTestSuite) TestFixRoleLeader() { }, }) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r2", Index: 101, Role: placement.Follower, @@ -398,7 +398,7 @@ func (suite *ruleCheckerTestSuite) TestFixRoleLeaderIssue3130() { suite.cluster.AddLabelsStore(2, 1, map[string]string{"role": "leader"}) suite.cluster.AddLeaderRegion(1, 1, 2) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Index: 100, Override: true, @@ -471,7 +471,7 @@ func (suite *ruleCheckerTestSuite) TestFixRuleWitness() { suite.cluster.AddLeaderRegion(1, 1) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: 
placement.DefaultGroupID, ID: "r1", Index: 100, Override: true, @@ -497,7 +497,7 @@ func (suite *ruleCheckerTestSuite) TestFixRuleWitness2() { suite.cluster.AddLeaderRegion(1, 1, 2, 3, 4) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Index: 100, Override: false, @@ -544,8 +544,8 @@ func (suite *ruleCheckerTestSuite) TestFixRuleWitness4() { err := suite.ruleManager.SetRules([]*placement.Rule{ { - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Index: 100, Override: true, Role: placement.Voter, @@ -553,7 +553,7 @@ func (suite *ruleCheckerTestSuite) TestFixRuleWitness4() { IsWitness: false, }, { - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Index: 100, Override: false, @@ -580,7 +580,7 @@ func (suite *ruleCheckerTestSuite) TestFixRuleWitness5() { suite.cluster.AddLeaderRegion(1, 1, 2, 3) err := suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Index: 100, Override: true, @@ -603,15 +603,15 @@ func (suite *ruleCheckerTestSuite) TestFixRuleWitness6() { err := suite.ruleManager.SetRules([]*placement.Rule{ { - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Index: 100, Role: placement.Voter, IsWitness: false, Count: 2, }, { - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Index: 100, Role: placement.Voter, @@ -641,15 +641,15 @@ func (suite *ruleCheckerTestSuite) TestDisableWitness() { err := suite.ruleManager.SetRules([]*placement.Rule{ { - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Index: 100, Role: placement.Voter, IsWitness: false, Count: 2, }, { - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Index: 100, Role: placement.Voter, @@ -680,7 +680,7 @@ func (suite *ruleCheckerTestSuite) TestBetterReplacement() { suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host3"}) suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test", Index: 100, Override: true, @@ -704,7 +704,7 @@ func (suite *ruleCheckerTestSuite) TestBetterReplacement2() { suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z2", "host": "host1"}) suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test", Index: 100, Override: true, @@ -727,7 +727,7 @@ func (suite *ruleCheckerTestSuite) TestNoBetterReplacement() { suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test", Index: 100, Override: true, @@ -835,8 +835,8 @@ func (suite *ruleCheckerTestSuite) TestFixOrphanPeerWithDisconnectedStoreAndRule suite.cluster.AddLabelsStore(5, 1, map[string]string{"host": "host5"}) suite.cluster.AddLeaderRegionWithRange(1, "", "", leader, followers...) 
rule := &placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Role: placement.Voter, Count: 5, StartKey: []byte{}, @@ -853,8 +853,8 @@ func (suite *ruleCheckerTestSuite) TestFixOrphanPeerWithDisconnectedStoreAndRule // change rule to 3 replicas rule = &placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3, StartKey: []byte{}, @@ -941,8 +941,8 @@ func (suite *ruleCheckerTestSuite) TestFixOrphanPeerWithDisconnectedStoreAndRule suite.cluster.AddLeaderRegionWithRange(1, "", "", leader, voterFollowers...) err := suite.ruleManager.SetRules([]*placement.Rule{ { - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Index: 100, Override: true, Role: placement.Voter, @@ -950,7 +950,7 @@ func (suite *ruleCheckerTestSuite) TestFixOrphanPeerWithDisconnectedStoreAndRule IsWitness: false, }, { - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Index: 100, Override: false, @@ -975,10 +975,10 @@ func (suite *ruleCheckerTestSuite) TestFixOrphanPeerWithDisconnectedStoreAndRule suite.cluster.SetStoreDisconnect(testCase[2]) // change rule to 3 replicas - suite.ruleManager.DeleteRule("pd", "r1") + suite.ruleManager.DeleteRule(placement.DefaultGroupID, "r1") suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3, StartKey: []byte{}, @@ -1106,13 +1106,13 @@ func (suite *ruleCheckerTestSuite) TestPriorityFitHealthPeersAndTiFlash() { suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4", "engine": "tiflash"}) suite.cluster.AddRegionWithLearner(1, 1, []uint64{2, 3}, []uint64{4}) rule := &placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test", Role: placement.Voter, Count: 3, } rule2 := &placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test2", Role: placement.Learner, Count: 1, @@ -1126,7 +1126,7 @@ func (suite *ruleCheckerTestSuite) TestPriorityFitHealthPeersAndTiFlash() { } suite.ruleManager.SetRule(rule) suite.ruleManager.SetRule(rule2) - suite.ruleManager.DeleteRule("pd", "default") + suite.ruleManager.DeleteRule(placement.DefaultGroupID, placement.DefaultRuleID) r1 := suite.cluster.GetRegion(1) // set peer3 to pending and down @@ -1177,12 +1177,12 @@ func (suite *ruleCheckerTestSuite) TestIssue3293() { suite.cluster.DeleteStore(suite.cluster.GetStore(5)) err = suite.ruleManager.SetRule(&placement.Rule{ GroupID: "TiDB_DDL_51", - ID: "default", + ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3, }) suite.NoError(err) - err = suite.ruleManager.DeleteRule("pd", "default") + err = suite.ruleManager.DeleteRule(placement.DefaultGroupID, placement.DefaultRuleID) suite.NoError(err) op := suite.rc.Check(suite.cluster.GetRegion(1)) suite.NotNil(op) @@ -1290,7 +1290,7 @@ func (suite *ruleCheckerTestSuite) TestFixDownPeer() { suite.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) suite.cluster.AddLeaderRegion(1, 1, 3, 4) rule := &placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test", Index: 100, Override: true, @@ -1346,13 +1346,13 @@ func (suite *ruleCheckerTestSuite) TestFixDownWitnessPeer() { r = r.Clone(core.WithWitnesses([]*metapb.Peer{r.GetPeer(2)})) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + 
ID: placement.DefaultRuleID, Role: placement.Voter, Count: 2, }) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Role: placement.Voter, Count: 1, @@ -1379,13 +1379,13 @@ func (suite *ruleCheckerTestSuite) TestFixDownPeerWithAvailableWitness() { r = r.Clone(core.WithWitnesses([]*metapb.Peer{r.GetPeer(3)})) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Role: placement.Voter, Count: 2, }) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Role: placement.Voter, Count: 1, @@ -1417,13 +1417,13 @@ func (suite *ruleCheckerTestSuite) TestFixDownPeerWithAvailableWitness2() { r = r.Clone(core.WithWitnesses([]*metapb.Peer{r.GetPeer(3)})) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Role: placement.Voter, Count: 2, }) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Role: placement.Voter, Count: 1, @@ -1451,13 +1451,13 @@ func (suite *ruleCheckerTestSuite) TestFixDownPeerWithAvailableWitness3() { r = r.Clone(core.WithWitnesses([]*metapb.Peer{r.GetPeer(3)})) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Role: placement.Voter, Count: 2, }) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Role: placement.Voter, Count: 1, @@ -1508,7 +1508,7 @@ func (suite *ruleCheckerTestSuite) TestFixOfflinePeer() { suite.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) suite.cluster.AddLeaderRegion(1, 1, 3, 4) rule := &placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test", Index: 100, Override: true, @@ -1543,13 +1543,13 @@ func (suite *ruleCheckerTestSuite) TestFixOfflinePeerWithAvaliableWitness() { r := suite.cluster.GetRegion(1) r = r.Clone(core.WithWitnesses([]*metapb.Peer{r.GetPeer(2)})) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Role: placement.Voter, Count: 2, }) suite.ruleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "r1", Role: placement.Voter, Count: 1, @@ -1573,7 +1573,7 @@ func (suite *ruleCheckerTestSuite) TestRuleCache() { suite.cluster.AddRegionStore(999, 1) suite.cluster.AddLeaderRegion(1, 1, 3, 4) rule := &placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test", Index: 100, Override: true, @@ -1592,7 +1592,7 @@ func (suite *ruleCheckerTestSuite) TestRuleCache() { stillCached bool }{ { - name: "default", + name: placement.DefaultRuleID, region: region, stillCached: true, }, @@ -1718,7 +1718,7 @@ func (suite *ruleCheckerTestSuite) TestDemoteVoter() { suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z4"}) region := suite.cluster.AddLeaderRegion(1, 1, 4) rule := &placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test", Role: placement.Voter, Count: 1, @@ -1731,7 +1731,7 @@ func (suite *ruleCheckerTestSuite) TestDemoteVoter() { }, } rule2 := &placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test2", Role: placement.Learner, Count: 1, @@ -1745,7 +1745,7 @@ func (suite *ruleCheckerTestSuite) TestDemoteVoter() { } 
suite.ruleManager.SetRule(rule) suite.ruleManager.SetRule(rule2) - suite.ruleManager.DeleteRule("pd", "default") + suite.ruleManager.DeleteRule(placement.DefaultGroupID, placement.DefaultRuleID) op := suite.rc.Check(region) suite.NotNil(op) suite.Equal("fix-demote-voter", op.Desc()) @@ -1807,7 +1807,7 @@ func (suite *ruleCheckerTestSuite) TestLocationLabels() { suite.cluster.AddLabelsStore(6, 1, map[string]string{"zone": "z2", "rack": "r3", "host": "h2"}) suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 5) rule1 := &placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test1", Role: placement.Leader, Count: 1, @@ -1821,7 +1821,7 @@ func (suite *ruleCheckerTestSuite) TestLocationLabels() { LocationLabels: []string{"rack"}, } rule2 := &placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test2", Role: placement.Voter, Count: 1, @@ -1835,7 +1835,7 @@ func (suite *ruleCheckerTestSuite) TestLocationLabels() { LocationLabels: []string{"rack"}, } rule3 := &placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test3", Role: placement.Voter, Count: 1, @@ -1851,7 +1851,7 @@ func (suite *ruleCheckerTestSuite) TestLocationLabels() { suite.ruleManager.SetRule(rule1) suite.ruleManager.SetRule(rule2) suite.ruleManager.SetRule(rule3) - suite.ruleManager.DeleteRule("pd", "default") + suite.ruleManager.DeleteRule(placement.DefaultGroupID, placement.DefaultRuleID) op := suite.rc.Check(suite.cluster.GetRegion(1)) suite.NotNil(op) suite.Equal("move-to-better-location", op.Desc()) @@ -1882,7 +1882,7 @@ func (suite *ruleCheckerTestSuite) TestTiFlashLocationLabels() { }, } suite.ruleManager.SetRule(rule1) - rule := suite.ruleManager.GetRule("pd", "default") + rule := suite.ruleManager.GetRule(placement.DefaultGroupID, placement.DefaultRuleID) rule.LocationLabels = []string{"zone", "rack", "host"} suite.ruleManager.SetRule(rule) op := suite.rc.Check(suite.cluster.GetRegion(1)) diff --git a/pkg/schedule/filter/filters_test.go b/pkg/schedule/filter/filters_test.go index fa085890694..f030dff81a4 100644 --- a/pkg/schedule/filter/filters_test.go +++ b/pkg/schedule/filter/filters_test.go @@ -159,7 +159,7 @@ func TestRuleFitFilterWithPlacementRule(t *testing.T) { testCluster := mockcluster.NewCluster(ctx, opt) testCluster.SetEnablePlacementRules(true) ruleManager := testCluster.RuleManager - ruleManager.DeleteRule("pd", "default") + ruleManager.DeleteRule(placement.DefaultGroupID, placement.DefaultRuleID) err := ruleManager.SetRules([]*placement.Rule{ { GroupID: "test", diff --git a/pkg/schedule/operator/create_operator_test.go b/pkg/schedule/operator/create_operator_test.go index 08a30680303..2fcd45d11f2 100644 --- a/pkg/schedule/operator/create_operator_test.go +++ b/pkg/schedule/operator/create_operator_test.go @@ -1145,8 +1145,8 @@ func TestCreateLeaveJointStateOperatorWithoutFitRules(t *testing.T) { cluster := mockcluster.NewCluster(ctx, opts) re.NoError(cluster.SetRules([]*placement.Rule{ { - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, StartKeyHex: hex.EncodeToString([]byte("")), EndKeyHex: hex.EncodeToString([]byte("")), Role: placement.Voter, diff --git a/pkg/schedule/placement/fit_region_test.go b/pkg/schedule/placement/fit_region_test.go index 0ec67b2a2aa..5bc62d9cc12 100644 --- a/pkg/schedule/placement/fit_region_test.go +++ b/pkg/schedule/placement/fit_region_test.go @@ -55,8 +55,8 @@ func (ms mockStoresSet) GetStore(id uint64) *core.StoreInfo { func addExtraRules(extraRules int) 
[]*Rule { rules := make([]*Rule, 0) rules = append(rules, &Rule{ - GroupID: "pd", - ID: "default", + GroupID: DefaultGroupID, + ID: DefaultRuleID, Role: Voter, Count: 3, LocationLabels: []string{}, @@ -110,8 +110,8 @@ func BenchmarkFitRegion(b *testing.B) { region := mockRegion(3, 0) rules := []*Rule{ { - GroupID: "pd", - ID: "default", + GroupID: DefaultGroupID, + ID: DefaultRuleID, Role: Voter, Count: 3, LocationLabels: []string{}, @@ -129,8 +129,8 @@ func BenchmarkFitRegionMoreStores(b *testing.B) { region := mockRegion(3, 0) rules := []*Rule{ { - GroupID: "pd", - ID: "default", + GroupID: DefaultGroupID, + ID: DefaultRuleID, Role: Voter, Count: 3, LocationLabels: []string{}, @@ -148,8 +148,8 @@ func BenchmarkFitRegionMorePeers(b *testing.B) { region := mockRegion(5, 0) rules := []*Rule{ { - GroupID: "pd", - ID: "default", + GroupID: DefaultGroupID, + ID: DefaultRuleID, Role: Voter, Count: 5, LocationLabels: []string{}, @@ -167,14 +167,14 @@ func BenchmarkFitRegionMorePeersEquals(b *testing.B) { region := mockRegion(3, 0) rules := []*Rule{ { - GroupID: "pd", - ID: "default", + GroupID: DefaultGroupID, + ID: DefaultRuleID, Role: Leader, Count: 1, LocationLabels: []string{}, }, { - GroupID: "pd", + GroupID: DefaultGroupID, ID: "default-2", Role: Follower, Count: 4, @@ -193,8 +193,8 @@ func BenchmarkFitRegionMorePeersSplitRules(b *testing.B) { region := mockRegion(3, 0) rules := []*Rule{ { - GroupID: "pd", - ID: "default", + GroupID: DefaultGroupID, + ID: DefaultRuleID, Role: Leader, Count: 1, LocationLabels: []string{}, @@ -202,7 +202,7 @@ func BenchmarkFitRegionMorePeersSplitRules(b *testing.B) { } for i := 0; i < 4; i++ { rules = append(rules, &Rule{ - GroupID: "pd", + GroupID: DefaultGroupID, ID: fmt.Sprintf("%v", i), Role: Follower, Count: 1, @@ -221,8 +221,8 @@ func BenchmarkFitRegionMoreVotersSplitRules(b *testing.B) { region := mockRegion(5, 0) rules := []*Rule{ { - GroupID: "pd", - ID: "default", + GroupID: DefaultGroupID, + ID: DefaultRuleID, Role: Voter, Count: 1, LocationLabels: []string{}, @@ -230,7 +230,7 @@ func BenchmarkFitRegionMoreVotersSplitRules(b *testing.B) { } for i := 0; i < 4; i++ { rules = append(rules, &Rule{ - GroupID: "pd", + GroupID: DefaultGroupID, ID: fmt.Sprintf("%v", i), Role: Voter, Count: 1, @@ -260,7 +260,7 @@ func BenchmarkFitRegionCrossRegion(b *testing.B) { region := mockRegion(5, 0) rules := make([]*Rule, 0) rules = append(rules, &Rule{ - GroupID: "pd", + GroupID: DefaultGroupID, ID: "1", Role: Leader, Count: 1, @@ -268,7 +268,7 @@ func BenchmarkFitRegionCrossRegion(b *testing.B) { }) for i := 0; i < 2; i++ { rules = append(rules, &Rule{ - GroupID: "pd", + GroupID: DefaultGroupID, ID: fmt.Sprintf("%v", i), Role: Follower, Count: 1, @@ -289,7 +289,7 @@ func BenchmarkFitRegionWithMoreRulesAndStoreLabels(b *testing.B) { // create 100 rules, with each rule has 101 LabelConstraints. 
for i := 0; i < 100; i++ { rule := &Rule{ - GroupID: "pd", + GroupID: DefaultGroupID, ID: fmt.Sprintf("%v", i), Role: Follower, Count: 3, @@ -351,7 +351,7 @@ func BenchmarkFitRegionWithLocationLabels(b *testing.B) { region := mockRegion(5, 5) rules := []*Rule{} rule := &Rule{ - GroupID: "pd", + GroupID: DefaultGroupID, ID: "followers", Role: Follower, Count: 3, @@ -360,7 +360,7 @@ func BenchmarkFitRegionWithLocationLabels(b *testing.B) { } rules = append(rules, rule) rule = &Rule{ - GroupID: "pd", + GroupID: DefaultGroupID, ID: "learner", Role: Learner, Count: 3, @@ -369,7 +369,7 @@ func BenchmarkFitRegionWithLocationLabels(b *testing.B) { } rules = append(rules, rule) rule = &Rule{ - GroupID: "pd", + GroupID: DefaultGroupID, ID: "voters", Role: Voter, Count: 4, diff --git a/pkg/schedule/placement/region_rule_cache_test.go b/pkg/schedule/placement/region_rule_cache_test.go index b4164e85530..835203bed26 100644 --- a/pkg/schedule/placement/region_rule_cache_test.go +++ b/pkg/schedule/placement/region_rule_cache_test.go @@ -99,8 +99,8 @@ func TestRegionRuleFitCache(t *testing.T) { region: mockRegion(3, 0), rules: []*Rule{ { - GroupID: "pd", - ID: "default", + GroupID: DefaultGroupID, + ID: DefaultRuleID, Role: Voter, Count: 4, Version: 1, @@ -114,8 +114,8 @@ func TestRegionRuleFitCache(t *testing.T) { region: mockRegion(3, 0), rules: []*Rule{ { - GroupID: "pd", - ID: "default", + GroupID: DefaultGroupID, + ID: DefaultRuleID, Role: Voter, Count: 3, CreateTimestamp: 1, @@ -141,7 +141,7 @@ func TestRegionRuleFitCache(t *testing.T) { region: mockRegion(3, 0), rules: []*Rule{ { - GroupID: "pd", + GroupID: DefaultGroupID, ID: "default-2", Role: Voter, Count: 3, @@ -155,7 +155,7 @@ func TestRegionRuleFitCache(t *testing.T) { region: nil, rules: []*Rule{ { - GroupID: "pd", + GroupID: DefaultGroupID, ID: "default-2", Role: Voter, Count: 3, diff --git a/pkg/schedule/placement/rule_manager.go b/pkg/schedule/placement/rule_manager.go index a7e169b74aa..e25b8802b45 100644 --- a/pkg/schedule/placement/rule_manager.go +++ b/pkg/schedule/placement/rule_manager.go @@ -37,6 +37,15 @@ import ( "golang.org/x/exp/slices" ) +const ( + // DefaultGroupID is the default rule group ID. + DefaultGroupID = "pd" + // DefaultRuleID is the default rule ID. + DefaultRuleID = "default" + // defaultWitnessRuleID is the default witness rule ID. + defaultWitnessRuleID = "witness" +) + // RuleManager is responsible for the lifecycle of all placement Rules. // It is thread safe. 
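Most of the remaining hunks are a mechanical consequence of the constants introduced above: every literal "pd"/"default" pair at rule-manager call sites becomes DefaultGroupID and DefaultRuleID. A trivial check of what the constants name:

package main

import (
	"fmt"

	"github.com/tikv/pd/pkg/schedule/placement"
)

func main() {
	// The constants simply give names to the long-standing default IDs, so
	// ruleManager.GetRule("pd", "default") becomes
	// ruleManager.GetRule(placement.DefaultGroupID, placement.DefaultRuleID).
	fmt.Println(placement.DefaultGroupID) // pd
	fmt.Println(placement.DefaultRuleID)  // default
}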
type RuleManager struct { @@ -88,16 +97,16 @@ func (m *RuleManager) Initialize(maxReplica int, locationLabels []string, isolat defaultRules = append(defaultRules, []*Rule{ { - GroupID: "pd", - ID: "default", + GroupID: DefaultGroupID, + ID: DefaultRuleID, Role: Voter, Count: maxReplica - witnessCount, LocationLabels: locationLabels, IsolationLevel: isolationLevel, }, { - GroupID: "pd", - ID: "witness", + GroupID: DefaultGroupID, + ID: defaultWitnessRuleID, Role: Voter, Count: witnessCount, IsWitness: true, @@ -108,8 +117,8 @@ func (m *RuleManager) Initialize(maxReplica int, locationLabels []string, isolat ) } else { defaultRules = append(defaultRules, &Rule{ - GroupID: "pd", - ID: "default", + GroupID: DefaultGroupID, + ID: DefaultRuleID, Role: Voter, Count: maxReplica, LocationLabels: locationLabels, diff --git a/pkg/schedule/placement/rule_manager_test.go b/pkg/schedule/placement/rule_manager_test.go index dad50a2d881..68a18b538d4 100644 --- a/pkg/schedule/placement/rule_manager_test.go +++ b/pkg/schedule/placement/rule_manager_test.go @@ -44,8 +44,8 @@ func TestDefault(t *testing.T) { _, manager := newTestManager(t, false) rules := manager.GetAllRules() re.Len(rules, 1) - re.Equal("pd", rules[0].GroupID) - re.Equal("default", rules[0].ID) + re.Equal(DefaultGroupID, rules[0].GroupID) + re.Equal(DefaultRuleID, rules[0].ID) re.Equal(0, rules[0].Index) re.Empty(rules[0].StartKey) re.Empty(rules[0].EndKey) @@ -58,15 +58,15 @@ func TestDefault2(t *testing.T) { _, manager := newTestManager(t, true) rules := manager.GetAllRules() re.Len(rules, 2) - re.Equal("pd", rules[0].GroupID) - re.Equal("default", rules[0].ID) + re.Equal(DefaultGroupID, rules[0].GroupID) + re.Equal(DefaultRuleID, rules[0].ID) re.Equal(0, rules[0].Index) re.Empty(rules[0].StartKey) re.Empty(rules[0].EndKey) re.Equal(Voter, rules[0].Role) re.Equal([]string{"zone", "rack", "host"}, rules[0].LocationLabels) - re.Equal("pd", rules[1].GroupID) - re.Equal("witness", rules[1].ID) + re.Equal(DefaultGroupID, rules[1].GroupID) + re.Equal(defaultWitnessRuleID, rules[1].ID) re.Equal(0, rules[1].Index) re.Empty(rules[1].StartKey) re.Empty(rules[1].EndKey) @@ -79,16 +79,16 @@ func TestAdjustRule(t *testing.T) { re := require.New(t) _, manager := newTestManager(t, false) rules := []Rule{ - {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: "voter", Count: 3}, - {GroupID: "", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: "voter", Count: 3}, - {GroupID: "group", ID: "", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: "voter", Count: 3}, - {GroupID: "group", ID: "id", StartKeyHex: "123ab", EndKeyHex: "123abf", Role: "voter", Count: 3}, - {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "1123abf", Role: "voter", Count: 3}, - {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123aaa", Role: "voter", Count: 3}, + {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: Voter, Count: 3}, + {GroupID: "", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: Voter, Count: 3}, + {GroupID: "group", ID: "", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: Voter, Count: 3}, + {GroupID: "group", ID: "id", StartKeyHex: "123ab", EndKeyHex: "123abf", Role: Voter, Count: 3}, + {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "1123abf", Role: Voter, Count: 3}, + {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123aaa", Role: Voter, Count: 3}, {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: "master", 
Count: 3}, - {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: "voter", Count: 0}, - {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: "voter", Count: -1}, - {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: "voter", Count: 3, LabelConstraints: []LabelConstraint{{Op: "foo"}}}, + {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: Voter, Count: 0}, + {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: Voter, Count: -1}, + {GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: Voter, Count: 3, LabelConstraints: []LabelConstraint{{Op: "foo"}}}, } re.NoError(manager.adjustRule(&rules[0], "group")) @@ -101,17 +101,17 @@ func TestAdjustRule(t *testing.T) { } manager.SetKeyType(constant.Table.String()) - re.Error(manager.adjustRule(&Rule{GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: "voter", Count: 3}, "group")) + re.Error(manager.adjustRule(&Rule{GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: Voter, Count: 3}, "group")) manager.SetKeyType(constant.Txn.String()) - re.Error(manager.adjustRule(&Rule{GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: "voter", Count: 3}, "group")) + re.Error(manager.adjustRule(&Rule{GroupID: "group", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: Voter, Count: 3}, "group")) re.Error(manager.adjustRule(&Rule{ GroupID: "group", ID: "id", StartKeyHex: hex.EncodeToString(codec.EncodeBytes([]byte{0})), EndKeyHex: "123abf", - Role: "voter", + Role: Voter, Count: 3, }, "group")) @@ -120,7 +120,7 @@ func TestAdjustRule(t *testing.T) { ID: "id", StartKeyHex: hex.EncodeToString(codec.EncodeBytes([]byte{0})), EndKeyHex: hex.EncodeToString(codec.EncodeBytes([]byte{1})), - Role: "learner", + Role: Learner, Count: 1, IsWitness: true, LabelConstraints: []LabelConstraint{{Key: "engine", Op: "in", Values: []string{"tiflash"}}}, @@ -130,15 +130,15 @@ func TestAdjustRule(t *testing.T) { func TestLeaderCheck(t *testing.T) { re := require.New(t) _, manager := newTestManager(t, false) - re.Regexp(".*needs at least one leader or voter.*", manager.SetRule(&Rule{GroupID: "pd", ID: "default", Role: "learner", Count: 3}).Error()) - re.Regexp(".*define multiple leaders by count 2.*", manager.SetRule(&Rule{GroupID: "g2", ID: "33", Role: "leader", Count: 2}).Error()) + re.Regexp(".*needs at least one leader or voter.*", manager.SetRule(&Rule{GroupID: DefaultGroupID, ID: DefaultRuleID, Role: Learner, Count: 3}).Error()) + re.Regexp(".*define multiple leaders by count 2.*", manager.SetRule(&Rule{GroupID: "g2", ID: "33", Role: Leader, Count: 2}).Error()) re.Regexp(".*multiple leader replicas.*", manager.Batch([]RuleOp{ { - Rule: &Rule{GroupID: "g2", ID: "foo1", Role: "leader", Count: 1}, + Rule: &Rule{GroupID: "g2", ID: "foo1", Role: Leader, Count: 1}, Action: RuleOpAdd, }, { - Rule: &Rule{GroupID: "g2", ID: "foo2", Role: "leader", Count: 1}, + Rule: &Rule{GroupID: "g2", ID: "foo2", Role: Leader, Count: 1}, Action: RuleOpAdd, }, }).Error()) @@ -148,9 +148,9 @@ func TestSaveLoad(t *testing.T) { re := require.New(t) store, manager := newTestManager(t, false) rules := []*Rule{ - {GroupID: "pd", ID: "default", Role: "voter", Count: 5}, - {GroupID: "foo", ID: "baz", StartKeyHex: "", EndKeyHex: "abcd", Role: "voter", Count: 1}, - {GroupID: "foo", ID: "bar", Role: "learner", Count: 1}, + {GroupID: DefaultGroupID, ID: DefaultRuleID, Role: 
Voter, Count: 5}, + {GroupID: "foo", ID: "baz", StartKeyHex: "", EndKeyHex: "abcd", Role: Voter, Count: 1}, + {GroupID: "foo", ID: "bar", Role: Learner, Count: 1}, } for _, r := range rules { re.NoError(manager.SetRule(r.Clone())) @@ -160,7 +160,7 @@ func TestSaveLoad(t *testing.T) { err := m2.Initialize(3, []string{"no", "labels"}, "") re.NoError(err) re.Len(m2.GetAllRules(), 3) - re.Equal(rules[0].String(), m2.GetRule("pd", "default").String()) + re.Equal(rules[0].String(), m2.GetRule(DefaultGroupID, DefaultRuleID).String()) re.Equal(rules[1].String(), m2.GetRule("foo", "baz").String()) re.Equal(rules[2].String(), m2.GetRule("foo", "bar").String()) re.Equal(manager.GetRulesCount(), 3) @@ -170,14 +170,14 @@ func TestSaveLoad(t *testing.T) { func TestSetAfterGet(t *testing.T) { re := require.New(t) store, manager := newTestManager(t, false) - rule := manager.GetRule("pd", "default") + rule := manager.GetRule(DefaultGroupID, DefaultRuleID) rule.Count = 1 manager.SetRule(rule) m2 := NewRuleManager(store, nil, nil) err := m2.Initialize(100, []string{}, "") re.NoError(err) - rule = m2.GetRule("pd", "default") + rule = m2.GetRule(DefaultGroupID, DefaultRuleID) re.Equal(1, rule.Count) } @@ -193,9 +193,9 @@ func TestKeys(t *testing.T) { re := require.New(t) _, manager := newTestManager(t, false) rules := []*Rule{ - {GroupID: "1", ID: "1", Role: "voter", Count: 1, StartKeyHex: "", EndKeyHex: ""}, - {GroupID: "2", ID: "2", Role: "voter", Count: 1, StartKeyHex: "11", EndKeyHex: "ff"}, - {GroupID: "2", ID: "3", Role: "voter", Count: 1, StartKeyHex: "22", EndKeyHex: "dd"}, + {GroupID: "1", ID: "1", Role: Voter, Count: 1, StartKeyHex: "", EndKeyHex: ""}, + {GroupID: "2", ID: "2", Role: Voter, Count: 1, StartKeyHex: "11", EndKeyHex: "ff"}, + {GroupID: "2", ID: "3", Role: Voter, Count: 1, StartKeyHex: "22", EndKeyHex: "dd"}, } toDelete := []RuleOp{} @@ -207,16 +207,16 @@ func TestKeys(t *testing.T) { DeleteByIDPrefix: false, }) } - checkRules(t, manager.GetAllRules(), [][2]string{{"1", "1"}, {"2", "2"}, {"2", "3"}, {"pd", "default"}}) + checkRules(t, manager.GetAllRules(), [][2]string{{"1", "1"}, {"2", "2"}, {"2", "3"}, {DefaultGroupID, DefaultRuleID}}) manager.Batch(toDelete) - checkRules(t, manager.GetAllRules(), [][2]string{{"pd", "default"}}) + checkRules(t, manager.GetAllRules(), [][2]string{{DefaultGroupID, DefaultRuleID}}) - rules = append(rules, &Rule{GroupID: "3", ID: "4", Role: "voter", Count: 1, StartKeyHex: "44", EndKeyHex: "ee"}, - &Rule{GroupID: "3", ID: "5", Role: "voter", Count: 1, StartKeyHex: "44", EndKeyHex: "dd"}) + rules = append(rules, &Rule{GroupID: "3", ID: "4", Role: Voter, Count: 1, StartKeyHex: "44", EndKeyHex: "ee"}, + &Rule{GroupID: "3", ID: "5", Role: Voter, Count: 1, StartKeyHex: "44", EndKeyHex: "dd"}) manager.SetRules(rules) - checkRules(t, manager.GetAllRules(), [][2]string{{"1", "1"}, {"2", "2"}, {"2", "3"}, {"3", "4"}, {"3", "5"}, {"pd", "default"}}) + checkRules(t, manager.GetAllRules(), [][2]string{{"1", "1"}, {"2", "2"}, {"2", "3"}, {"3", "4"}, {"3", "5"}, {DefaultGroupID, DefaultRuleID}}) - manager.DeleteRule("pd", "default") + manager.DeleteRule(DefaultGroupID, DefaultRuleID) checkRules(t, manager.GetAllRules(), [][2]string{{"1", "1"}, {"2", "2"}, {"2", "3"}, {"3", "4"}, {"3", "5"}}) splitKeys := [][]string{ @@ -282,12 +282,12 @@ func TestKeys(t *testing.T) { func TestDeleteByIDPrefix(t *testing.T) { _, manager := newTestManager(t, false) manager.SetRules([]*Rule{ - {GroupID: "g1", ID: "foo1", Role: "voter", Count: 1}, - {GroupID: "g2", ID: "foo1", Role: "voter", 
Count: 1}, - {GroupID: "g2", ID: "foobar", Role: "voter", Count: 1}, - {GroupID: "g2", ID: "baz2", Role: "voter", Count: 1}, + {GroupID: "g1", ID: "foo1", Role: Voter, Count: 1}, + {GroupID: "g2", ID: "foo1", Role: Voter, Count: 1}, + {GroupID: "g2", ID: "foobar", Role: Voter, Count: 1}, + {GroupID: "g2", ID: "baz2", Role: Voter, Count: 1}, }) - manager.DeleteRule("pd", "default") + manager.DeleteRule(DefaultGroupID, DefaultRuleID) checkRules(t, manager.GetAllRules(), [][2]string{{"g1", "foo1"}, {"g2", "baz2"}, {"g2", "foo1"}, {"g2", "foobar"}}) manager.Batch([]RuleOp{{ @@ -301,40 +301,40 @@ func TestDeleteByIDPrefix(t *testing.T) { func TestRangeGap(t *testing.T) { re := require.New(t) _, manager := newTestManager(t, false) - err := manager.DeleteRule("pd", "default") + err := manager.DeleteRule(DefaultGroupID, DefaultRuleID) re.Error(err) - err = manager.SetRule(&Rule{GroupID: "pd", ID: "foo", StartKeyHex: "", EndKeyHex: "abcd", Role: "voter", Count: 1}) + err = manager.SetRule(&Rule{GroupID: DefaultGroupID, ID: "foo", StartKeyHex: "", EndKeyHex: "abcd", Role: Voter, Count: 1}) re.NoError(err) // |-- default --| // |-- foo --| // still cannot delete default since it will cause ("abcd", "") has no rules inside. - err = manager.DeleteRule("pd", "default") + err = manager.DeleteRule(DefaultGroupID, DefaultRuleID) re.Error(err) - err = manager.SetRule(&Rule{GroupID: "pd", ID: "bar", StartKeyHex: "abcd", EndKeyHex: "", Role: "voter", Count: 1}) + err = manager.SetRule(&Rule{GroupID: DefaultGroupID, ID: "bar", StartKeyHex: "abcd", EndKeyHex: "", Role: Voter, Count: 1}) re.NoError(err) // now default can be deleted. - err = manager.DeleteRule("pd", "default") + err = manager.DeleteRule(DefaultGroupID, DefaultRuleID) re.NoError(err) // cannot change range since it will cause ("abaa", "abcd") has no rules inside. 
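The errors asserted in this test all come from one invariant: the union of all rule key ranges must cover the whole keyspace, so an update is rejected if it would open a gap. A self-contained sketch of that coverage check, kept separate from the RuleManager's actual algorithm:

package main

import (
	"fmt"
	"sort"
)

// keyRange is a [start, end) span of hex-encoded keys; an empty end means
// "up to the maximum key".
type keyRange struct{ start, end string }

// coversKeyspace reports whether the ranges, taken together, leave no gap
// from the minimum key ("") to the maximum key.
func coversKeyspace(ranges []keyRange) bool {
	sort.Slice(ranges, func(i, j int) bool { return ranges[i].start < ranges[j].start })
	reach := "" // the keyspace is covered from "" up to (but not including) reach
	first := true
	for _, r := range ranges {
		if first {
			if r.start != "" {
				return false // nothing covers the very beginning
			}
			first = false
		} else if r.start > reach {
			return false // hole between reach and r.start
		}
		if r.end == "" {
			return true // reached the maximum key
		}
		if r.end > reach {
			reach = r.end
		}
	}
	return false
}

func main() {
	// Mirrors the scenario above: "foo" alone covers ["", "abcd"), so removing
	// the rule that covers ["abcd", max) would leave a gap and is rejected.
	fmt.Println(coversKeyspace([]keyRange{{"", "abcd"}}))               // false
	fmt.Println(coversKeyspace([]keyRange{{"", "abcd"}, {"abcd", ""}})) // true
}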
- err = manager.SetRule(&Rule{GroupID: "pd", ID: "foo", StartKeyHex: "", EndKeyHex: "abaa", Role: "voter", Count: 1}) + err = manager.SetRule(&Rule{GroupID: DefaultGroupID, ID: "foo", StartKeyHex: "", EndKeyHex: "abaa", Role: Voter, Count: 1}) re.Error(err) } func TestGroupConfig(t *testing.T) { re := require.New(t) _, manager := newTestManager(t, false) - pd1 := &RuleGroup{ID: "pd"} - re.Equal(pd1, manager.GetRuleGroup("pd")) + pd1 := &RuleGroup{ID: DefaultGroupID} + re.Equal(pd1, manager.GetRuleGroup(DefaultGroupID)) // update group pd - pd2 := &RuleGroup{ID: "pd", Index: 100, Override: true} + pd2 := &RuleGroup{ID: DefaultGroupID, Index: 100, Override: true} err := manager.SetRuleGroup(pd2) re.NoError(err) - re.Equal(pd2, manager.GetRuleGroup("pd")) + re.Equal(pd2, manager.GetRuleGroup(DefaultGroupID)) // new group g without config - err = manager.SetRule(&Rule{GroupID: "g", ID: "1", Role: "voter", Count: 1}) + err = manager.SetRule(&Rule{GroupID: "g", ID: "1", Role: Voter, Count: 1}) re.NoError(err) g1 := &RuleGroup{ID: "g"} re.Equal(g1, manager.GetRuleGroup("g")) @@ -347,12 +347,12 @@ func TestGroupConfig(t *testing.T) { re.Equal([]*RuleGroup{g2, pd2}, manager.GetRuleGroups()) // delete pd group, restore to default config - err = manager.DeleteRuleGroup("pd") + err = manager.DeleteRuleGroup(DefaultGroupID) re.NoError(err) re.Equal([]*RuleGroup{pd1, g2}, manager.GetRuleGroups()) // delete rule, the group is removed too - err = manager.DeleteRule("pd", "default") + err = manager.DeleteRule(DefaultGroupID, DefaultRuleID) re.NoError(err) re.Equal([]*RuleGroup{g2}, manager.GetRuleGroups()) } @@ -360,16 +360,16 @@ func TestGroupConfig(t *testing.T) { func TestRuleVersion(t *testing.T) { re := require.New(t) _, manager := newTestManager(t, false) - rule1 := manager.GetRule("pd", "default") + rule1 := manager.GetRule(DefaultGroupID, DefaultRuleID) re.Equal(uint64(0), rule1.Version) // create new rule - newRule := &Rule{GroupID: "g1", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: "voter", Count: 3} + newRule := &Rule{GroupID: "g1", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: Voter, Count: 3} err := manager.SetRule(newRule) re.NoError(err) newRule = manager.GetRule("g1", "id") re.Equal(uint64(0), newRule.Version) // update rule - newRule = &Rule{GroupID: "g1", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: "voter", Count: 2} + newRule = &Rule{GroupID: "g1", ID: "id", StartKeyHex: "123abc", EndKeyHex: "123abf", Role: Voter, Count: 2} err = manager.SetRule(newRule) re.NoError(err) newRule = manager.GetRule("g1", "id") diff --git a/pkg/schedule/placement/rule_test.go b/pkg/schedule/placement/rule_test.go index b91a1f22d65..75d7bab23c9 100644 --- a/pkg/schedule/placement/rule_test.go +++ b/pkg/schedule/placement/rule_test.go @@ -110,9 +110,9 @@ func TestGroupProperties(t *testing.T) { func TestBuildRuleList(t *testing.T) { re := require.New(t) defaultRule := &Rule{ - GroupID: "pd", - ID: "default", - Role: "voter", + GroupID: DefaultGroupID, + ID: DefaultRuleID, + Role: Voter, StartKey: []byte{}, EndKey: []byte{}, Count: 3, @@ -122,13 +122,13 @@ func TestBuildRuleList(t *testing.T) { byteEnd, err := hex.DecodeString("a2") re.NoError(err) ruleMeta := &Rule{ - GroupID: "pd", + GroupID: DefaultGroupID, ID: "meta", Index: 1, Override: true, StartKey: byteStart, EndKey: byteEnd, - Role: "voter", + Role: Voter, Count: 5, } @@ -140,7 +140,7 @@ func TestBuildRuleList(t *testing.T) { { name: "default rule", rules: map[[2]string]*Rule{ - {"pd", "default"}: 
defaultRule, + {DefaultGroupID, DefaultRuleID}: defaultRule, }, expect: ruleList{ ranges: []rangeRules{ @@ -155,8 +155,8 @@ func TestBuildRuleList(t *testing.T) { { name: "metadata case", rules: map[[2]string]*Rule{ - {"pd", "default"}: defaultRule, - {"pd", "meta"}: ruleMeta, + {DefaultGroupID, DefaultRuleID}: defaultRule, + {DefaultGroupID, "meta"}: ruleMeta, }, expect: ruleList{ranges: []rangeRules{ { diff --git a/pkg/schedule/scatter/region_scatterer_test.go b/pkg/schedule/scatter/region_scatterer_test.go index 681b863aea6..70517d23fee 100644 --- a/pkg/schedule/scatter/region_scatterer_test.go +++ b/pkg/schedule/scatter/region_scatterer_test.go @@ -185,7 +185,7 @@ func scatterSpecial(re *require.Assertions, numOrdinaryStores, numSpecialStores, } tc.SetEnablePlacementRules(true) re.NoError(tc.RuleManager.SetRule(&placement.Rule{ - GroupID: "pd", ID: "learner", Role: placement.Learner, Count: 3, + GroupID: placement.DefaultGroupID, ID: "learner", Role: placement.Learner, Count: 3, LabelConstraints: []placement.LabelConstraint{{Key: "engine", Op: placement.In, Values: []string{"tiflash"}}}})) // Region 1 has the same distribution with the Region 2, which is used to test selectPeerToReplace. @@ -575,8 +575,8 @@ func TestRegionHasLearner(t *testing.T) { tc.AddLabelsStore(i, 0, map[string]string{"zone": "z2"}) } tc.RuleManager.SetRule(&placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3, LabelConstraints: []placement.LabelConstraint{ @@ -588,7 +588,7 @@ func TestRegionHasLearner(t *testing.T) { }, }) tc.RuleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "learner", Role: placement.Learner, Count: 1, diff --git a/pkg/schedule/schedulers/balance_witness_test.go b/pkg/schedule/schedulers/balance_witness_test.go index abd4a3b3bba..59bf04c2303 100644 --- a/pkg/schedule/schedulers/balance_witness_test.go +++ b/pkg/schedule/schedulers/balance_witness_test.go @@ -43,8 +43,8 @@ func (suite *balanceWitnessSchedulerTestSuite) SetupTest() { suite.cancel, suite.conf, suite.tc, suite.oc = prepareSchedulersTest() suite.tc.RuleManager.SetRules([]*placement.Rule{ { - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Role: placement.Voter, Count: 4, }, diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index d8f9bbc532c..15c037ddd22 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -582,8 +582,8 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { tc.SetHotRegionCacheHitsThreshold(0) re.NoError(tc.RuleManager.SetRules([]*placement.Rule{ { - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3, LocationLabels: []string{"zone", "host"}, @@ -1143,7 +1143,7 @@ func TestHotWriteRegionScheduleWithRuleEnabled(t *testing.T) { tc.AddRegionStore(3, 20) err = tc.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "leader", Index: 1, Override: true, @@ -1161,7 +1161,7 @@ func TestHotWriteRegionScheduleWithRuleEnabled(t *testing.T) { }) re.NoError(err) err = tc.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "voter", Index: 2, Override: false, diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index 
12ab9f8aa2f..57f1fcf1e3f 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -261,13 +261,13 @@ func TestShuffleRegionRole(t *testing.T) { // update rule to 1leader+1follower+1learner tc.SetEnablePlacementRules(true) tc.RuleManager.SetRule(&placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Role: placement.Voter, Count: 2, }) tc.RuleManager.SetRule(&placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "learner", Role: placement.Learner, Count: 1, @@ -428,8 +428,8 @@ func TestBalanceLeaderWithConflictRule(t *testing.T) { { name: "default Rule", rule: &placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Index: 1, StartKey: []byte(""), EndKey: []byte(""), @@ -442,8 +442,8 @@ func TestBalanceLeaderWithConflictRule(t *testing.T) { { name: "single store allowed to be placed leader", rule: &placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Index: 1, StartKey: []byte(""), EndKey: []byte(""), @@ -463,8 +463,8 @@ func TestBalanceLeaderWithConflictRule(t *testing.T) { { name: "2 store allowed to be placed leader", rule: &placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, Index: 1, StartKey: []byte(""), EndKey: []byte(""), diff --git a/server/api/cluster_test.go b/server/api/cluster_test.go index 01aa6ba5f24..d6d8effa365 100644 --- a/server/api/cluster_test.go +++ b/server/api/cluster_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/stretchr/testify/suite" sc "github.com/tikv/pd/pkg/schedule/config" + "github.com/tikv/pd/pkg/schedule/placement" tu "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/server" "github.com/tikv/pd/server/cluster" @@ -57,7 +58,7 @@ func (suite *clusterTestSuite) TestCluster() { suite.svr.GetPersistOptions().SetPlacementRuleEnabled(true) suite.svr.GetPersistOptions().GetReplicationConfig().LocationLabels = []string{"host"} rm := suite.svr.GetRaftCluster().GetRuleManager() - rule := rm.GetRule("pd", "default") + rule := rm.GetRule(placement.DefaultGroupID, placement.DefaultRuleID) rule.LocationLabels = []string{"host"} rule.Count = 1 rm.SetRule(rule) @@ -81,7 +82,7 @@ func (suite *clusterTestSuite) TestCluster() { c1.MaxPeerCount = 6 suite.Equal(c2, c1) - suite.Equal(int(r.MaxReplicas), suite.svr.GetRaftCluster().GetRuleManager().GetRule("pd", "default").Count) + suite.Equal(int(r.MaxReplicas), suite.svr.GetRaftCluster().GetRuleManager().GetRule(placement.DefaultGroupID, placement.DefaultRuleID).Count) } func (suite *clusterTestSuite) testGetClusterStatus() { diff --git a/server/api/region_test.go b/server/api/region_test.go index 379fcf7d463..0f8f84bfc37 100644 --- a/server/api/region_test.go +++ b/server/api/region_test.go @@ -697,7 +697,7 @@ func (suite *regionsReplicatedTestSuite) TestCheckRegionsReplicated() { Index: 5, Rules: []*placement.Rule{ { - ID: "foo", Index: 1, Role: "voter", Count: 1, + ID: "foo", Index: 1, Role: placement.Voter, Count: 1, }, }, }, @@ -738,7 +738,7 @@ func (suite *regionsReplicatedTestSuite) TestCheckRegionsReplicated() { mustRegionHeartbeat(re, suite.svr, r1) bundle[0].Rules = append(bundle[0].Rules, &placement.Rule{ - ID: "bar", Index: 1, Role: "voter", Count: 1, + ID: "bar", Index: 1, Role: placement.Voter, Count: 1, }) data, err = json.Marshal(bundle) 
suite.NoError(err) @@ -755,7 +755,7 @@ func (suite *regionsReplicatedTestSuite) TestCheckRegionsReplicated() { Index: 6, Rules: []*placement.Rule{ { - ID: "foo", Index: 1, Role: "voter", Count: 2, + ID: "foo", Index: 1, Role: placement.Voter, Count: 2, }, }, }) diff --git a/server/api/rule.go b/server/api/rule.go index 47964d594be..bdb3db2016d 100644 --- a/server/api/rule.go +++ b/server/api/rule.go @@ -273,7 +273,7 @@ func (h *ruleHandler) SetRule(w http.ResponseWriter, r *http.Request) { // sync replicate config with default-rule func (h *ruleHandler) syncReplicateConfigWithDefaultRule(rule *placement.Rule) error { // sync default rule with replicate config - if rule.GroupID == "pd" && rule.ID == "default" { + if rule.GroupID == placement.DefaultGroupID && rule.ID == placement.DefaultRuleID { cfg := h.svr.GetReplicationConfig().Clone() cfg.MaxReplicas = uint64(rule.Count) if err := h.svr.SetReplicationConfig(*cfg); err != nil { diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index 4b9b401e0c9..d424ea98e7b 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -1669,7 +1669,7 @@ func TestCalculateStoreSize1(t *testing.T) { } cluster.ruleManager.SetRule( - &placement.Rule{GroupID: "pd", ID: "zone1", StartKey: []byte(""), EndKey: []byte(""), Role: "voter", Count: 2, + &placement.Rule{GroupID: placement.DefaultGroupID, ID: "zone1", StartKey: []byte(""), EndKey: []byte(""), Role: placement.Voter, Count: 2, LabelConstraints: []placement.LabelConstraint{ {Key: "zone", Op: "in", Values: []string{"zone1"}}, }, @@ -1677,7 +1677,7 @@ func TestCalculateStoreSize1(t *testing.T) { ) cluster.ruleManager.SetRule( - &placement.Rule{GroupID: "pd", ID: "zone2", StartKey: []byte(""), EndKey: []byte(""), Role: "voter", Count: 2, + &placement.Rule{GroupID: placement.DefaultGroupID, ID: "zone2", StartKey: []byte(""), EndKey: []byte(""), Role: placement.Voter, Count: 2, LabelConstraints: []placement.LabelConstraint{ {Key: "zone", Op: "in", Values: []string{"zone2"}}, }, @@ -1685,13 +1685,13 @@ func TestCalculateStoreSize1(t *testing.T) { ) cluster.ruleManager.SetRule( - &placement.Rule{GroupID: "pd", ID: "zone3", StartKey: []byte(""), EndKey: []byte(""), Role: "follower", Count: 1, + &placement.Rule{GroupID: placement.DefaultGroupID, ID: "zone3", StartKey: []byte(""), EndKey: []byte(""), Role: placement.Follower, Count: 1, LabelConstraints: []placement.LabelConstraint{ {Key: "zone", Op: "in", Values: []string{"zone3"}}, }, LocationLabels: []string{"rack", "host"}}, ) - cluster.ruleManager.DeleteRule("pd", "default") + cluster.ruleManager.DeleteRule(placement.DefaultGroupID, placement.DefaultRuleID) regions := newTestRegions(100, 10, 5) for _, region := range regions { @@ -1753,7 +1753,7 @@ func TestCalculateStoreSize2(t *testing.T) { } cluster.ruleManager.SetRule( - &placement.Rule{GroupID: "pd", ID: "dc1", StartKey: []byte(""), EndKey: []byte(""), Role: "voter", Count: 2, + &placement.Rule{GroupID: placement.DefaultGroupID, ID: "dc1", StartKey: []byte(""), EndKey: []byte(""), Role: placement.Voter, Count: 2, LabelConstraints: []placement.LabelConstraint{ {Key: "dc", Op: "in", Values: []string{"dc1"}}, }, @@ -1761,7 +1761,7 @@ func TestCalculateStoreSize2(t *testing.T) { ) cluster.ruleManager.SetRule( - &placement.Rule{GroupID: "pd", ID: "logic3", StartKey: []byte(""), EndKey: []byte(""), Role: "voter", Count: 1, + &placement.Rule{GroupID: placement.DefaultGroupID, ID: "logic3", StartKey: []byte(""), EndKey: []byte(""), Role: placement.Voter, Count: 1, 
LabelConstraints: []placement.LabelConstraint{ {Key: "logic", Op: "in", Values: []string{"logic3"}}, }, @@ -1769,13 +1769,13 @@ func TestCalculateStoreSize2(t *testing.T) { ) cluster.ruleManager.SetRule( - &placement.Rule{GroupID: "pd", ID: "logic4", StartKey: []byte(""), EndKey: []byte(""), Role: "learner", Count: 1, + &placement.Rule{GroupID: placement.DefaultGroupID, ID: "logic4", StartKey: []byte(""), EndKey: []byte(""), Role: placement.Learner, Count: 1, LabelConstraints: []placement.LabelConstraint{ {Key: "logic", Op: "in", Values: []string{"logic4"}}, }, LocationLabels: []string{"dc", "logic", "rack", "host"}}, ) - cluster.ruleManager.DeleteRule("pd", "default") + cluster.ruleManager.DeleteRule(placement.DefaultGroupID, placement.DefaultRuleID) regions := newTestRegions(100, 10, 5) for _, region := range regions { diff --git a/server/server.go b/server/server.go index 38064a3b92f..d4b40af9c18 100644 --- a/server/server.go +++ b/server/server.go @@ -1043,7 +1043,7 @@ func (s *Server) SetReplicationConfig(cfg sc.ReplicationConfig) error { return errs.ErrNotBootstrapped.GenWithStackByArgs() } // replication.MaxReplicas won't work when placement rule is enabled and not only have one default rule. - defaultRule := rc.GetRuleManager().GetRule("pd", "default") + defaultRule := rc.GetRuleManager().GetRule(placement.DefaultGroupID, placement.DefaultRuleID) CheckInDefaultRule := func() error { // replication config won't work when placement rule is enabled and exceeds one default rule diff --git a/tests/integrations/client/http_client_test.go b/tests/integrations/client/http_client_test.go index 03d90c6cd32..d2c88d01f09 100644 --- a/tests/integrations/client/http_client_test.go +++ b/tests/integrations/client/http_client_test.go @@ -21,6 +21,7 @@ import ( "github.com/stretchr/testify/suite" pd "github.com/tikv/pd/client/http" + "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/tests" ) @@ -85,3 +86,45 @@ func (suite *httpClientTestSuite) TestGetMinResolvedTSByStoresIDs() { re.Equal(minResolvedTS, storeMinResolvedTSMap[1]) re.Equal(uint64(math.MaxUint64), storeMinResolvedTSMap[2]) } + +func (suite *httpClientTestSuite) TestRule() { + re := suite.Require() + rules, err := suite.client.GetPlacementRulesByGroup(suite.ctx, placement.DefaultGroupID) + re.NoError(err) + re.Len(rules, 1) + re.Equal(placement.DefaultGroupID, rules[0].GroupID) + re.Equal(placement.DefaultRuleID, rules[0].ID) + re.Equal(pd.Voter, rules[0].Role) + re.Equal(3, rules[0].Count) + err = suite.client.SetPlacementRule(suite.ctx, &pd.Rule{ + GroupID: placement.DefaultGroupID, + ID: "test", + Role: pd.Learner, + Count: 3, + }) + re.NoError(err) + rules, err = suite.client.GetPlacementRulesByGroup(suite.ctx, placement.DefaultGroupID) + re.NoError(err) + re.Len(rules, 2) + re.Equal(placement.DefaultGroupID, rules[1].GroupID) + re.Equal("test", rules[1].ID) + re.Equal(pd.Learner, rules[1].Role) + re.Equal(3, rules[1].Count) + err = suite.client.DeletePlacementRule(suite.ctx, placement.DefaultGroupID, "test") + re.NoError(err) + rules, err = suite.client.GetPlacementRulesByGroup(suite.ctx, placement.DefaultGroupID) + re.NoError(err) + re.Len(rules, 1) + re.Equal(placement.DefaultGroupID, rules[0].GroupID) + re.Equal(placement.DefaultRuleID, rules[0].ID) +} + +func (suite *httpClientTestSuite) TestAccelerateSchedule() { + re := suite.Require() + suspectRegions := suite.cluster.GetLeaderServer().GetRaftCluster().GetSuspectRegions() + re.Len(suspectRegions, 0) + err := suite.client.AccelerateSchedule(suite.ctx, 
[]byte("a1"), []byte("a2")) + re.NoError(err) + suspectRegions = suite.cluster.GetLeaderServer().GetRaftCluster().GetSuspectRegions() + re.Len(suspectRegions, 1) +} diff --git a/tests/integrations/mcs/scheduling/api_test.go b/tests/integrations/mcs/scheduling/api_test.go index cfeaa4db033..f6a7f66a66f 100644 --- a/tests/integrations/mcs/scheduling/api_test.go +++ b/tests/integrations/mcs/scheduling/api_test.go @@ -241,9 +241,9 @@ func (suite *apiTestSuite) TestAPIForward() { tests.MustPutRegion(re, suite.cluster, 2, 1, []byte("a"), []byte("b"), core.SetApproximateSize(60)) rules = []*placement.Rule{ { - GroupID: "pd", - ID: "default", - Role: "voter", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, + Role: placement.Voter, Count: 3, LocationLabels: []string{}, }, diff --git a/tests/integrations/mcs/scheduling/rule_test.go b/tests/integrations/mcs/scheduling/rule_test.go index bffa58d0fe6..761e9b1ecbc 100644 --- a/tests/integrations/mcs/scheduling/rule_test.go +++ b/tests/integrations/mcs/scheduling/rule_test.go @@ -76,8 +76,8 @@ func (suite *ruleTestSuite) TestRuleWatch() { // Check the default rule and rule group. rules := ruleManager.GetAllRules() re.Len(rules, 1) - re.Equal("pd", rules[0].GroupID) - re.Equal("default", rules[0].ID) + re.Equal(placement.DefaultGroupID, rules[0].GroupID) + re.Equal(placement.DefaultRuleID, rules[0].ID) re.Equal(0, rules[0].Index) re.Empty(rules[0].StartKey) re.Empty(rules[0].EndKey) @@ -85,7 +85,7 @@ func (suite *ruleTestSuite) TestRuleWatch() { re.Empty(rules[0].LocationLabels) ruleGroups := ruleManager.GetRuleGroups() re.Len(ruleGroups, 1) - re.Equal("pd", ruleGroups[0].ID) + re.Equal(placement.DefaultGroupID, ruleGroups[0].ID) re.Equal(0, ruleGroups[0].Index) re.False(ruleGroups[0].Override) // Set a new rule via the PD API server. @@ -93,7 +93,7 @@ func (suite *ruleTestSuite) TestRuleWatch() { rule := &placement.Rule{ GroupID: "2", ID: "3", - Role: "voter", + Role: placement.Voter, Count: 1, StartKeyHex: "22", EndKeyHex: "dd", @@ -122,7 +122,7 @@ func (suite *ruleTestSuite) TestRuleWatch() { return len(rules) == 1 }) re.Len(rules, 1) - re.Equal("pd", rules[0].GroupID) + re.Equal(placement.DefaultGroupID, rules[0].GroupID) // Create a new rule group. 
ruleGroup := &placement.RuleGroup{ ID: "2", diff --git a/tests/pdctl/config/config_test.go b/tests/pdctl/config/config_test.go index 315ec3cf7c7..91d6723c2ac 100644 --- a/tests/pdctl/config/config_test.go +++ b/tests/pdctl/config/config_test.go @@ -315,7 +315,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *tests.TestCluster) { re.Contains(string(output), "Success!") // test show - suite.checkShowRuleKey(pdAddr, [][2]string{{"pd", "default"}}) + suite.checkShowRuleKey(pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}) f, _ := os.CreateTemp("/tmp", "pd_tests") fname := f.Name() @@ -323,18 +323,18 @@ func (suite *configTestSuite) checkPlacementRules(cluster *tests.TestCluster) { defer os.RemoveAll(fname) // test load - rules := suite.checkLoadRule(pdAddr, fname, [][2]string{{"pd", "default"}}) + rules := suite.checkLoadRule(pdAddr, fname, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}) // test save rules = append(rules, placement.Rule{ - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test1", - Role: "voter", + Role: placement.Voter, Count: 1, }, placement.Rule{ GroupID: "test-group", ID: "test2", - Role: "voter", + Role: placement.Voter, Count: 2, }) b, _ := json.Marshal(rules) @@ -343,11 +343,11 @@ func (suite *configTestSuite) checkPlacementRules(cluster *tests.TestCluster) { re.NoError(err) // test show group - suite.checkShowRuleKey(pdAddr, [][2]string{{"pd", "default"}, {"pd", "test1"}}, "--group=pd") + suite.checkShowRuleKey(pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}, {placement.DefaultGroupID, "test1"}}, "--group=pd") // test rule region detail tests.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b")) - suite.checkShowRuleKey(pdAddr, [][2]string{{"pd", "default"}}, "--region=1", "--detail") + suite.checkShowRuleKey(pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}, "--region=1", "--detail") // test delete // need clear up args, so create new a cobra.Command. Otherwise gourp still exists. 
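(Orientation note, not part of the patch: the hunks in this change are a mechanical substitution, trading the hard-coded "pd", "default", "voter", "follower", and "learner" literals for named constants from pkg/schedule/placement, mirrored as pd.Voter / pd.Learner on the HTTP client side. The declarations assumed throughout presumably look roughly like the sketch below; the exact definitions in the placement package may differ.)

package placement

// PeerRoleType is the role a placement rule prescribes for a peer.
type PeerRoleType string

const (
	// Role constants mirroring the string literals replaced in this diff;
	// the values are inferred from the substitutions ("voter" -> Voter, etc.).
	Voter    PeerRoleType = "voter"
	Follower PeerRoleType = "follower"
	Learner  PeerRoleType = "learner"
)

const (
	// DefaultGroupID and DefaultRuleID name the built-in rule previously
	// referenced as the literal pair ("pd", "default").
	DefaultGroupID = "pd"
	DefaultRuleID  = "default"
)

Reading the test hunks with these names in mind, e.g. manager.DeleteRule(DefaultGroupID, DefaultRuleID), makes the intent of each substitution easier to follow.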
@@ -356,7 +356,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *tests.TestCluster) { os.WriteFile(fname, b, 0600) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "save", "--in="+fname) re.NoError(err) - suite.checkShowRuleKey(pdAddr, [][2]string{{"pd", "test1"}}, "--group=pd") + suite.checkShowRuleKey(pdAddr, [][2]string{{placement.DefaultGroupID, "test1"}}, "--group=pd") } func (suite *configTestSuite) TestPlacementRuleGroups() { @@ -385,15 +385,15 @@ func (suite *configTestSuite) checkPlacementRuleGroups(cluster *tests.TestCluste // test show var group placement.RuleGroup testutil.Eventually(re, func() bool { // wait for the config to be synced to the scheduling server - output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "show", "pd") + output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "show", placement.DefaultGroupID) re.NoError(err) return !strings.Contains(string(output), "404") }) re.NoError(json.Unmarshal(output, &group), string(output)) - re.Equal(placement.RuleGroup{ID: "pd"}, group) + re.Equal(placement.RuleGroup{ID: placement.DefaultGroupID}, group) // test set - output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "set", "pd", "42", "true") + output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "set", placement.DefaultGroupID, "42", "true") re.NoError(err) re.Contains(string(output), "Success!") output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "set", "group2", "100", "false") @@ -410,7 +410,7 @@ func (suite *configTestSuite) checkPlacementRuleGroups(cluster *tests.TestCluste re.NoError(err) re.NoError(json.Unmarshal(output, &groups)) return reflect.DeepEqual([]placement.RuleGroup{ - {ID: "pd", Index: 42, Override: true}, + {ID: placement.DefaultGroupID, Index: 42, Override: true}, {ID: "group2", Index: 100, Override: false}, {ID: "group3", Index: 200, Override: false}, }, groups) @@ -464,10 +464,10 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *tests.TestCluste // test get var bundle placement.GroupBundle - output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "get", "pd") + output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "get", placement.DefaultGroupID) re.NoError(err) re.NoError(json.Unmarshal(output, &bundle)) - re.Equal(placement.GroupBundle{ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: "default", Role: "voter", Count: 3}}}, bundle) + re.Equal(placement.GroupBundle{ID: placement.DefaultGroupID, Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, bundle) f, err := os.CreateTemp("/tmp", "pd_tests") re.NoError(err) @@ -477,7 +477,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *tests.TestCluste // test load suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{ - {ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: "default", Role: "voter", Count: 3}}}, + {ID: placement.DefaultGroupID, Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) // test set @@ -489,41 +489,41 @@ func (suite *configTestSuite) 
checkPlacementRuleBundle(cluster *tests.TestCluste _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "set", "--in="+fname) re.NoError(err) suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{ - {ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: "default", Role: "voter", Count: 3}}}, - {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}}, + {ID: placement.DefaultGroupID, Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, + {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) // test delete - _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "delete", "pd") + _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "delete", placement.DefaultGroupID) re.NoError(err) suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{ - {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}}, + {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) // test delete regexp bundle.ID = "pf" - bundle.Rules = []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}} + bundle.Rules = []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}} b, err = json.Marshal(bundle) re.NoError(err) re.NoError(os.WriteFile(fname, b, 0600)) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "set", "--in="+fname) re.NoError(err) suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{ - {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}}, - {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}}}, + {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, + {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "delete", "--regexp", ".*f") re.NoError(err) bundles := []placement.GroupBundle{ - {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}}, + {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, } suite.checkLoadRuleBundle(pdAddr, fname, bundles) // test save - bundle.Rules = []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}} + bundle.Rules = []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}} bundles = append(bundles, bundle) b, err = json.Marshal(bundles) re.NoError(err) @@ -531,8 +531,8 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *tests.TestCluste _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "save", "--in="+fname) re.NoError(err) 
suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{ - {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}}, - {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}}}, + {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, + {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) // partial update, so still one group is left, no error @@ -544,7 +544,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *tests.TestCluste re.NoError(err) suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{ - {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}}}, + {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) } @@ -715,7 +715,7 @@ func (suite *configTestSuite) checkUpdateDefaultReplicaConfig(cluster *tests.Tes } checkRuleCount := func(expect int) { - args := []string{"-u", pdAddr, "config", "placement-rules", "show", "--group", "pd", "--id", "default"} + args := []string{"-u", pdAddr, "config", "placement-rules", "show", "--group", placement.DefaultGroupID, "--id", placement.DefaultRuleID} output, err := pdctl.ExecuteCommand(cmd, args...) re.NoError(err) rule := placement.Rule{} @@ -726,7 +726,7 @@ func (suite *configTestSuite) checkUpdateDefaultReplicaConfig(cluster *tests.Tes } checkRuleLocationLabels := func(expect int) { - args := []string{"-u", pdAddr, "config", "placement-rules", "show", "--group", "pd", "--id", "default"} + args := []string{"-u", pdAddr, "config", "placement-rules", "show", "--group", placement.DefaultGroupID, "--id", placement.DefaultRuleID} output, err := pdctl.ExecuteCommand(cmd, args...) re.NoError(err) rule := placement.Rule{} @@ -737,7 +737,7 @@ func (suite *configTestSuite) checkUpdateDefaultReplicaConfig(cluster *tests.Tes } checkRuleIsolationLevel := func(expect string) { - args := []string{"-u", pdAddr, "config", "placement-rules", "show", "--group", "pd", "--id", "default"} + args := []string{"-u", pdAddr, "config", "placement-rules", "show", "--group", placement.DefaultGroupID, "--id", placement.DefaultRuleID} output, err := pdctl.ExecuteCommand(cmd, args...) 
re.NoError(err) rule := placement.Rule{} @@ -791,7 +791,7 @@ func (suite *configTestSuite) checkUpdateDefaultReplicaConfig(cluster *tests.Tes fname := suite.T().TempDir() rules := []placement.Rule{ { - GroupID: "pd", + GroupID: placement.DefaultGroupID, ID: "test1", Role: "voter", Count: 1, diff --git a/tests/server/api/operator_test.go b/tests/server/api/operator_test.go index 908daf21aac..e36ead7e44d 100644 --- a/tests/server/api/operator_test.go +++ b/tests/server/api/operator_test.go @@ -461,7 +461,7 @@ func (suite *operatorTestSuite) checkTransferRegionWithPlacementRule(cluster *te // add customized rule first and then remove default rule err := manager.SetRules(testCase.rules) suite.NoError(err) - err = manager.DeleteRule("pd", "default") + err = manager.DeleteRule(placement.DefaultGroupID, placement.DefaultRuleID) suite.NoError(err) } if testCase.expectedError == nil { diff --git a/tests/server/api/rule_test.go b/tests/server/api/rule_test.go index 6d292021767..861fbe5cf32 100644 --- a/tests/server/api/rule_test.go +++ b/tests/server/api/rule_test.go @@ -56,7 +56,7 @@ func (suite *ruleTestSuite) checkSet(cluster *tests.TestCluster) { pdAddr := leaderServer.GetAddr() urlPrefix := fmt.Sprintf("%s%s/api/v1/config", pdAddr, apiPrefix) - rule := placement.Rule{GroupID: "a", ID: "10", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1} + rule := placement.Rule{GroupID: "a", ID: "10", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: 1} successData, err := json.Marshal(rule) suite.NoError(err) oldStartKey, err := hex.DecodeString(rule.StartKeyHex) @@ -64,13 +64,13 @@ func (suite *ruleTestSuite) checkSet(cluster *tests.TestCluster) { oldEndKey, err := hex.DecodeString(rule.EndKeyHex) suite.NoError(err) parseErrData := []byte("foo") - rule1 := placement.Rule{GroupID: "a", ID: "10", StartKeyHex: "XXXX", EndKeyHex: "3333", Role: "voter", Count: 1} + rule1 := placement.Rule{GroupID: "a", ID: "10", StartKeyHex: "XXXX", EndKeyHex: "3333", Role: placement.Voter, Count: 1} checkErrData, err := json.Marshal(rule1) suite.NoError(err) - rule2 := placement.Rule{GroupID: "a", ID: "10", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: -1} + rule2 := placement.Rule{GroupID: "a", ID: "10", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: -1} setErrData, err := json.Marshal(rule2) suite.NoError(err) - rule3 := placement.Rule{GroupID: "a", ID: "10", StartKeyHex: "1111", EndKeyHex: "3333", Role: "follower", Count: 3} + rule3 := placement.Rule{GroupID: "a", ID: "10", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Follower, Count: 3} updateData, err := json.Marshal(rule3) suite.NoError(err) newStartKey, err := hex.DecodeString(rule.StartKeyHex) @@ -179,7 +179,7 @@ func (suite *ruleTestSuite) checkGet(cluster *tests.TestCluster) { pdAddr := leaderServer.GetAddr() urlPrefix := fmt.Sprintf("%s%s/api/v1/config", pdAddr, apiPrefix) - rule := placement.Rule{GroupID: "a", ID: "20", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1} + rule := placement.Rule{GroupID: "a", ID: "20", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: 1} data, err := json.Marshal(rule) suite.NoError(err) re := suite.Require() @@ -200,7 +200,7 @@ func (suite *ruleTestSuite) checkGet(cluster *tests.TestCluster) { }, { name: "not found", - rule: placement.Rule{GroupID: "a", ID: "30", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1}, + rule: placement.Rule{GroupID: "a", ID: "30", StartKeyHex: "1111", EndKeyHex: "3333", 
Role: placement.Voter, Count: 1}, found: false, code: http.StatusNotFound, }, @@ -237,7 +237,7 @@ func (suite *ruleTestSuite) checkGetAll(cluster *tests.TestCluster) { pdAddr := leaderServer.GetAddr() urlPrefix := fmt.Sprintf("%s%s/api/v1/config", pdAddr, apiPrefix) - rule := placement.Rule{GroupID: "b", ID: "20", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1} + rule := placement.Rule{GroupID: "b", ID: "20", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: 1} data, err := json.Marshal(rule) suite.NoError(err) re := suite.Require() @@ -266,16 +266,16 @@ func (suite *ruleTestSuite) checkSetAll(cluster *tests.TestCluster) { pdAddr := leaderServer.GetAddr() urlPrefix := fmt.Sprintf("%s%s/api/v1/config", pdAddr, apiPrefix) - rule1 := placement.Rule{GroupID: "a", ID: "12", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1} - rule2 := placement.Rule{GroupID: "b", ID: "12", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1} - rule3 := placement.Rule{GroupID: "a", ID: "12", StartKeyHex: "XXXX", EndKeyHex: "3333", Role: "voter", Count: 1} - rule4 := placement.Rule{GroupID: "a", ID: "12", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: -1} - rule5 := placement.Rule{GroupID: "pd", ID: "default", StartKeyHex: "", EndKeyHex: "", Role: "voter", Count: 1, + rule1 := placement.Rule{GroupID: "a", ID: "12", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: 1} + rule2 := placement.Rule{GroupID: "b", ID: "12", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: 1} + rule3 := placement.Rule{GroupID: "a", ID: "12", StartKeyHex: "XXXX", EndKeyHex: "3333", Role: placement.Voter, Count: 1} + rule4 := placement.Rule{GroupID: "a", ID: "12", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: -1} + rule5 := placement.Rule{GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, StartKeyHex: "", EndKeyHex: "", Role: placement.Voter, Count: 1, LocationLabels: []string{"host"}} - rule6 := placement.Rule{GroupID: "pd", ID: "default", StartKeyHex: "", EndKeyHex: "", Role: "voter", Count: 3} + rule6 := placement.Rule{GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, StartKeyHex: "", EndKeyHex: "", Role: placement.Voter, Count: 3} leaderServer.GetPersistOptions().GetReplicationConfig().LocationLabels = []string{"host"} - defaultRule := leaderServer.GetRaftCluster().GetRuleManager().GetRule("pd", "default") + defaultRule := leaderServer.GetRaftCluster().GetRuleManager().GetRule(placement.DefaultGroupID, placement.DefaultRuleID) defaultRule.LocationLabels = []string{"host"} leaderServer.GetRaftCluster().GetRuleManager().SetRule(defaultRule) @@ -390,13 +390,13 @@ func (suite *ruleTestSuite) checkGetAllByGroup(cluster *tests.TestCluster) { urlPrefix := fmt.Sprintf("%s%s/api/v1/config", pdAddr, apiPrefix) re := suite.Require() - rule := placement.Rule{GroupID: "c", ID: "20", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1} + rule := placement.Rule{GroupID: "c", ID: "20", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: 1} data, err := json.Marshal(rule) suite.NoError(err) err = tu.CheckPostJSON(testDialClient, urlPrefix+"/rule", data, tu.StatusOK(re)) suite.NoError(err) - rule1 := placement.Rule{GroupID: "c", ID: "30", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1} + rule1 := placement.Rule{GroupID: "c", ID: "30", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: 1} data, err = json.Marshal(rule1) 
suite.NoError(err) err = tu.CheckPostJSON(testDialClient, urlPrefix+"/rule", data, tu.StatusOK(re)) @@ -453,7 +453,7 @@ func (suite *ruleTestSuite) checkGetAllByRegion(cluster *tests.TestCluster) { pdAddr := leaderServer.GetAddr() urlPrefix := fmt.Sprintf("%s%s/api/v1/config", pdAddr, apiPrefix) - rule := placement.Rule{GroupID: "e", ID: "20", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1} + rule := placement.Rule{GroupID: "e", ID: "20", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: 1} data, err := json.Marshal(rule) suite.NoError(err) re := suite.Require() @@ -525,7 +525,7 @@ func (suite *ruleTestSuite) checkGetAllByKey(cluster *tests.TestCluster) { pdAddr := leaderServer.GetAddr() urlPrefix := fmt.Sprintf("%s%s/api/v1/config", pdAddr, apiPrefix) - rule := placement.Rule{GroupID: "f", ID: "40", StartKeyHex: "8888", EndKeyHex: "9111", Role: "voter", Count: 1} + rule := placement.Rule{GroupID: "f", ID: "40", StartKeyHex: "8888", EndKeyHex: "9111", Role: placement.Voter, Count: 1} data, err := json.Marshal(rule) suite.NoError(err) re := suite.Require() @@ -589,7 +589,7 @@ func (suite *ruleTestSuite) checkDelete(cluster *tests.TestCluster) { pdAddr := leaderServer.GetAddr() urlPrefix := fmt.Sprintf("%s%s/api/v1/config", pdAddr, apiPrefix) - rule := placement.Rule{GroupID: "g", ID: "10", StartKeyHex: "8888", EndKeyHex: "9111", Role: "voter", Count: 1} + rule := placement.Rule{GroupID: "g", ID: "10", StartKeyHex: "8888", EndKeyHex: "9111", Role: placement.Voter, Count: 1} data, err := json.Marshal(rule) suite.NoError(err) err = tu.CheckPostJSON(testDialClient, urlPrefix+"/rule", data, tu.StatusOK(suite.Require())) @@ -663,19 +663,19 @@ func (suite *ruleTestSuite) checkBatch(cluster *tests.TestCluster) { opt1 := placement.RuleOp{ Action: placement.RuleOpAdd, - Rule: &placement.Rule{GroupID: "a", ID: "13", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1}, + Rule: &placement.Rule{GroupID: "a", ID: "13", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: 1}, } opt2 := placement.RuleOp{ Action: placement.RuleOpAdd, - Rule: &placement.Rule{GroupID: "b", ID: "13", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1}, + Rule: &placement.Rule{GroupID: "b", ID: "13", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: 1}, } opt3 := placement.RuleOp{ Action: placement.RuleOpAdd, - Rule: &placement.Rule{GroupID: "a", ID: "14", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1}, + Rule: &placement.Rule{GroupID: "a", ID: "14", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: 1}, } opt4 := placement.RuleOp{ Action: placement.RuleOpAdd, - Rule: &placement.Rule{GroupID: "a", ID: "15", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: 1}, + Rule: &placement.Rule{GroupID: "a", ID: "15", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: 1}, } opt5 := placement.RuleOp{ Action: placement.RuleOpDel, @@ -692,11 +692,11 @@ func (suite *ruleTestSuite) checkBatch(cluster *tests.TestCluster) { } opt8 := placement.RuleOp{ Action: placement.RuleOpAdd, - Rule: &placement.Rule{GroupID: "a", ID: "16", StartKeyHex: "XXXX", EndKeyHex: "3333", Role: "voter", Count: 1}, + Rule: &placement.Rule{GroupID: "a", ID: "16", StartKeyHex: "XXXX", EndKeyHex: "3333", Role: placement.Voter, Count: 1}, } opt9 := placement.RuleOp{ Action: placement.RuleOpAdd, - Rule: &placement.Rule{GroupID: "a", ID: "17", StartKeyHex: "1111", EndKeyHex: "3333", Role: "voter", Count: -1}, + 
Rule: &placement.Rule{GroupID: "a", ID: "17", StartKeyHex: "1111", EndKeyHex: "3333", Role: placement.Voter, Count: -1}, } successData1, err := json.Marshal([]placement.RuleOp{opt1, opt2, opt3}) @@ -800,9 +800,14 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { re := suite.Require() // GetAll b1 := placement.GroupBundle{ - ID: "pd", + ID: placement.DefaultGroupID, Rules: []*placement.Rule{ - {GroupID: "pd", ID: "default", Role: "voter", Count: 3}, + { + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, + Role: placement.Voter, + Count: 3, + }, }, } var bundles []placement.GroupBundle @@ -817,7 +822,7 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { Index: 42, Override: true, Rules: []*placement.Rule{ - {GroupID: "foo", ID: "bar", Index: 1, Override: true, Role: "voter", Count: 1}, + {GroupID: "foo", ID: "bar", Index: 1, Override: true, Role: placement.Voter, Count: 1}, }, } data, err := json.Marshal(b2) @@ -849,7 +854,7 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { suite.compareBundle(bundles[0], b2) // SetAll - b2.Rules = append(b2.Rules, &placement.Rule{GroupID: "foo", ID: "baz", Index: 2, Role: "follower", Count: 1}) + b2.Rules = append(b2.Rules, &placement.Rule{GroupID: "foo", ID: "baz", Index: 2, Role: placement.Follower, Count: 1}) b2.Index, b2.Override = 0, false b3 := placement.GroupBundle{ID: "foobar", Index: 100} data, err = json.Marshal([]placement.GroupBundle{b1, b2, b3}) @@ -880,7 +885,7 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { b4 := placement.GroupBundle{ Index: 4, Rules: []*placement.Rule{ - {ID: "bar", Index: 1, Override: true, Role: "voter", Count: 1}, + {ID: "bar", Index: 1, Override: true, Role: placement.Voter, Count: 1}, }, } data, err = json.Marshal(b4) @@ -908,7 +913,7 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { ID: "rule-without-group-id-2", Index: 5, Rules: []*placement.Rule{ - {ID: "bar", Index: 1, Override: true, Role: "voter", Count: 1}, + {ID: "bar", Index: 1, Override: true, Role: placement.Voter, Count: 1}, }, } data, err = json.Marshal([]placement.GroupBundle{b1, b4, b5}) diff --git a/tools/pd-simulator/simulator/cases/diagnose_rule.go b/tools/pd-simulator/simulator/cases/diagnose_rule.go index b4b30fdc772..6cd76c854b7 100644 --- a/tools/pd-simulator/simulator/cases/diagnose_rule.go +++ b/tools/pd-simulator/simulator/cases/diagnose_rule.go @@ -46,8 +46,8 @@ func newRule1() *Case { }, LocationLabels: []string{"host"}, }, &placement.Rule{ - GroupID: "pd", - ID: "default", + GroupID: placement.DefaultGroupID, + ID: placement.DefaultRuleID, StartKeyHex: "", EndKeyHex: "", Role: placement.Voter,