From 5054d41ee91437b2a94b762a8cd780381e2fc733 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Fri, 19 May 2023 14:11:04 +0800 Subject: [PATCH 1/2] introduce the cluster informer for decoupling the dependencies Signed-off-by: Ryan Leung --- pkg/keyspace/keyspace.go | 6 ++-- pkg/mock/mockcluster/mockcluster.go | 11 ++++-- pkg/schedule/checker/checker_controller.go | 5 +-- pkg/schedule/checker/joint_state_checker.go | 6 ++-- pkg/schedule/checker/learner_checker.go | 6 ++-- pkg/schedule/checker/merge_checker.go | 10 +++--- pkg/schedule/checker/priority_inspector.go | 6 ++-- pkg/schedule/checker/replica_checker.go | 6 ++-- pkg/schedule/checker/replica_strategy.go | 4 +-- pkg/schedule/checker/rule_checker.go | 8 ++--- pkg/schedule/checker/split_checker.go | 6 ++-- pkg/schedule/filter/healthy.go | 22 +++--------- pkg/schedule/filter/region_filters.go | 11 +++--- pkg/schedule/operator/builder.go | 16 ++------- pkg/schedule/operator/create_operator.go | 33 ++++++++--------- pkg/schedule/operator/step.go | 29 +++++++-------- pkg/schedule/operator_controller.go | 15 ++++---- pkg/schedule/range_cluster.go | 21 +++++------ pkg/schedule/region_scatterer.go | 5 +-- pkg/schedule/region_splitter.go | 9 ++--- pkg/schedule/scheduler.go | 9 ++--- pkg/schedule/schedulers/balance_leader.go | 7 ++-- pkg/schedule/schedulers/balance_region.go | 5 +-- pkg/schedule/schedulers/balance_witness.go | 5 +-- pkg/schedule/schedulers/base_scheduler.go | 5 +-- pkg/schedule/schedulers/evict_leader.go | 15 ++++---- pkg/schedule/schedulers/evict_slow_store.go | 15 ++++---- pkg/schedule/schedulers/evict_slow_trend.go | 27 +++++++------- pkg/schedule/schedulers/grant_hot_region.go | 13 +++---- pkg/schedule/schedulers/grant_leader.go | 11 +++--- pkg/schedule/schedulers/hot_region.go | 35 +++++++++--------- pkg/schedule/schedulers/hot_region_config.go | 3 +- pkg/schedule/schedulers/label.go | 5 +-- pkg/schedule/schedulers/random_merge.go | 7 ++-- pkg/schedule/schedulers/scatter_range.go | 9 ++--- pkg/schedule/schedulers/shuffle_hot_region.go | 7 ++-- pkg/schedule/schedulers/shuffle_leader.go | 5 +-- pkg/schedule/schedulers/shuffle_region.go | 9 ++--- pkg/schedule/schedulers/split_bucket.go | 7 ++-- .../schedulers/transfer_witness_leader.go | 9 ++--- pkg/schedule/schedulers/utils.go | 9 ++--- .../cluster_informer.go} | 36 +++++++++++++------ plugin/scheduler_example/evict_leader.go | 11 +++--- server/cluster/coordinator_test.go | 3 +- server/replication/replication_mode.go | 6 ++-- 45 files changed, 268 insertions(+), 240 deletions(-) rename pkg/schedule/{cluster.go => scheduling/cluster_informer.go} (58%) diff --git a/pkg/keyspace/keyspace.go b/pkg/keyspace/keyspace.go index 986fc817a5b..87eaa628517 100644 --- a/pkg/keyspace/keyspace.go +++ b/pkg/keyspace/keyspace.go @@ -26,8 +26,8 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/id" "github.com/tikv/pd/pkg/mcs/utils" - "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/labeler" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" @@ -71,7 +71,7 @@ type Manager struct { // store is the storage for keyspace related information. store endpoint.KeyspaceStorage // rc is the raft cluster of the server. - cluster schedule.Cluster + cluster scheduling.ClusterInformer // ctx is the context of the manager, to be used in transaction. ctx context.Context // config is the configurations of the manager. 
@@ -98,7 +98,7 @@ type CreateKeyspaceRequest struct { func NewKeyspaceManager( ctx context.Context, store endpoint.KeyspaceStorage, - cluster schedule.Cluster, + cluster scheduling.ClusterInformer, idAllocator id.Allocator, config Config, kgm *GroupManager, diff --git a/pkg/mock/mockcluster/mockcluster.go b/pkg/mock/mockcluster/mockcluster.go index 87cbc8479b2..b1b18cad74e 100644 --- a/pkg/mock/mockcluster/mockcluster.go +++ b/pkg/mock/mockcluster/mockcluster.go @@ -47,6 +47,7 @@ const ( // Cluster is used to mock a cluster for test purpose. type Cluster struct { + ctx context.Context *core.BasicCluster *mockid.IDAllocator *placement.RuleManager @@ -57,12 +58,13 @@ type Cluster struct { suspectRegions map[uint64]struct{} *config.StoreConfigManager *buckets.HotBucketCache - ctx context.Context + storage.Storage } // NewCluster creates a new Cluster func NewCluster(ctx context.Context, opts *config.PersistOptions) *Cluster { clus := &Cluster{ + ctx: ctx, BasicCluster: core.NewBasicCluster(), IDAllocator: mockid.NewIDAllocator(), HotStat: statistics.NewHotStat(ctx), @@ -70,7 +72,7 @@ func NewCluster(ctx context.Context, opts *config.PersistOptions) *Cluster { PersistOptions: opts, suspectRegions: map[uint64]struct{}{}, StoreConfigManager: config.NewTestStoreConfigManager(nil), - ctx: ctx, + Storage: storage.NewStorageWithMemoryBackend(), } if clus.PersistOptions.GetReplicationConfig().EnablePlacementRules { clus.initRuleManager() @@ -96,6 +98,11 @@ func (mc *Cluster) GetAllocator() id.Allocator { return mc.IDAllocator } +// GetStorage returns the storage. +func (mc *Cluster) GetStorage() storage.Storage { + return mc.Storage +} + // ScanRegions scans region with start key, until number greater than limit. func (mc *Cluster) ScanRegions(startKey, endKey []byte, limit int) []*core.RegionInfo { return mc.ScanRange(startKey, endKey, limit) diff --git a/pkg/schedule/checker/checker_controller.go b/pkg/schedule/checker/checker_controller.go index bb825ac0909..d95a695f2d6 100644 --- a/pkg/schedule/checker/checker_controller.go +++ b/pkg/schedule/checker/checker_controller.go @@ -27,6 +27,7 @@ import ( "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/keyutil" ) @@ -37,7 +38,7 @@ var denyCheckersByLabelerCounter = schedule.LabelerEventCounter.WithLabelValues( // Controller is used to manage all checkers. type Controller struct { - cluster schedule.Cluster + cluster scheduling.ClusterInformer conf config.Config opController *schedule.OperatorController learnerChecker *LearnerChecker @@ -53,7 +54,7 @@ type Controller struct { } // NewController create a new Controller. 
-func NewController(ctx context.Context, cluster schedule.Cluster, conf config.Config, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler, opController *schedule.OperatorController) *Controller { +func NewController(ctx context.Context, cluster scheduling.ClusterInformer, conf config.Config, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler, opController *schedule.OperatorController) *Controller { regionWaitingList := cache.NewDefaultCache(DefaultCacheSize) return &Controller{ cluster: cluster, diff --git a/pkg/schedule/checker/joint_state_checker.go b/pkg/schedule/checker/joint_state_checker.go index 83a463b24fa..c2ef2af41ae 100644 --- a/pkg/schedule/checker/joint_state_checker.go +++ b/pkg/schedule/checker/joint_state_checker.go @@ -19,14 +19,14 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/operator" + "github.com/tikv/pd/pkg/schedule/scheduling" ) // JointStateChecker ensures region is in joint state will leave. type JointStateChecker struct { PauseController - cluster schedule.Cluster + cluster scheduling.ClusterInformer } const jointStateCheckerName = "joint_state_checker" @@ -41,7 +41,7 @@ var ( ) // NewJointStateChecker creates a joint state checker. -func NewJointStateChecker(cluster schedule.Cluster) *JointStateChecker { +func NewJointStateChecker(cluster scheduling.ClusterInformer) *JointStateChecker { return &JointStateChecker{ cluster: cluster, } diff --git a/pkg/schedule/checker/learner_checker.go b/pkg/schedule/checker/learner_checker.go index 04d2545c52b..dba310fe501 100644 --- a/pkg/schedule/checker/learner_checker.go +++ b/pkg/schedule/checker/learner_checker.go @@ -18,14 +18,14 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/operator" + "github.com/tikv/pd/pkg/schedule/scheduling" ) // LearnerChecker ensures region has a learner will be promoted. type LearnerChecker struct { PauseController - cluster schedule.Cluster + cluster scheduling.ClusterInformer } var ( @@ -34,7 +34,7 @@ var ( ) // NewLearnerChecker creates a learner checker. -func NewLearnerChecker(cluster schedule.Cluster) *LearnerChecker { +func NewLearnerChecker(cluster scheduling.ClusterInformer) *LearnerChecker { return &LearnerChecker{ cluster: cluster, } diff --git a/pkg/schedule/checker/merge_checker.go b/pkg/schedule/checker/merge_checker.go index 2ebc8537b4a..ec95660b3b6 100644 --- a/pkg/schedule/checker/merge_checker.go +++ b/pkg/schedule/checker/merge_checker.go @@ -25,12 +25,12 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/logutil" ) @@ -76,14 +76,14 @@ var ( // MergeChecker ensures region to merge with adjacent region when size is small type MergeChecker struct { PauseController - cluster schedule.Cluster + cluster scheduling.ClusterInformer conf config.Config splitCache *cache.TTLUint64 startTime time.Time // it's used to judge whether server recently start. } // NewMergeChecker creates a merge checker. 
-func NewMergeChecker(ctx context.Context, cluster schedule.Cluster, conf config.Config) *MergeChecker { +func NewMergeChecker(ctx context.Context, cluster scheduling.ClusterInformer, conf config.Config) *MergeChecker { splitCache := cache.NewIDTTL(ctx, time.Minute, conf.GetSplitMergeInterval()) return &MergeChecker{ cluster: cluster, @@ -250,7 +250,7 @@ func (m *MergeChecker) checkTarget(region, adjacent *core.RegionInfo) bool { } // AllowMerge returns true if two regions can be merged according to the key type. -func AllowMerge(cluster schedule.Cluster, region, adjacent *core.RegionInfo) bool { +func AllowMerge(cluster scheduling.ClusterInformer, region, adjacent *core.RegionInfo) bool { var start, end []byte if bytes.Equal(region.GetEndKey(), adjacent.GetStartKey()) && len(region.GetEndKey()) != 0 { start, end = region.GetStartKey(), adjacent.GetEndKey() @@ -306,7 +306,7 @@ func isTableIDSame(region, adjacent *core.RegionInfo) bool { // Check whether there is a peer of the adjacent region on an offline store, // while the source region has no peer on it. This is to prevent from bringing // any other peer into an offline store to slow down the offline process. -func checkPeerStore(cluster schedule.Cluster, region, adjacent *core.RegionInfo) bool { +func checkPeerStore(cluster scheduling.ClusterInformer, region, adjacent *core.RegionInfo) bool { regionStoreIDs := region.GetStoreIDs() for _, peer := range adjacent.GetPeers() { storeID := peer.GetStoreId() diff --git a/pkg/schedule/checker/priority_inspector.go b/pkg/schedule/checker/priority_inspector.go index 98e8f4e36ad..3913af809ee 100644 --- a/pkg/schedule/checker/priority_inspector.go +++ b/pkg/schedule/checker/priority_inspector.go @@ -19,9 +19,9 @@ import ( "github.com/tikv/pd/pkg/cache" "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/schedule/scheduling" ) // the default value of priority queue size @@ -29,13 +29,13 @@ const defaultPriorityQueueSize = 1280 // PriorityInspector ensures high priority region should run first type PriorityInspector struct { - cluster schedule.Cluster + cluster scheduling.ClusterInformer conf config.Config queue *cache.PriorityQueue } // NewPriorityInspector creates a priority inspector. -func NewPriorityInspector(cluster schedule.Cluster, conf config.Config) *PriorityInspector { +func NewPriorityInspector(cluster scheduling.ClusterInformer, conf config.Config) *PriorityInspector { return &PriorityInspector{ cluster: cluster, conf: conf, diff --git a/pkg/schedule/checker/replica_checker.go b/pkg/schedule/checker/replica_checker.go index 1149d8ccd5e..f8a36c99c06 100644 --- a/pkg/schedule/checker/replica_checker.go +++ b/pkg/schedule/checker/replica_checker.go @@ -23,9 +23,9 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/operator" + "github.com/tikv/pd/pkg/schedule/scheduling" "go.uber.org/zap" ) @@ -61,13 +61,13 @@ var ( // Location management, mainly used for cross data center deployment. type ReplicaChecker struct { PauseController - cluster schedule.Cluster + cluster scheduling.ClusterInformer conf config.Config regionWaitingList cache.Cache } // NewReplicaChecker creates a replica checker. 
-func NewReplicaChecker(cluster schedule.Cluster, conf config.Config, regionWaitingList cache.Cache) *ReplicaChecker { +func NewReplicaChecker(cluster scheduling.ClusterInformer, conf config.Config, regionWaitingList cache.Cache) *ReplicaChecker { return &ReplicaChecker{ cluster: cluster, conf: conf, diff --git a/pkg/schedule/checker/replica_strategy.go b/pkg/schedule/checker/replica_strategy.go index 8b9b055f0a7..03b35606ce7 100644 --- a/pkg/schedule/checker/replica_strategy.go +++ b/pkg/schedule/checker/replica_strategy.go @@ -18,8 +18,8 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" - "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/filter" + "github.com/tikv/pd/pkg/schedule/scheduling" "go.uber.org/zap" ) @@ -27,7 +27,7 @@ import ( // exists to allow replica_checker and rule_checker to reuse common logics. type ReplicaStrategy struct { checkerName string // replica-checker / rule-checker - cluster schedule.Cluster + cluster scheduling.ClusterInformer locationLabels []string isolationLevel string region *core.RegionInfo diff --git a/pkg/schedule/checker/rule_checker.go b/pkg/schedule/checker/rule_checker.go index 13b69484b04..45509df9ad9 100644 --- a/pkg/schedule/checker/rule_checker.go +++ b/pkg/schedule/checker/rule_checker.go @@ -27,10 +27,10 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/versioninfo" "go.uber.org/zap" ) @@ -81,7 +81,7 @@ var ( // RuleChecker fix/improve region by placement rules. type RuleChecker struct { PauseController - cluster schedule.Cluster + cluster scheduling.ClusterInformer ruleManager *placement.RuleManager name string regionWaitingList cache.Cache @@ -91,7 +91,7 @@ type RuleChecker struct { } // NewRuleChecker creates a checker instance. -func NewRuleChecker(ctx context.Context, cluster schedule.Cluster, ruleManager *placement.RuleManager, regionWaitingList cache.Cache) *RuleChecker { +func NewRuleChecker(ctx context.Context, cluster scheduling.ClusterInformer, ruleManager *placement.RuleManager, regionWaitingList cache.Cache) *RuleChecker { return &RuleChecker{ cluster: cluster, ruleManager: ruleManager, @@ -572,7 +572,7 @@ func (o *recorder) incOfflineLeaderCount(storeID uint64) { // Offline is triggered manually and only appears when the node makes some adjustments. here is an operator timeout / 2. var offlineCounterTTL = 5 * time.Minute -func (o *recorder) refresh(cluster schedule.Cluster) { +func (o *recorder) refresh(cluster scheduling.ClusterInformer) { // re-count the offlineLeaderCounter if the store is already tombstone or store is gone. 
if len(o.offlineLeaderCounter) > 0 && time.Since(o.lastUpdateTime) > offlineCounterTTL { needClean := false diff --git a/pkg/schedule/checker/split_checker.go b/pkg/schedule/checker/split_checker.go index f89604909ea..02db5514f0f 100644 --- a/pkg/schedule/checker/split_checker.go +++ b/pkg/schedule/checker/split_checker.go @@ -19,16 +19,16 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/schedule/scheduling" ) // SplitChecker splits regions when the key range spans across rule/label boundary. type SplitChecker struct { PauseController - cluster schedule.Cluster + cluster scheduling.ClusterInformer ruleManager *placement.RuleManager labeler *labeler.RegionLabeler } @@ -42,7 +42,7 @@ var ( ) // NewSplitChecker creates a new SplitChecker. -func NewSplitChecker(cluster schedule.Cluster, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler) *SplitChecker { +func NewSplitChecker(cluster scheduling.ClusterInformer, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler) *SplitChecker { return &SplitChecker{ cluster: cluster, ruleManager: ruleManager, diff --git a/pkg/schedule/filter/healthy.go b/pkg/schedule/filter/healthy.go index 905aadca1c8..c249bd3c432 100644 --- a/pkg/schedule/filter/healthy.go +++ b/pkg/schedule/filter/healthy.go @@ -16,8 +16,7 @@ package filter import ( "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/pkg/schedule/config" - "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/schedule/scheduling" ) // IsRegionHealthy checks if a region is healthy for scheduling. It requires the @@ -43,30 +42,17 @@ func hasDownPeers(region *core.RegionInfo) bool { // IsRegionReplicated checks if a region is fully replicated. When placement // rules is enabled, its peers should fit corresponding rules. When placement // rules is disabled, it should have enough replicas and no any learner peer. -func IsRegionReplicated(cluster regionHealthCluster, region *core.RegionInfo) bool { +func IsRegionReplicated(cluster scheduling.RegionHealthCluster, region *core.RegionInfo) bool { if cluster.GetOpts().IsPlacementRulesEnabled() { return isRegionPlacementRuleSatisfied(cluster, region) } return isRegionReplicasSatisfied(cluster, region) } -func isRegionPlacementRuleSatisfied(cluster regionHealthCluster, region *core.RegionInfo) bool { +func isRegionPlacementRuleSatisfied(cluster scheduling.RegionHealthCluster, region *core.RegionInfo) bool { return cluster.GetRuleManager().FitRegion(cluster, region).IsSatisfied() } -func isRegionReplicasSatisfied(cluster regionHealthCluster, region *core.RegionInfo) bool { +func isRegionReplicasSatisfied(cluster scheduling.RegionHealthCluster, region *core.RegionInfo) bool { return len(region.GetLearners()) == 0 && len(region.GetPeers()) == cluster.GetOpts().GetMaxReplicas() } - -// ReplicatedRegion returns a function that checks if a region is fully replicated. -func ReplicatedRegion(cluster regionHealthCluster) func(*core.RegionInfo) bool { - return func(region *core.RegionInfo) bool { return IsRegionReplicated(cluster, region) } -} - -// cluster provides an overview of a cluster's regions distribution. 
-type regionHealthCluster interface { - core.StoreSetInformer - core.RegionSetInformer - GetOpts() config.Config - GetRuleManager() *placement.RuleManager -} diff --git a/pkg/schedule/filter/region_filters.go b/pkg/schedule/filter/region_filters.go index dce0aeaa816..38b25dbb14f 100644 --- a/pkg/schedule/filter/region_filters.go +++ b/pkg/schedule/filter/region_filters.go @@ -20,6 +20,7 @@ import ( "github.com/tikv/pd/pkg/core/storelimit" "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/slice" ) @@ -99,12 +100,12 @@ func (f *regionDownFilter) Select(region *core.RegionInfo) *plan.Status { // RegionReplicatedFilter filters all unreplicated regions. type RegionReplicatedFilter struct { - cluster regionHealthCluster + cluster scheduling.RegionHealthCluster fit *placement.RegionFit } // NewRegionReplicatedFilter creates a RegionFilter that filters all unreplicated regions. -func NewRegionReplicatedFilter(cluster regionHealthCluster) RegionFilter { +func NewRegionReplicatedFilter(cluster scheduling.RegionHealthCluster) RegionFilter { return &RegionReplicatedFilter{cluster: cluster} } @@ -131,11 +132,11 @@ func (f *RegionReplicatedFilter) Select(region *core.RegionInfo) *plan.Status { } type regionEmptyFilter struct { - cluster regionHealthCluster + cluster scheduling.RegionHealthCluster } // NewRegionEmptyFilter returns creates a RegionFilter that filters all empty regions. -func NewRegionEmptyFilter(cluster regionHealthCluster) RegionFilter { +func NewRegionEmptyFilter(cluster scheduling.RegionHealthCluster) RegionFilter { return ®ionEmptyFilter{cluster: cluster} } @@ -147,7 +148,7 @@ func (f *regionEmptyFilter) Select(region *core.RegionInfo) *plan.Status { } // isEmptyRegionAllowBalance returns true if the region is not empty or the number of regions is too small. -func isEmptyRegionAllowBalance(cluster regionHealthCluster, region *core.RegionInfo) bool { +func isEmptyRegionAllowBalance(cluster scheduling.RegionHealthCluster, region *core.RegionInfo) bool { return region.GetApproximateSize() > core.EmptyRegionApproximateSize || cluster.GetRegionCount() < core.InitClusterRegionThreshold } diff --git a/pkg/schedule/operator/builder.go b/pkg/schedule/operator/builder.go index 88ab6e551d9..d3668fcec67 100644 --- a/pkg/schedule/operator/builder.go +++ b/pkg/schedule/operator/builder.go @@ -21,23 +21,13 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/pkg/id" - "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/pkg/versioninfo" ) -// ClusterInformer provides the necessary information for building operator. -type ClusterInformer interface { - GetBasicCluster() *core.BasicCluster - GetOpts() config.Config - GetStoreConfig() config.StoreConfig - GetRuleManager() *placement.RuleManager - GetAllocator() id.Allocator -} - // Builder is used to create operators. Usage: // // op, err := NewBuilder(desc, cluster, region). @@ -50,7 +40,7 @@ type ClusterInformer interface { // according to various constraints. type Builder struct { // basic info - ClusterInformer + scheduling.ClusterInformer desc string regionID uint64 regionEpoch *metapb.RegionEpoch @@ -102,7 +92,7 @@ func SkipPlacementRulesCheck(b *Builder) { } // NewBuilder creates a Builder. 
-func NewBuilder(desc string, ci ClusterInformer, region *core.RegionInfo, opts ...BuilderOption) *Builder { +func NewBuilder(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, opts ...BuilderOption) *Builder { b := &Builder{ desc: desc, ClusterInformer: ci, diff --git a/pkg/schedule/operator/create_operator.go b/pkg/schedule/operator/create_operator.go index 0caa656f045..e1137e14aeb 100644 --- a/pkg/schedule/operator/create_operator.go +++ b/pkg/schedule/operator/create_operator.go @@ -25,40 +25,41 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/logutil" "go.uber.org/zap" ) // CreateAddPeerOperator creates an operator that adds a new peer. -func CreateAddPeerOperator(desc string, ci ClusterInformer, region *core.RegionInfo, peer *metapb.Peer, kind OpKind) (*Operator, error) { +func CreateAddPeerOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region). AddPeer(peer). Build(kind) } // CreateDemoteVoterOperator creates an operator that demotes a voter -func CreateDemoteVoterOperator(desc string, ci ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { +func CreateDemoteVoterOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). DemoteVoter(peer.GetStoreId()). Build(0) } // CreatePromoteLearnerOperator creates an operator that promotes a learner. -func CreatePromoteLearnerOperator(desc string, ci ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { +func CreatePromoteLearnerOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). PromoteLearner(peer.GetStoreId()). Build(0) } // CreateRemovePeerOperator creates an operator that removes a peer from region. -func CreateRemovePeerOperator(desc string, ci ClusterInformer, kind OpKind, region *core.RegionInfo, storeID uint64) (*Operator, error) { +func CreateRemovePeerOperator(desc string, ci scheduling.ClusterInformer, kind OpKind, region *core.RegionInfo, storeID uint64) (*Operator, error) { return NewBuilder(desc, ci, region). RemovePeer(storeID). Build(kind) } // CreateTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store. -func CreateTransferLeaderOperator(desc string, ci ClusterInformer, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, targetStoreIDs []uint64, kind OpKind) (*Operator, error) { +func CreateTransferLeaderOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, targetStoreIDs []uint64, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region, SkipOriginJointStateCheck). SetLeader(targetStoreID). SetLeaders(targetStoreIDs). @@ -66,7 +67,7 @@ func CreateTransferLeaderOperator(desc string, ci ClusterInformer, region *core. } // CreateForceTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store forcible. 
-func CreateForceTransferLeaderOperator(desc string, ci ClusterInformer, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, kind OpKind) (*Operator, error) { +func CreateForceTransferLeaderOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region, SkipOriginJointStateCheck, SkipPlacementRulesCheck). SetLeader(targetStoreID). EnableForceTargetLeader(). @@ -74,7 +75,7 @@ func CreateForceTransferLeaderOperator(desc string, ci ClusterInformer, region * } // CreateMoveRegionOperator creates an operator that moves a region to specified stores. -func CreateMoveRegionOperator(desc string, ci ClusterInformer, region *core.RegionInfo, kind OpKind, roles map[uint64]placement.PeerRoleType) (*Operator, error) { +func CreateMoveRegionOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, kind OpKind, roles map[uint64]placement.PeerRoleType) (*Operator, error) { // construct the peers from roles oldPeers := region.GetPeers() peers := make(map[uint64]*metapb.Peer) @@ -96,7 +97,7 @@ func CreateMoveRegionOperator(desc string, ci ClusterInformer, region *core.Regi } // CreateMovePeerOperator creates an operator that replaces an old peer with a new peer. -func CreateMovePeerOperator(desc string, ci ClusterInformer, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer) (*Operator, error) { +func CreateMovePeerOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). RemovePeer(oldStore). AddPeer(peer). @@ -104,7 +105,7 @@ func CreateMovePeerOperator(desc string, ci ClusterInformer, region *core.Region } // CreateMoveWitnessOperator creates an operator that replaces an old witness with a new witness. -func CreateMoveWitnessOperator(desc string, ci ClusterInformer, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64) (*Operator, error) { +func CreateMoveWitnessOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64) (*Operator, error) { return NewBuilder(desc, ci, region). BecomeNonWitness(sourceStoreID). BecomeWitness(targetStoreID). @@ -112,7 +113,7 @@ func CreateMoveWitnessOperator(desc string, ci ClusterInformer, region *core.Reg } // CreateReplaceLeaderPeerOperator creates an operator that replaces an old peer with a new peer, and move leader from old store firstly. -func CreateReplaceLeaderPeerOperator(desc string, ci ClusterInformer, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer, leader *metapb.Peer) (*Operator, error) { +func CreateReplaceLeaderPeerOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer, leader *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). RemovePeer(oldStore). AddPeer(peer). @@ -121,7 +122,7 @@ func CreateReplaceLeaderPeerOperator(desc string, ci ClusterInformer, region *co } // CreateMoveLeaderOperator creates an operator that replaces an old leader with a new leader. 
-func CreateMoveLeaderOperator(desc string, ci ClusterInformer, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer) (*Operator, error) { +func CreateMoveLeaderOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). RemovePeer(oldStore). AddPeer(peer). @@ -156,7 +157,7 @@ func CreateSplitRegionOperator(desc string, region *core.RegionInfo, kind OpKind } // CreateMergeRegionOperator creates an operator that merge two region into one. -func CreateMergeRegionOperator(desc string, ci ClusterInformer, source *core.RegionInfo, target *core.RegionInfo, kind OpKind) ([]*Operator, error) { +func CreateMergeRegionOperator(desc string, ci scheduling.ClusterInformer, source *core.RegionInfo, target *core.RegionInfo, kind OpKind) ([]*Operator, error) { if core.IsInJointState(source.GetPeers()...) || core.IsInJointState(target.GetPeers()...) { return nil, errors.Errorf("cannot merge regions which are in joint state") } @@ -214,7 +215,7 @@ func isRegionMatch(a, b *core.RegionInfo) bool { } // CreateScatterRegionOperator creates an operator that scatters the specified region. -func CreateScatterRegionOperator(desc string, ci ClusterInformer, origin *core.RegionInfo, targetPeers map[uint64]*metapb.Peer, targetLeader uint64) (*Operator, error) { +func CreateScatterRegionOperator(desc string, ci scheduling.ClusterInformer, origin *core.RegionInfo, targetPeers map[uint64]*metapb.Peer, targetLeader uint64) (*Operator, error) { // randomly pick a leader. var ids []uint64 for id, peer := range targetPeers { @@ -242,7 +243,7 @@ func CreateScatterRegionOperator(desc string, ci ClusterInformer, origin *core.R const OpDescLeaveJointState = "leave-joint-state" // CreateLeaveJointStateOperator creates an operator that let region leave joint state. -func CreateLeaveJointStateOperator(desc string, ci ClusterInformer, origin *core.RegionInfo) (*Operator, error) { +func CreateLeaveJointStateOperator(desc string, ci scheduling.ClusterInformer, origin *core.RegionInfo) (*Operator, error) { b := NewBuilder(desc, ci, origin, SkipOriginJointStateCheck, SkipPlacementRulesCheck) if b.err == nil && !core.IsInJointState(origin.GetPeers()...) { @@ -302,14 +303,14 @@ func CreateLeaveJointStateOperator(desc string, ci ClusterInformer, origin *core } // CreateWitnessPeerOperator creates an operator that set a follower or learner peer with witness -func CreateWitnessPeerOperator(desc string, ci ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { +func CreateWitnessPeerOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). BecomeWitness(peer.GetStoreId()). Build(OpWitness) } // CreateNonWitnessPeerOperator creates an operator that set a peer with non-witness -func CreateNonWitnessPeerOperator(desc string, ci ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { +func CreateNonWitnessPeerOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). BecomeNonWitness(peer.GetStoreId()). 
Build(OpWitness) diff --git a/pkg/schedule/operator/step.go b/pkg/schedule/operator/step.go index a53513d27ae..9cc24d5de33 100644 --- a/pkg/schedule/operator/step.go +++ b/pkg/schedule/operator/step.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/storelimit" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/typeutil" "go.uber.org/zap" ) @@ -53,7 +54,7 @@ type OpStep interface { fmt.Stringer ConfVerChanged(region *core.RegionInfo) uint64 IsFinish(region *core.RegionInfo) bool - CheckInProgress(ci ClusterInformer, region *core.RegionInfo) error + CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error Influence(opInfluence OpInfluence, region *core.RegionInfo) Timeout(regionSize int64) time.Duration GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *pdpb.RegionHeartbeatResponse @@ -87,7 +88,7 @@ func (tl TransferLeader) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (tl TransferLeader) CheckInProgress(ci ClusterInformer, region *core.RegionInfo) error { +func (tl TransferLeader) CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error { errList := make([]error, 0, len(tl.ToStores)+1) for _, storeID := range append(tl.ToStores, tl.ToStore) { peer := region.GetStorePeer(tl.ToStore) @@ -192,7 +193,7 @@ func (ap AddPeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) { } // CheckInProgress checks if the step is in the progress of advancing. -func (ap AddPeer) CheckInProgress(ci ClusterInformer, region *core.RegionInfo) error { +func (ap AddPeer) CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error { if err := validateStore(ci, ap.ToStore); err != nil { return err } @@ -246,7 +247,7 @@ func (bw BecomeWitness) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (bw BecomeWitness) CheckInProgress(ci ClusterInformer, region *core.RegionInfo) error { +func (bw BecomeWitness) CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error { if err := validateStore(ci, bw.StoreID); err != nil { return err } @@ -308,7 +309,7 @@ func (bn BecomeNonWitness) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (bn BecomeNonWitness) CheckInProgress(ci ClusterInformer, region *core.RegionInfo) error { +func (bn BecomeNonWitness) CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error { if err := validateStore(ci, bn.StoreID); err != nil { return err } @@ -394,7 +395,7 @@ func (bsw BatchSwitchWitness) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (bsw BatchSwitchWitness) CheckInProgress(ci ClusterInformer, region *core.RegionInfo) error { +func (bsw BatchSwitchWitness) CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error { for _, w := range bsw.ToWitnesses { if err := w.CheckInProgress(ci, region); err != nil { return err @@ -477,7 +478,7 @@ func (al AddLearner) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. 
-func (al AddLearner) CheckInProgress(ci ClusterInformer, region *core.RegionInfo) error { +func (al AddLearner) CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error { if err := validateStore(ci, al.ToStore); err != nil { return err } @@ -563,7 +564,7 @@ func (pl PromoteLearner) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (pl PromoteLearner) CheckInProgress(_ ClusterInformer, region *core.RegionInfo) error { +func (pl PromoteLearner) CheckInProgress(_ scheduling.ClusterInformer, region *core.RegionInfo) error { peer := region.GetStorePeer(pl.ToStore) if peer.GetId() != pl.PeerID { return errors.New("peer does not exist") @@ -614,7 +615,7 @@ func (rp RemovePeer) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (rp RemovePeer) CheckInProgress(_ ClusterInformer, region *core.RegionInfo) error { +func (rp RemovePeer) CheckInProgress(_ scheduling.ClusterInformer, region *core.RegionInfo) error { if rp.FromStore == region.GetLeader().GetStoreId() { return errors.New("cannot remove leader peer") } @@ -684,7 +685,7 @@ func (mr MergeRegion) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (mr MergeRegion) CheckInProgress(_ ClusterInformer, _ *core.RegionInfo) error { +func (mr MergeRegion) CheckInProgress(_ scheduling.ClusterInformer, _ *core.RegionInfo) error { return nil } @@ -752,7 +753,7 @@ func (sr SplitRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo } // CheckInProgress checks if the step is in the progress of advancing. -func (sr SplitRegion) CheckInProgress(_ ClusterInformer, _ *core.RegionInfo) error { +func (sr SplitRegion) CheckInProgress(_ scheduling.ClusterInformer, _ *core.RegionInfo) error { return nil } @@ -877,7 +878,7 @@ func (cpe ChangePeerV2Enter) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (cpe ChangePeerV2Enter) CheckInProgress(_ ClusterInformer, region *core.RegionInfo) error { +func (cpe ChangePeerV2Enter) CheckInProgress(_ scheduling.ClusterInformer, region *core.RegionInfo) error { inJointState, notInJointState := false, false for _, pl := range cpe.PromoteLearners { peer := region.GetStorePeer(pl.ToStore) @@ -1006,7 +1007,7 @@ func (cpl ChangePeerV2Leave) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. 
-func (cpl ChangePeerV2Leave) CheckInProgress(_ ClusterInformer, region *core.RegionInfo) error { +func (cpl ChangePeerV2Leave) CheckInProgress(_ scheduling.ClusterInformer, region *core.RegionInfo) error { inJointState, notInJointState, demoteLeader := false, false, false leaderStoreID := region.GetLeader().GetStoreId() @@ -1084,7 +1085,7 @@ func (cpl ChangePeerV2Leave) GetCmd(region *core.RegionInfo, useConfChangeV2 boo } } -func validateStore(ci ClusterInformer, id uint64) error { +func validateStore(ci scheduling.ClusterInformer, id uint64) error { store := ci.GetBasicCluster().GetStore(id) if store == nil { return errors.New("target store does not exist") diff --git a/pkg/schedule/operator_controller.go b/pkg/schedule/operator_controller.go index 74633995099..5ff7e699646 100644 --- a/pkg/schedule/operator_controller.go +++ b/pkg/schedule/operator_controller.go @@ -31,6 +31,7 @@ import ( "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule/hbstream" "github.com/tikv/pd/pkg/schedule/operator" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/versioninfo" "go.uber.org/zap" @@ -58,7 +59,7 @@ var ( type OperatorController struct { syncutil.RWMutex ctx context.Context - cluster Cluster + cluster scheduling.ClusterInformer operators map[uint64]*operator.Operator hbStreams *hbstream.HeartbeatStreams fastOperators *cache.TTLUint64 @@ -70,7 +71,7 @@ type OperatorController struct { } // NewOperatorController creates a OperatorController. -func NewOperatorController(ctx context.Context, cluster Cluster, hbStreams *hbstream.HeartbeatStreams) *OperatorController { +func NewOperatorController(ctx context.Context, cluster scheduling.ClusterInformer, hbStreams *hbstream.HeartbeatStreams) *OperatorController { return &OperatorController{ ctx: ctx, cluster: cluster, @@ -92,7 +93,7 @@ func (oc *OperatorController) Ctx() context.Context { } // GetCluster exports cluster to evict-scheduler for check store status. -func (oc *OperatorController) GetCluster() Cluster { +func (oc *OperatorController) GetCluster() scheduling.ClusterInformer { oc.RLock() defer oc.RUnlock() return oc.cluster @@ -715,7 +716,7 @@ func (oc *OperatorController) OperatorCount(kind operator.OpKind) uint64 { } // GetOpInfluence gets OpInfluence. 
-func (oc *OperatorController) GetOpInfluence(cluster Cluster) operator.OpInfluence { +func (oc *OperatorController) GetOpInfluence(cluster scheduling.ClusterInformer) operator.OpInfluence { influence := operator.OpInfluence{ StoresInfluence: make(map[uint64]*operator.StoreInfluence), } @@ -733,7 +734,7 @@ func (oc *OperatorController) GetOpInfluence(cluster Cluster) operator.OpInfluen } // GetFastOpInfluence get fast finish operator influence -func (oc *OperatorController) GetFastOpInfluence(cluster Cluster, influence operator.OpInfluence) { +func (oc *OperatorController) GetFastOpInfluence(cluster scheduling.ClusterInformer, influence operator.OpInfluence) { for _, id := range oc.fastOperators.GetAllID() { value, ok := oc.fastOperators.Get(id) if !ok { @@ -748,13 +749,13 @@ func (oc *OperatorController) GetFastOpInfluence(cluster Cluster, influence oper } // AddOpInfluence add operator influence for cluster -func AddOpInfluence(op *operator.Operator, influence operator.OpInfluence, cluster Cluster) { +func AddOpInfluence(op *operator.Operator, influence operator.OpInfluence, cluster scheduling.ClusterInformer) { region := cluster.GetRegion(op.RegionID()) op.TotalInfluence(influence, region) } // NewTotalOpInfluence creates a OpInfluence. -func NewTotalOpInfluence(operators []*operator.Operator, cluster Cluster) operator.OpInfluence { +func NewTotalOpInfluence(operators []*operator.Operator, cluster scheduling.ClusterInformer) operator.OpInfluence { influence := *operator.NewOpInfluence() for _, op := range operators { diff --git a/pkg/schedule/range_cluster.go b/pkg/schedule/range_cluster.go index 5c2645c4a3e..6eb5117290c 100644 --- a/pkg/schedule/range_cluster.go +++ b/pkg/schedule/range_cluster.go @@ -17,26 +17,27 @@ package schedule import ( "github.com/docker/go-units" "github.com/tikv/pd/pkg/core" + "github.com/tikv/pd/pkg/schedule/scheduling" ) // RangeCluster isolates the cluster by range. type RangeCluster struct { - Cluster + scheduling.ClusterInformer subCluster *core.BasicCluster // Collect all regions belong to the range. tolerantSizeRatio float64 } // GenRangeCluster gets a range cluster by specifying start key and end key. // The cluster can only know the regions within [startKey, endKey]. -func GenRangeCluster(cluster Cluster, startKey, endKey []byte) *RangeCluster { +func GenRangeCluster(cluster scheduling.ClusterInformer, startKey, endKey []byte) *RangeCluster { subCluster := core.NewBasicCluster() for _, r := range cluster.ScanRegions(startKey, endKey, -1) { origin, overlaps, rangeChanged := subCluster.SetRegion(r) subCluster.UpdateSubTree(r, origin, overlaps, rangeChanged) } return &RangeCluster{ - Cluster: cluster, - subCluster: subCluster, + ClusterInformer: cluster, + subCluster: subCluster, } } @@ -69,7 +70,7 @@ func (r *RangeCluster) updateStoreInfo(s *core.StoreInfo) *core.StoreInfo { // GetStore searches for a store by ID. func (r *RangeCluster) GetStore(id uint64) *core.StoreInfo { - s := r.Cluster.GetStore(id) + s := r.ClusterInformer.GetStore(id) if s == nil { return nil } @@ -78,7 +79,7 @@ func (r *RangeCluster) GetStore(id uint64) *core.StoreInfo { // GetStores returns all Stores in the cluster. 
func (r *RangeCluster) GetStores() []*core.StoreInfo { - stores := r.Cluster.GetStores() + stores := r.ClusterInformer.GetStores() newStores := make([]*core.StoreInfo, 0, len(stores)) for _, s := range stores { newStores = append(newStores, r.updateStoreInfo(s)) @@ -96,7 +97,7 @@ func (r *RangeCluster) GetTolerantSizeRatio() float64 { if r.tolerantSizeRatio != 0 { return r.tolerantSizeRatio } - return r.Cluster.GetOpts().GetTolerantSizeRatio() + return r.ClusterInformer.GetOpts().GetTolerantSizeRatio() } // RandFollowerRegions returns a random region that has a follower on the store. @@ -116,7 +117,7 @@ func (r *RangeCluster) GetAverageRegionSize() int64 { // GetRegionStores returns all stores that contains the region's peer. func (r *RangeCluster) GetRegionStores(region *core.RegionInfo) []*core.StoreInfo { - stores := r.Cluster.GetRegionStores(region) + stores := r.ClusterInformer.GetRegionStores(region) newStores := make([]*core.StoreInfo, 0, len(stores)) for _, s := range stores { newStores = append(newStores, r.updateStoreInfo(s)) @@ -126,7 +127,7 @@ func (r *RangeCluster) GetRegionStores(region *core.RegionInfo) []*core.StoreInf // GetFollowerStores returns all stores that contains the region's follower peer. func (r *RangeCluster) GetFollowerStores(region *core.RegionInfo) []*core.StoreInfo { - stores := r.Cluster.GetFollowerStores(region) + stores := r.ClusterInformer.GetFollowerStores(region) newStores := make([]*core.StoreInfo, 0, len(stores)) for _, s := range stores { newStores = append(newStores, r.updateStoreInfo(s)) @@ -136,7 +137,7 @@ func (r *RangeCluster) GetFollowerStores(region *core.RegionInfo) []*core.StoreI // GetLeaderStore returns all stores that contains the region's leader peer. func (r *RangeCluster) GetLeaderStore(region *core.RegionInfo) *core.StoreInfo { - s := r.Cluster.GetLeaderStore(region) + s := r.ClusterInformer.GetLeaderStore(region) if s != nil { return r.updateStoreInfo(s) } diff --git a/pkg/schedule/region_scatterer.go b/pkg/schedule/region_scatterer.go index d5e53965ad4..920885269fe 100644 --- a/pkg/schedule/region_scatterer.go +++ b/pkg/schedule/region_scatterer.go @@ -32,6 +32,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/utils/typeutil" "go.uber.org/zap" @@ -131,7 +132,7 @@ func (s *selectedStores) getDistributionByGroupLocked(group string) (map[uint64] type RegionScatterer struct { ctx context.Context name string - cluster Cluster + cluster scheduling.ClusterInformer ordinaryEngine engineContext specialEngines sync.Map opController *OperatorController @@ -139,7 +140,7 @@ type RegionScatterer struct { // NewRegionScatterer creates a region scatterer. // RegionScatter is used for the `Lightning`, it will scatter the specified regions before import data. 
-func NewRegionScatterer(ctx context.Context, cluster Cluster, opController *OperatorController) *RegionScatterer { +func NewRegionScatterer(ctx context.Context, cluster scheduling.ClusterInformer, opController *OperatorController) *RegionScatterer { return &RegionScatterer{ ctx: ctx, name: regionScatterName, diff --git a/pkg/schedule/region_splitter.go b/pkg/schedule/region_splitter.go index 8817fb56121..d58a636c416 100644 --- a/pkg/schedule/region_splitter.go +++ b/pkg/schedule/region_splitter.go @@ -27,6 +27,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/typeutil" "go.uber.org/zap" @@ -44,7 +45,7 @@ type SplitRegionsHandler interface { } // NewSplitRegionsHandler return SplitRegionsHandler -func NewSplitRegionsHandler(cluster Cluster, oc *OperatorController) SplitRegionsHandler { +func NewSplitRegionsHandler(cluster scheduling.ClusterInformer, oc *OperatorController) SplitRegionsHandler { return &splitRegionsHandler{ cluster: cluster, oc: oc, @@ -53,12 +54,12 @@ func NewSplitRegionsHandler(cluster Cluster, oc *OperatorController) SplitRegion // RegionSplitter handles split regions type RegionSplitter struct { - cluster Cluster + cluster scheduling.ClusterInformer handler SplitRegionsHandler } // NewRegionSplitter return a region splitter -func NewRegionSplitter(cluster Cluster, handler SplitRegionsHandler) *RegionSplitter { +func NewRegionSplitter(cluster scheduling.ClusterInformer, handler SplitRegionsHandler) *RegionSplitter { return &RegionSplitter{ cluster: cluster, handler: handler, @@ -177,7 +178,7 @@ func (r *RegionSplitter) checkRegionValid(region *core.RegionInfo) bool { } type splitRegionsHandler struct { - cluster Cluster + cluster scheduling.ClusterInformer oc *OperatorController } diff --git a/pkg/schedule/scheduler.go b/pkg/schedule/scheduler.go index cb16177d87e..49eb9c35e3a 100644 --- a/pkg/schedule/scheduler.go +++ b/pkg/schedule/scheduler.go @@ -26,6 +26,7 @@ import ( "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "go.uber.org/zap" ) @@ -39,10 +40,10 @@ type Scheduler interface { EncodeConfig() ([]byte, error) GetMinInterval() time.Duration GetNextInterval(interval time.Duration) time.Duration - Prepare(cluster Cluster) error - Cleanup(cluster Cluster) - Schedule(cluster Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) - IsScheduleAllowed(cluster Cluster) bool + Prepare(cluster scheduling.ClusterInformer) error + Cleanup(cluster scheduling.ClusterInformer) + Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) + IsScheduleAllowed(cluster scheduling.ClusterInformer) bool } // EncodeConfig encode the custom config for each scheduler. 
diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index 6769a78d502..3ca644fb056 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -32,6 +32,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/reflectutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -226,7 +227,7 @@ func (l *balanceLeaderScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(l.conf) } -func (l *balanceLeaderScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (l *balanceLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { allowed := l.opController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpLeader.String()).Inc() @@ -326,7 +327,7 @@ func (cs *candidateStores) resortStoreWithPos(pos int) { } } -func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (l *balanceLeaderScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { l.conf.mu.RLock() defer l.conf.mu.RUnlock() basePlan := NewBalanceSchedulerPlan() @@ -421,7 +422,7 @@ func makeInfluence(op *operator.Operator, plan *solver, usedRegions map[uint64]s storesIDs := candidate.binarySearchStores(plan.source, plan.target) candidateUpdateStores[id] = storesIDs } - schedule.AddOpInfluence(op, plan.opInfluence, plan.Cluster) + schedule.AddOpInfluence(op, plan.opInfluence, plan.ClusterInformer) for id, candidate := range candidates { for _, pos := range candidateUpdateStores[id] { candidate.resortStoreWithPos(pos) diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index 8b014a42067..74a9217db2d 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -27,6 +27,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "go.uber.org/zap" ) @@ -115,7 +116,7 @@ func (s *balanceRegionScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *balanceRegionScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *balanceRegionScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { allowed := s.opController.OperatorCount(operator.OpRegion) < cluster.GetOpts().GetRegionScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc() @@ -123,7 +124,7 @@ func (s *balanceRegionScheduler) IsScheduleAllowed(cluster schedule.Cluster) boo return allowed } -func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *balanceRegionScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { basePlan := NewBalanceSchedulerPlan() var collector *plan.Collector if dryRun { diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index f481cbbc8b5..be71233fb65 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ 
b/pkg/schedule/schedulers/balance_witness.go @@ -32,6 +32,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/reflectutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -211,7 +212,7 @@ func (b *balanceWitnessScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(b.conf) } -func (b *balanceWitnessScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (b *balanceWitnessScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { allowed := b.opController.OperatorCount(operator.OpWitness) < cluster.GetOpts().GetWitnessScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(b.GetType(), operator.OpWitness.String()).Inc() @@ -219,7 +220,7 @@ func (b *balanceWitnessScheduler) IsScheduleAllowed(cluster schedule.Cluster) bo return allowed } -func (b *balanceWitnessScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (b *balanceWitnessScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { b.conf.mu.RLock() defer b.conf.mu.RUnlock() basePlan := NewBalanceSchedulerPlan() diff --git a/pkg/schedule/schedulers/base_scheduler.go b/pkg/schedule/schedulers/base_scheduler.go index 9da9b55b04e..db1bebba6b4 100644 --- a/pkg/schedule/schedulers/base_scheduler.go +++ b/pkg/schedule/schedulers/base_scheduler.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/typeutil" ) @@ -87,7 +88,7 @@ func (s *BaseScheduler) GetNextInterval(interval time.Duration) time.Duration { } // Prepare does some prepare work -func (s *BaseScheduler) Prepare(cluster schedule.Cluster) error { return nil } +func (s *BaseScheduler) Prepare(cluster scheduling.ClusterInformer) error { return nil } // Cleanup does some cleanup work -func (s *BaseScheduler) Cleanup(cluster schedule.Cluster) {} +func (s *BaseScheduler) Cleanup(cluster scheduling.ClusterInformer) {} diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index 55d4fdd405b..803963ea11e 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -29,6 +29,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -59,7 +60,7 @@ type evictLeaderSchedulerConfig struct { mu syncutil.RWMutex storage endpoint.ConfigStorage StoreIDWithRanges map[uint64][]core.KeyRange `json:"store-id-ranges"` - cluster schedule.Cluster + cluster scheduling.ClusterInformer } func (conf *evictLeaderSchedulerConfig) getStores() []uint64 { @@ -203,7 +204,7 @@ func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *evictLeaderScheduler) Prepare(cluster schedule.Cluster) error { +func (s *evictLeaderScheduler) Prepare(cluster scheduling.ClusterInformer) error { s.conf.mu.RLock() defer s.conf.mu.RUnlock() var res error @@ -215,7 +216,7 @@ func (s *evictLeaderScheduler) Prepare(cluster schedule.Cluster) error { return res } -func 
(s *evictLeaderScheduler) Cleanup(cluster schedule.Cluster) { +func (s *evictLeaderScheduler) Cleanup(cluster scheduling.ClusterInformer) { s.conf.mu.RLock() defer s.conf.mu.RUnlock() for id := range s.conf.StoreIDWithRanges { @@ -223,7 +224,7 @@ func (s *evictLeaderScheduler) Cleanup(cluster schedule.Cluster) { } } -func (s *evictLeaderScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *evictLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() @@ -231,7 +232,7 @@ func (s *evictLeaderScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool return allowed } -func (s *evictLeaderScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictLeaderScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { evictLeaderCounter.Inc() return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize), nil } @@ -256,7 +257,7 @@ type evictLeaderStoresConf interface { getKeyRangesByID(id uint64) []core.KeyRange } -func scheduleEvictLeaderBatch(name, typ string, cluster schedule.Cluster, conf evictLeaderStoresConf, batchSize int) []*operator.Operator { +func scheduleEvictLeaderBatch(name, typ string, cluster scheduling.ClusterInformer, conf evictLeaderStoresConf, batchSize int) []*operator.Operator { var ops []*operator.Operator for i := 0; i < batchSize; i++ { once := scheduleEvictLeaderOnce(name, typ, cluster, conf) @@ -273,7 +274,7 @@ func scheduleEvictLeaderBatch(name, typ string, cluster schedule.Cluster, conf e return ops } -func scheduleEvictLeaderOnce(name, typ string, cluster schedule.Cluster, conf evictLeaderStoresConf) []*operator.Operator { +func scheduleEvictLeaderOnce(name, typ string, cluster scheduling.ClusterInformer, conf evictLeaderStoresConf) []*operator.Operator { stores := conf.getStores() ops := make([]*operator.Operator, 0, len(stores)) for _, storeID := range stores { diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index 70dde65a38c..e0ceee57741 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -22,6 +22,7 @@ import ( "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "go.uber.org/zap" ) @@ -109,7 +110,7 @@ func (s *evictSlowStoreScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *evictSlowStoreScheduler) Prepare(cluster schedule.Cluster) error { +func (s *evictSlowStoreScheduler) Prepare(cluster scheduling.ClusterInformer) error { evictStore := s.conf.evictStore() if evictStore != 0 { return cluster.SlowStoreEvicted(evictStore) @@ -117,11 +118,11 @@ func (s *evictSlowStoreScheduler) Prepare(cluster schedule.Cluster) error { return nil } -func (s *evictSlowStoreScheduler) Cleanup(cluster schedule.Cluster) { +func (s *evictSlowStoreScheduler) Cleanup(cluster scheduling.ClusterInformer) { s.cleanupEvictLeader(cluster) } -func (s *evictSlowStoreScheduler) prepareEvictLeader(cluster schedule.Cluster, storeID uint64) error { +func (s *evictSlowStoreScheduler) prepareEvictLeader(cluster 
scheduling.ClusterInformer, storeID uint64) error { err := s.conf.setStoreAndPersist(storeID) if err != nil { log.Info("evict-slow-store-scheduler persist config failed", zap.Uint64("store-id", storeID)) @@ -131,7 +132,7 @@ func (s *evictSlowStoreScheduler) prepareEvictLeader(cluster schedule.Cluster, s return cluster.SlowStoreEvicted(storeID) } -func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster schedule.Cluster) { +func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster scheduling.ClusterInformer) { evictSlowStore, err := s.conf.clearAndPersist() if err != nil { log.Info("evict-slow-store-scheduler persist config failed", zap.Uint64("store-id", evictSlowStore)) @@ -142,11 +143,11 @@ func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster schedule.Cluster) { cluster.SlowStoreRecovered(evictSlowStore) } -func (s *evictSlowStoreScheduler) schedulerEvictLeader(cluster schedule.Cluster) []*operator.Operator { +func (s *evictSlowStoreScheduler) schedulerEvictLeader(cluster scheduling.ClusterInformer) []*operator.Operator { return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize) } -func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { if s.conf.evictStore() != 0 { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { @@ -157,7 +158,7 @@ func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster schedule.Cluster) bo return true } -func (s *evictSlowStoreScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictSlowStoreScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { evictSlowStoreCounter.Inc() var ops []*operator.Operator diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index e94aa38e420..b8ec5a8732d 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -27,6 +27,7 @@ import ( "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" ) @@ -108,7 +109,7 @@ func (conf *evictSlowTrendSchedulerConfig) setStoreAndPersist(id uint64) error { return conf.Persist() } -func (conf *evictSlowTrendSchedulerConfig) clearAndPersist(cluster schedule.Cluster) (oldID uint64, err error) { +func (conf *evictSlowTrendSchedulerConfig) clearAndPersist(cluster scheduling.ClusterInformer) (oldID uint64, err error) { oldID = conf.evictedStore() if oldID == 0 { return @@ -140,7 +141,7 @@ func (s *evictSlowTrendScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *evictSlowTrendScheduler) Prepare(cluster schedule.Cluster) error { +func (s *evictSlowTrendScheduler) Prepare(cluster scheduling.ClusterInformer) error { evictedStoreID := s.conf.evictedStore() if evictedStoreID == 0 { return nil @@ -148,11 +149,11 @@ func (s *evictSlowTrendScheduler) Prepare(cluster schedule.Cluster) error { return cluster.SlowTrendEvicted(evictedStoreID) } -func (s *evictSlowTrendScheduler) Cleanup(cluster schedule.Cluster) { +func (s *evictSlowTrendScheduler) Cleanup(cluster scheduling.ClusterInformer) { s.cleanupEvictLeader(cluster) } -func (s *evictSlowTrendScheduler) 
prepareEvictLeader(cluster schedule.Cluster, storeID uint64) error { +func (s *evictSlowTrendScheduler) prepareEvictLeader(cluster scheduling.ClusterInformer, storeID uint64) error { err := s.conf.setStoreAndPersist(storeID) if err != nil { log.Info("evict-slow-trend-scheduler persist config failed", zap.Uint64("store-id", storeID)) @@ -161,7 +162,7 @@ func (s *evictSlowTrendScheduler) prepareEvictLeader(cluster schedule.Cluster, s return cluster.SlowTrendEvicted(storeID) } -func (s *evictSlowTrendScheduler) cleanupEvictLeader(cluster schedule.Cluster) { +func (s *evictSlowTrendScheduler) cleanupEvictLeader(cluster scheduling.ClusterInformer) { evictedStoreID, err := s.conf.clearAndPersist(cluster) if err != nil { log.Info("evict-slow-trend-scheduler persist config failed", zap.Uint64("store-id", evictedStoreID)) @@ -171,7 +172,7 @@ func (s *evictSlowTrendScheduler) cleanupEvictLeader(cluster schedule.Cluster) { } } -func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster schedule.Cluster) []*operator.Operator { +func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster scheduling.ClusterInformer) []*operator.Operator { store := cluster.GetStore(s.conf.evictedStore()) if store == nil { return nil @@ -180,7 +181,7 @@ func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster schedule.Cluster) return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize) } -func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { if s.conf.evictedStore() == 0 { return true } @@ -191,7 +192,7 @@ func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster schedule.Cluster) bo return allowed } -func (s *evictSlowTrendScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictSlowTrendScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { schedulerCounter.WithLabelValues(s.GetName(), "schedule").Inc() var ops []*operator.Operator @@ -271,7 +272,7 @@ func newEvictSlowTrendScheduler(opController *schedule.OperatorController, conf } } -func chooseEvictCandidate(cluster schedule.Cluster) (slowStore *core.StoreInfo) { +func chooseEvictCandidate(cluster scheduling.ClusterInformer) (slowStore *core.StoreInfo) { stores := cluster.GetStores() if len(stores) < 3 { storeSlowTrendActionStatusGauge.WithLabelValues("cand.none:too-few").Inc() @@ -331,7 +332,7 @@ func chooseEvictCandidate(cluster schedule.Cluster) (slowStore *core.StoreInfo) return store } -func checkStoresAreUpdated(cluster schedule.Cluster, slowStoreID uint64, slowStoreRecordTS time.Time) bool { +func checkStoresAreUpdated(cluster scheduling.ClusterInformer, slowStoreID uint64, slowStoreRecordTS time.Time) bool { stores := cluster.GetStores() if len(stores) <= 1 { return false @@ -360,7 +361,7 @@ func checkStoresAreUpdated(cluster schedule.Cluster, slowStoreID uint64, slowSto return updatedStores >= expected } -func checkStoreSlowerThanOthers(cluster schedule.Cluster, target *core.StoreInfo) bool { +func checkStoreSlowerThanOthers(cluster scheduling.ClusterInformer, target *core.StoreInfo) bool { stores := cluster.GetStores() expected := (len(stores)*2 + 1) / 3 targetSlowTrend := target.GetSlowTrend() @@ -391,7 +392,7 @@ func checkStoreSlowerThanOthers(cluster schedule.Cluster, target *core.StoreInfo return slowerThanStoresNum >= expected } -func checkStoreCanRecover(cluster 
schedule.Cluster, target *core.StoreInfo) bool { +func checkStoreCanRecover(cluster scheduling.ClusterInformer, target *core.StoreInfo) bool { /* // // This might not be necessary, @@ -414,7 +415,7 @@ func checkStoreCanRecover(cluster schedule.Cluster, target *core.StoreInfo) bool return checkStoreFasterThanOthers(cluster, target) } -func checkStoreFasterThanOthers(cluster schedule.Cluster, target *core.StoreInfo) bool { +func checkStoreFasterThanOthers(cluster scheduling.ClusterInformer, target *core.StoreInfo) bool { stores := cluster.GetStores() expected := (len(stores) + 1) / 2 targetSlowTrend := target.GetSlowTrend() diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index 093141bdc5f..d2b72aae30b 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -29,6 +29,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/storage/endpoint" @@ -54,7 +55,7 @@ var ( type grantHotRegionSchedulerConfig struct { mu syncutil.RWMutex storage endpoint.ConfigStorage - cluster schedule.Cluster + cluster scheduling.ClusterInformer StoreIDs []uint64 `json:"store-id"` StoreLeaderID uint64 `json:"store-leader-id"` } @@ -151,7 +152,7 @@ func (s *grantHotRegionScheduler) EncodeConfig() ([]byte, error) { // IsScheduleAllowed returns whether the scheduler is allowed to schedule. // TODO it should check if there is any scheduler such as evict or hot region scheduler -func (s *grantHotRegionScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *grantHotRegionScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { regionAllowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetOpts().GetRegionScheduleLimit() leaderAllowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !regionAllowed { @@ -225,14 +226,14 @@ func newGrantHotRegionHandler(config *grantHotRegionSchedulerConfig) http.Handle return router } -func (s *grantHotRegionScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *grantHotRegionScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { grantHotRegionCounter.Inc() rw := s.randomRWType() s.prepareForBalance(rw, cluster) return s.dispatch(rw, cluster), nil } -func (s *grantHotRegionScheduler) dispatch(typ statistics.RWType, cluster schedule.Cluster) []*operator.Operator { +func (s *grantHotRegionScheduler) dispatch(typ statistics.RWType, cluster scheduling.ClusterInformer) []*operator.Operator { stLoadInfos := s.stLoadInfos[buildResourceType(typ, constant.RegionKind)] infos := make([]*statistics.StoreLoadDetail, len(stLoadInfos)) index := 0 @@ -246,7 +247,7 @@ func (s *grantHotRegionScheduler) dispatch(typ statistics.RWType, cluster schedu return s.randomSchedule(cluster, infos) } -func (s *grantHotRegionScheduler) randomSchedule(cluster schedule.Cluster, srcStores []*statistics.StoreLoadDetail) (ops []*operator.Operator) { +func (s *grantHotRegionScheduler) randomSchedule(cluster scheduling.ClusterInformer, srcStores []*statistics.StoreLoadDetail) (ops []*operator.Operator) { isLeader := s.r.Int()%2 == 1 for _, srcStore := range srcStores { srcStoreID := srcStore.GetID() @@ -277,7 
+278,7 @@ func (s *grantHotRegionScheduler) randomSchedule(cluster schedule.Cluster, srcSt return nil } -func (s *grantHotRegionScheduler) transfer(cluster schedule.Cluster, regionID uint64, srcStoreID uint64, isLeader bool) (op *operator.Operator, err error) { +func (s *grantHotRegionScheduler) transfer(cluster scheduling.ClusterInformer, regionID uint64, srcStoreID uint64, isLeader bool) (op *operator.Operator, err error) { srcRegion := cluster.GetRegion(regionID) if srcRegion == nil || len(srcRegion.GetDownPeers()) != 0 || len(srcRegion.GetPendingPeers()) != 0 { return nil, errs.ErrRegionRuleNotFound diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go index 5f504d05172..160bd05cc3f 100644 --- a/pkg/schedule/schedulers/grant_leader.go +++ b/pkg/schedule/schedulers/grant_leader.go @@ -28,6 +28,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -52,7 +53,7 @@ type grantLeaderSchedulerConfig struct { mu syncutil.RWMutex storage endpoint.ConfigStorage StoreIDWithRanges map[uint64][]core.KeyRange `json:"store-id-ranges"` - cluster schedule.Cluster + cluster scheduling.ClusterInformer } func (conf *grantLeaderSchedulerConfig) BuildWithArgs(args []string) error { @@ -177,7 +178,7 @@ func (s *grantLeaderScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *grantLeaderScheduler) Prepare(cluster schedule.Cluster) error { +func (s *grantLeaderScheduler) Prepare(cluster scheduling.ClusterInformer) error { s.conf.mu.RLock() defer s.conf.mu.RUnlock() var res error @@ -189,7 +190,7 @@ func (s *grantLeaderScheduler) Prepare(cluster schedule.Cluster) error { return res } -func (s *grantLeaderScheduler) Cleanup(cluster schedule.Cluster) { +func (s *grantLeaderScheduler) Cleanup(cluster scheduling.ClusterInformer) { s.conf.mu.RLock() defer s.conf.mu.RUnlock() for id := range s.conf.StoreIDWithRanges { @@ -197,7 +198,7 @@ func (s *grantLeaderScheduler) Cleanup(cluster schedule.Cluster) { } } -func (s *grantLeaderScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *grantLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() @@ -205,7 +206,7 @@ func (s *grantLeaderScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool return allowed } -func (s *grantLeaderScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *grantLeaderScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { grantLeaderCounter.Inc() s.conf.mu.RLock() defer s.conf.mu.RUnlock() diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index 9f25707fbc1..22edf79cbf7 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -36,6 +36,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" 
"github.com/tikv/pd/pkg/utils/keyutil" @@ -118,7 +119,7 @@ func newBaseHotScheduler(opController *schedule.OperatorController) *baseHotSche // prepareForBalance calculate the summary of pending Influence for each store and prepare the load detail for // each store, only update read or write load detail -func (h *baseHotScheduler) prepareForBalance(rw statistics.RWType, cluster schedule.Cluster) { +func (h *baseHotScheduler) prepareForBalance(rw statistics.RWType, cluster scheduling.ClusterInformer) { h.stInfos = statistics.SummaryStoreInfos(cluster.GetStores()) h.summaryPendingInfluence(cluster) h.storesLoads = cluster.GetStoresLoads() @@ -157,7 +158,7 @@ func (h *baseHotScheduler) prepareForBalance(rw statistics.RWType, cluster sched // summaryPendingInfluence calculate the summary of pending Influence for each store // and clean the region from regionInfluence if they have ended operator. // It makes each dim rate or count become `weight` times to the origin value. -func (h *baseHotScheduler) summaryPendingInfluence(cluster schedule.Cluster) { +func (h *baseHotScheduler) summaryPendingInfluence(cluster scheduling.ClusterInformer) { for id, p := range h.regionPendings { from := h.stInfos[p.from] to := h.stInfos[p.to] @@ -258,7 +259,7 @@ func (h *hotScheduler) GetNextInterval(interval time.Duration) time.Duration { return intervalGrow(h.GetMinInterval(), maxHotScheduleInterval, exponentialGrowth) } -func (h *hotScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (h *hotScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { allowed := h.OpController.OperatorCount(operator.OpHotRegion) < cluster.GetOpts().GetHotRegionScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(h.GetType(), operator.OpHotRegion.String()).Inc() @@ -266,13 +267,13 @@ func (h *hotScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { return allowed } -func (h *hotScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (h *hotScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { hotSchedulerCounter.Inc() rw := h.randomRWType() return h.dispatch(rw, cluster), nil } -func (h *hotScheduler) dispatch(typ statistics.RWType, cluster schedule.Cluster) []*operator.Operator { +func (h *hotScheduler) dispatch(typ statistics.RWType, cluster scheduling.ClusterInformer) []*operator.Operator { h.Lock() defer h.Unlock() h.prepareForBalance(typ, cluster) @@ -306,7 +307,7 @@ func (h *hotScheduler) tryAddPendingInfluence(op *operator.Operator, srcStore, d return true } -func (h *hotScheduler) balanceHotReadRegions(cluster schedule.Cluster) []*operator.Operator { +func (h *hotScheduler) balanceHotReadRegions(cluster scheduling.ClusterInformer) []*operator.Operator { leaderSolver := newBalanceSolver(h, cluster, statistics.Read, transferLeader) leaderOps := leaderSolver.solve() peerSolver := newBalanceSolver(h, cluster, statistics.Read, movePeer) @@ -349,7 +350,7 @@ func (h *hotScheduler) balanceHotReadRegions(cluster schedule.Cluster) []*operat return nil } -func (h *hotScheduler) balanceHotWriteRegions(cluster schedule.Cluster) []*operator.Operator { +func (h *hotScheduler) balanceHotWriteRegions(cluster scheduling.ClusterInformer) []*operator.Operator { // prefer to balance by peer s := h.r.Intn(100) switch { @@ -438,7 +439,7 @@ func isAvailableV1(s *solution) bool { } type balanceSolver struct { - schedule.Cluster + scheduling.ClusterInformer sche *hotScheduler stLoadDetail 
map[uint64]*statistics.StoreLoadDetail rwTy statistics.RWType @@ -559,7 +560,7 @@ func (bs *balanceSolver) isSelectedDim(dim int) bool { } func (bs *balanceSolver) getPriorities() []string { - querySupport := bs.sche.conf.checkQuerySupport(bs.Cluster) + querySupport := bs.sche.conf.checkQuerySupport(bs.ClusterInformer) // For read, transfer-leader and move-peer have the same priority config // For write, they are different switch bs.resourceTy { @@ -574,19 +575,19 @@ func (bs *balanceSolver) getPriorities() []string { return []string{} } -func newBalanceSolver(sche *hotScheduler, cluster schedule.Cluster, rwTy statistics.RWType, opTy opType) *balanceSolver { +func newBalanceSolver(sche *hotScheduler, cluster scheduling.ClusterInformer, rwTy statistics.RWType, opTy opType) *balanceSolver { bs := &balanceSolver{ - Cluster: cluster, - sche: sche, - rwTy: rwTy, - opTy: opTy, + ClusterInformer: cluster, + sche: sche, + rwTy: rwTy, + opTy: opTy, } bs.init() return bs } func (bs *balanceSolver) isValid() bool { - if bs.Cluster == nil || bs.sche == nil || bs.stLoadDetail == nil { + if bs.ClusterInformer == nil || bs.sche == nil || bs.stLoadDetail == nil { return false } return true @@ -905,7 +906,7 @@ func (bs *balanceSolver) isRegionAvailable(region *core.RegionInfo) bool { return false } - if !filter.IsRegionReplicated(bs.Cluster, region) { + if !filter.IsRegionReplicated(bs.ClusterInformer, region) { log.Debug("region has abnormal replica count", zap.String("scheduler", bs.sche.GetName()), zap.Uint64("region-id", region.GetID())) hotSchedulerAbnormalReplicaCounter.Inc() return false @@ -1478,7 +1479,7 @@ func (bs *balanceSolver) createSplitOperator(regions []*core.RegionInfo) []*oper for i, region := range regions { ids[i] = region.GetID() } - hotBuckets := bs.Cluster.BucketsStats(bs.minHotDegree, ids...) + hotBuckets := bs.ClusterInformer.BucketsStats(bs.minHotDegree, ids...) 
operators := make([]*operator.Operator, 0) createFunc := func(region *core.RegionInfo) { diff --git a/pkg/schedule/schedulers/hot_region_config.go b/pkg/schedule/schedulers/hot_region_config.go index b74af17404d..875c0792f89 100644 --- a/pkg/schedule/schedulers/hot_region_config.go +++ b/pkg/schedule/schedulers/hot_region_config.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/storage/endpoint" @@ -433,7 +434,7 @@ func (conf *hotRegionSchedulerConfig) persistLocked() error { return conf.storage.SaveScheduleConfig(HotRegionName, data) } -func (conf *hotRegionSchedulerConfig) checkQuerySupport(cluster schedule.Cluster) bool { +func (conf *hotRegionSchedulerConfig) checkQuerySupport(cluster scheduling.ClusterInformer) bool { querySupport := versioninfo.IsFeatureSupported(cluster.GetOpts().GetClusterVersion(), versioninfo.HotScheduleWithQuery) conf.Lock() defer conf.Unlock() diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go index e150e6530fc..63375fed27d 100644 --- a/pkg/schedule/schedulers/label.go +++ b/pkg/schedule/schedulers/label.go @@ -24,6 +24,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "go.uber.org/zap" ) @@ -75,7 +76,7 @@ func (s *labelScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *labelScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *labelScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() @@ -83,7 +84,7 @@ func (s *labelScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { return allowed } -func (s *labelScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *labelScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { labelCounter.Inc() stores := cluster.GetStores() rejectLeaderStores := make(map[uint64]struct{}) diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go index f324dbc1599..ca60909f1b1 100644 --- a/pkg/schedule/schedulers/random_merge.go +++ b/pkg/schedule/schedulers/random_merge.go @@ -26,6 +26,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" ) const ( @@ -77,7 +78,7 @@ func (s *randomMergeScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *randomMergeScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *randomMergeScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpMerge) < cluster.GetOpts().GetMergeScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpMerge.String()).Inc() @@ -85,7 +86,7 @@ func (s *randomMergeScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool return allowed } -func (s *randomMergeScheduler) Schedule(cluster 
schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *randomMergeScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { randomMergeCounter.Inc() store := filter.NewCandidates(cluster.GetStores()). @@ -128,7 +129,7 @@ func (s *randomMergeScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ( return ops, nil } -func (s *randomMergeScheduler) allowMerge(cluster schedule.Cluster, region, target *core.RegionInfo) bool { +func (s *randomMergeScheduler) allowMerge(cluster scheduling.ClusterInformer, region, target *core.RegionInfo) bool { if !filter.IsRegionHealthy(region) || !filter.IsRegionHealthy(target) { return false } diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go index 8ca06ed50d8..1b81335b286 100644 --- a/pkg/schedule/schedulers/scatter_range.go +++ b/pkg/schedule/schedulers/scatter_range.go @@ -25,6 +25,7 @@ import ( "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -168,11 +169,11 @@ func (l *scatterRangeScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(l.config) } -func (l *scatterRangeScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (l *scatterRangeScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { return l.allowBalanceLeader(cluster) || l.allowBalanceRegion(cluster) } -func (l *scatterRangeScheduler) allowBalanceLeader(cluster schedule.Cluster) bool { +func (l *scatterRangeScheduler) allowBalanceLeader(cluster scheduling.ClusterInformer) bool { allowed := l.OpController.OperatorCount(operator.OpRange) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpLeader.String()).Inc() @@ -180,7 +181,7 @@ func (l *scatterRangeScheduler) allowBalanceLeader(cluster schedule.Cluster) boo return allowed } -func (l *scatterRangeScheduler) allowBalanceRegion(cluster schedule.Cluster) bool { +func (l *scatterRangeScheduler) allowBalanceRegion(cluster scheduling.ClusterInformer) bool { allowed := l.OpController.OperatorCount(operator.OpRange) < cluster.GetOpts().GetRegionScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpRegion.String()).Inc() @@ -188,7 +189,7 @@ func (l *scatterRangeScheduler) allowBalanceRegion(cluster schedule.Cluster) boo return allowed } -func (l *scatterRangeScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (l *scatterRangeScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { scatterRangeCounter.Inc() // isolate a new cluster according to the key range c := schedule.GenRangeCluster(cluster, l.config.GetStartKey(), l.config.GetEndKey()) diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go index 1bb7179fa9c..e73e48eb184 100644 --- a/pkg/schedule/schedulers/shuffle_hot_region.go +++ b/pkg/schedule/schedulers/shuffle_hot_region.go @@ -23,6 +23,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/statistics" "go.uber.org/zap" ) 
@@ -77,7 +78,7 @@ func (s *shuffleHotRegionScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { hotRegionAllowed := s.OpController.OperatorCount(operator.OpHotRegion) < s.conf.Limit regionAllowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetOpts().GetRegionScheduleLimit() leaderAllowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() @@ -93,7 +94,7 @@ func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster schedule.Cluster) return hotRegionAllowed && regionAllowed && leaderAllowed } -func (s *shuffleHotRegionScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleHotRegionScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { shuffleHotRegionCounter.Inc() rw := s.randomRWType() s.prepareForBalance(rw, cluster) @@ -101,7 +102,7 @@ func (s *shuffleHotRegionScheduler) Schedule(cluster schedule.Cluster, dryRun bo return operators, nil } -func (s *shuffleHotRegionScheduler) randomSchedule(cluster schedule.Cluster, loadDetail map[uint64]*statistics.StoreLoadDetail) []*operator.Operator { +func (s *shuffleHotRegionScheduler) randomSchedule(cluster scheduling.ClusterInformer, loadDetail map[uint64]*statistics.StoreLoadDetail) []*operator.Operator { for _, detail := range loadDetail { if len(detail.HotPeers) < 1 { continue diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go index 42b8dcd325d..3ee8f4f04e6 100644 --- a/pkg/schedule/schedulers/shuffle_leader.go +++ b/pkg/schedule/schedulers/shuffle_leader.go @@ -23,6 +23,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" ) const ( @@ -78,7 +79,7 @@ func (s *shuffleLeaderScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() @@ -86,7 +87,7 @@ func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster schedule.Cluster) boo return allowed } -func (s *shuffleLeaderScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleLeaderScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { // We shuffle leaders between stores by: // 1. random select a valid store. // 2. transfer a leader to the store. 
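(Aside, not part of the patch: a minimal sketch of what a scheduler looks like once it is written against the decoupled interface introduced in PATCH 1/2. The scheduler name below is invented purely for illustration; the parameter type scheduling.ClusterInformer, the embedded BaseScheduler with its OpController field, and the GetOpts().GetLeaderScheduleLimit() accessor are the ones visible in the hunks above.)

package schedulers

import (
	"github.com/tikv/pd/pkg/schedule/operator"
	"github.com/tikv/pd/pkg/schedule/plan"
	"github.com/tikv/pd/pkg/schedule/scheduling"
)

// noopScheduler is hypothetical and exists only to show the new signatures;
// it embeds BaseScheduler the same way the real schedulers in this package do.
type noopScheduler struct {
	*BaseScheduler
}

// IsScheduleAllowed now receives scheduling.ClusterInformer instead of
// schedule.Cluster, so schedulers no longer depend on pkg/schedule for the
// cluster abstraction.
func (s *noopScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool {
	return s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit()
}

// Schedule keeps its shape; only the cluster parameter type changes.
func (s *noopScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) {
	return nil, nil
}
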
diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go index 3555dc15160..8fed838aab1 100644 --- a/pkg/schedule/schedulers/shuffle_region.go +++ b/pkg/schedule/schedulers/shuffle_region.go @@ -24,6 +24,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" ) const ( @@ -80,7 +81,7 @@ func (s *shuffleRegionScheduler) EncodeConfig() ([]byte, error) { return s.conf.EncodeConfig() } -func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetOpts().GetRegionScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc() @@ -88,7 +89,7 @@ func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster schedule.Cluster) boo return allowed } -func (s *shuffleRegionScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleRegionScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { shuffleRegionCounter.Inc() region, oldPeer := s.scheduleRemovePeer(cluster) if region == nil { @@ -112,7 +113,7 @@ func (s *shuffleRegionScheduler) Schedule(cluster schedule.Cluster, dryRun bool) return []*operator.Operator{op}, nil } -func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster schedule.Cluster) (*core.RegionInfo, *metapb.Peer) { +func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster scheduling.ClusterInformer) (*core.RegionInfo, *metapb.Peer) { candidates := filter.NewCandidates(cluster.GetStores()). FilterSource(cluster.GetOpts(), nil, nil, s.filters...). Shuffle() @@ -144,7 +145,7 @@ func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster schedule.Cluster) (* return nil, nil } -func (s *shuffleRegionScheduler) scheduleAddPeer(cluster schedule.Cluster, region *core.RegionInfo, oldPeer *metapb.Peer) *metapb.Peer { +func (s *shuffleRegionScheduler) scheduleAddPeer(cluster scheduling.ClusterInformer, region *core.RegionInfo, oldPeer *metapb.Peer) *metapb.Peer { store := cluster.GetStore(oldPeer.GetStoreId()) if store == nil { return nil diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index 95831a1f4c1..03d9d8ca232 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -27,6 +27,7 @@ import ( "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/statistics/buckets" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/syncutil" @@ -166,7 +167,7 @@ func (s *splitBucketScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) } // IsScheduleAllowed return true if the sum of executing opSplit operator is less . 
-func (s *splitBucketScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *splitBucketScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { if !cluster.GetStoreConfig().IsEnableRegionBucket() { splitBucketDisableCounter.Inc() return false @@ -181,13 +182,13 @@ func (s *splitBucketScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool type splitBucketPlan struct { hotBuckets map[uint64][]*buckets.BucketStat - cluster schedule.Cluster + cluster scheduling.ClusterInformer conf *splitBucketSchedulerConfig hotRegionSplitSize int64 } // Schedule return operators if some bucket is too hot. -func (s *splitBucketScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *splitBucketScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { splitBucketScheduleCounter.Inc() conf := s.conf.Clone() plan := &splitBucketPlan{ diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go index ada7a026e1b..6af0545feac 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader.go +++ b/pkg/schedule/schedulers/transfer_witness_leader.go @@ -24,6 +24,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/scheduling" ) const ( @@ -67,16 +68,16 @@ func (s *trasferWitnessLeaderScheduler) GetType() string { return TransferWitnessLeaderType } -func (s *trasferWitnessLeaderScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *trasferWitnessLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { return true } -func (s *trasferWitnessLeaderScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *trasferWitnessLeaderScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { transferWitnessLeaderCounter.Inc() return s.scheduleTransferWitnessLeaderBatch(s.GetName(), s.GetType(), cluster, transferWitnessLeaderBatchSize), nil } -func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name, typ string, cluster schedule.Cluster, batchSize int) []*operator.Operator { +func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name, typ string, cluster scheduling.ClusterInformer, batchSize int) []*operator.Operator { var ops []*operator.Operator for i := 0; i < batchSize; i++ { select { @@ -98,7 +99,7 @@ func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name, return ops } -func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeader(name, typ string, cluster schedule.Cluster, region *core.RegionInfo) (*operator.Operator, error) { +func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeader(name, typ string, cluster scheduling.ClusterInformer, region *core.RegionInfo) (*operator.Operator, error) { var filters []filter.Filter unhealthyPeerStores := make(map[uint64]struct{}) for _, peer := range region.GetDownPeers() { diff --git a/pkg/schedule/schedulers/utils.go b/pkg/schedule/schedulers/utils.go index 7f2ba3281f5..a6626be2d62 100644 --- a/pkg/schedule/schedulers/utils.go +++ b/pkg/schedule/schedulers/utils.go @@ -26,6 +26,7 @@ import ( "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/schedule/scheduling" 
"github.com/tikv/pd/pkg/statistics" "go.uber.org/zap" ) @@ -43,7 +44,7 @@ const ( type solver struct { *balanceSchedulerPlan - schedule.Cluster + scheduling.ClusterInformer kind constant.ScheduleKind opInfluence operator.OpInfluence tolerantSizeRatio float64 @@ -54,10 +55,10 @@ type solver struct { targetScore float64 } -func newSolver(basePlan *balanceSchedulerPlan, kind constant.ScheduleKind, cluster schedule.Cluster, opInfluence operator.OpInfluence) *solver { +func newSolver(basePlan *balanceSchedulerPlan, kind constant.ScheduleKind, cluster scheduling.ClusterInformer, opInfluence operator.OpInfluence) *solver { return &solver{ balanceSchedulerPlan: basePlan, - Cluster: cluster, + ClusterInformer: cluster, kind: kind, opInfluence: opInfluence, tolerantSizeRatio: adjustTolerantRatio(cluster, kind), @@ -181,7 +182,7 @@ func (p *solver) getTolerantResource() int64 { return p.tolerantSource } -func adjustTolerantRatio(cluster schedule.Cluster, kind constant.ScheduleKind) float64 { +func adjustTolerantRatio(cluster scheduling.ClusterInformer, kind constant.ScheduleKind) float64 { var tolerantSizeRatio float64 switch c := cluster.(type) { case *schedule.RangeCluster: diff --git a/pkg/schedule/cluster.go b/pkg/schedule/scheduling/cluster_informer.go similarity index 58% rename from pkg/schedule/cluster.go rename to pkg/schedule/scheduling/cluster_informer.go index da4bf41ef24..4280edb7117 100644 --- a/pkg/schedule/cluster.go +++ b/pkg/schedule/scheduling/cluster_informer.go @@ -1,4 +1,4 @@ -// Copyright 2017 TiKV Project Authors. +// Copyright 2020 TiKV Project Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,29 +12,43 @@ // See the License for the specific language governing permissions and // limitations under the License. -package schedule +package scheduling import ( "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/pkg/schedule/operator" + "github.com/tikv/pd/pkg/id" + "github.com/tikv/pd/pkg/schedule/config" + "github.com/tikv/pd/pkg/schedule/labeler" + "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" + "github.com/tikv/pd/pkg/storage" ) -// Cluster provides an overview of a cluster's regions distribution. -type Cluster interface { - core.RegionSetInformer - core.StoreSetInformer - core.StoreSetController - +// ClusterInformer provides the necessary information of a cluster. 
+type ClusterInformer interface { + RegionHealthCluster statistics.RegionStatInformer statistics.StoreStatInformer buckets.BucketStatInformer - operator.ClusterInformer - + GetBasicCluster() *core.BasicCluster + GetStoreConfig() config.StoreConfig + GetAllocator() id.Allocator + GetRegionLabeler() *labeler.RegionLabeler + GetStorage() storage.Storage RemoveScheduler(name string) error AddSuspectRegions(ids ...uint64) SetHotPendingInfluenceMetrics(storeLabel, rwTy, dim string, load float64) RecordOpStepWithTTL(regionID uint64) } + +// RegionHealthCluster is an aggregate interface that wraps multiple interfaces +type RegionHealthCluster interface { + core.StoreSetInformer + core.StoreSetController + core.RegionSetInformer + + GetOpts() config.Config + GetRuleManager() *placement.RuleManager +} diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go index 348f8ca971f..0cbd1a2e95f 100644 --- a/plugin/scheduler_example/evict_leader.go +++ b/plugin/scheduler_example/evict_leader.go @@ -30,6 +30,7 @@ import ( "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" "github.com/tikv/pd/pkg/schedule/schedulers" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -95,7 +96,7 @@ type evictLeaderSchedulerConfig struct { mu syncutil.RWMutex storage endpoint.ConfigStorage StoreIDWitRanges map[uint64][]core.KeyRange `json:"store-id-ranges"` - cluster schedule.Cluster + cluster scheduling.ClusterInformer } func (conf *evictLeaderSchedulerConfig) BuildWithArgs(args []string) error { @@ -186,7 +187,7 @@ func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *evictLeaderScheduler) Prepare(cluster schedule.Cluster) error { +func (s *evictLeaderScheduler) Prepare(cluster scheduling.ClusterInformer) error { s.conf.mu.RLock() defer s.conf.mu.RUnlock() var res error @@ -198,7 +199,7 @@ func (s *evictLeaderScheduler) Prepare(cluster schedule.Cluster) error { return res } -func (s *evictLeaderScheduler) Cleanup(cluster schedule.Cluster) { +func (s *evictLeaderScheduler) Cleanup(cluster scheduling.ClusterInformer) { s.conf.mu.RLock() defer s.conf.mu.RUnlock() for id := range s.conf.StoreIDWitRanges { @@ -206,7 +207,7 @@ func (s *evictLeaderScheduler) Cleanup(cluster schedule.Cluster) { } } -func (s *evictLeaderScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *evictLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() @@ -214,7 +215,7 @@ func (s *evictLeaderScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool return allowed } -func (s *evictLeaderScheduler) Schedule(cluster schedule.Cluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictLeaderScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { ops := make([]*operator.Operator, 0, len(s.conf.StoreIDWitRanges)) s.conf.mu.RLock() defer s.conf.mu.RUnlock() diff --git a/server/cluster/coordinator_test.go b/server/cluster/coordinator_test.go index fe0ac426ba9..363084add54 100644 --- a/server/cluster/coordinator_test.go +++ b/server/cluster/coordinator_test.go @@ -37,6 +37,7 @@ import ( 
"github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/schedulers" + sche "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" @@ -1271,7 +1272,7 @@ type mockLimitScheduler struct { kind operator.OpKind } -func (s *mockLimitScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { +func (s *mockLimitScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { return s.counter.OperatorCount(s.kind) < s.limit } diff --git a/server/replication/replication_mode.go b/server/replication/replication_mode.go index f1933db16ca..183c496d6cf 100644 --- a/server/replication/replication_mode.go +++ b/server/replication/replication_mode.go @@ -29,7 +29,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/schedule" + "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/logutil" @@ -72,7 +72,7 @@ type ModeManager struct { syncutil.RWMutex config config.ReplicationModeConfig storage endpoint.ReplicationStatusStorage - cluster schedule.Cluster + cluster scheduling.ClusterInformer fileReplicater FileReplicater replicatedMembers []uint64 @@ -91,7 +91,7 @@ type ModeManager struct { } // NewReplicationModeManager creates the replicate mode manager. -func NewReplicationModeManager(config config.ReplicationModeConfig, storage endpoint.ReplicationStatusStorage, cluster schedule.Cluster, fileReplicater FileReplicater) (*ModeManager, error) { +func NewReplicationModeManager(config config.ReplicationModeConfig, storage endpoint.ReplicationStatusStorage, cluster scheduling.ClusterInformer, fileReplicater FileReplicater) (*ModeManager, error) { m := &ModeManager{ initTime: time.Now(), config: config, From 3b18d11b5e9a190c05a604131188645a8289eac8 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Mon, 22 May 2023 17:00:25 +0800 Subject: [PATCH 2/2] address the comment Signed-off-by: Ryan Leung --- pkg/keyspace/keyspace.go | 6 ++-- pkg/schedule/checker/checker_controller.go | 6 ++-- pkg/schedule/checker/joint_state_checker.go | 6 ++-- pkg/schedule/checker/learner_checker.go | 6 ++-- pkg/schedule/checker/merge_checker.go | 10 +++--- pkg/schedule/checker/priority_inspector.go | 6 ++-- pkg/schedule/checker/replica_checker.go | 6 ++-- pkg/schedule/checker/replica_strategy.go | 4 +-- pkg/schedule/checker/rule_checker.go | 8 ++--- pkg/schedule/checker/split_checker.go | 6 ++-- .../{scheduling => core}/cluster_informer.go | 4 +-- pkg/schedule/filter/healthy.go | 8 ++--- pkg/schedule/filter/region_filters.go | 12 +++---- pkg/schedule/operator/builder.go | 6 ++-- pkg/schedule/operator/create_operator.go | 34 +++++++++---------- pkg/schedule/operator/step.go | 30 ++++++++-------- pkg/schedule/operator_controller.go | 16 ++++----- pkg/schedule/range_cluster.go | 6 ++-- pkg/schedule/region_scatterer.go | 6 ++-- pkg/schedule/region_splitter.go | 10 +++--- pkg/schedule/scheduler.go | 10 +++--- pkg/schedule/schedulers/balance_leader.go | 6 ++-- pkg/schedule/schedulers/balance_region.go | 6 ++-- pkg/schedule/schedulers/balance_witness.go | 6 ++-- pkg/schedule/schedulers/base_scheduler.go | 6 ++-- pkg/schedule/schedulers/evict_leader.go | 16 ++++----- pkg/schedule/schedulers/evict_slow_store.go | 16 ++++----- pkg/schedule/schedulers/evict_slow_trend.go | 31 ++++++++--------- 
pkg/schedule/schedulers/grant_hot_region.go | 14 ++++---- pkg/schedule/schedulers/grant_leader.go | 12 +++---- pkg/schedule/schedulers/hot_region.go | 20 +++++------ pkg/schedule/schedulers/hot_region_config.go | 4 +-- pkg/schedule/schedulers/label.go | 6 ++-- pkg/schedule/schedulers/random_merge.go | 8 ++--- pkg/schedule/schedulers/scatter_range.go | 10 +++--- pkg/schedule/schedulers/shuffle_hot_region.go | 8 ++--- pkg/schedule/schedulers/shuffle_leader.go | 6 ++-- pkg/schedule/schedulers/shuffle_region.go | 10 +++--- pkg/schedule/schedulers/split_bucket.go | 8 ++--- .../schedulers/transfer_witness_leader.go | 10 +++--- pkg/schedule/schedulers/utils.go | 8 ++--- plugin/scheduler_example/evict_leader.go | 12 +++---- server/cluster/coordinator_test.go | 2 +- server/replication/replication_mode.go | 6 ++-- 44 files changed, 218 insertions(+), 219 deletions(-) rename pkg/schedule/{scheduling => core}/cluster_informer.go (96%) diff --git a/pkg/keyspace/keyspace.go b/pkg/keyspace/keyspace.go index 87eaa628517..f244f392a15 100644 --- a/pkg/keyspace/keyspace.go +++ b/pkg/keyspace/keyspace.go @@ -26,8 +26,8 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/id" "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/labeler" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" @@ -71,7 +71,7 @@ type Manager struct { // store is the storage for keyspace related information. store endpoint.KeyspaceStorage // rc is the raft cluster of the server. - cluster scheduling.ClusterInformer + cluster core.ClusterInformer // ctx is the context of the manager, to be used in transaction. ctx context.Context // config is the configurations of the manager. @@ -98,7 +98,7 @@ type CreateKeyspaceRequest struct { func NewKeyspaceManager( ctx context.Context, store endpoint.KeyspaceStorage, - cluster scheduling.ClusterInformer, + cluster core.ClusterInformer, idAllocator id.Allocator, config Config, kgm *GroupManager, diff --git a/pkg/schedule/checker/checker_controller.go b/pkg/schedule/checker/checker_controller.go index d95a695f2d6..eb2240efbaf 100644 --- a/pkg/schedule/checker/checker_controller.go +++ b/pkg/schedule/checker/checker_controller.go @@ -24,10 +24,10 @@ import ( "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/config" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/keyutil" ) @@ -38,7 +38,7 @@ var denyCheckersByLabelerCounter = schedule.LabelerEventCounter.WithLabelValues( // Controller is used to manage all checkers. type Controller struct { - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer conf config.Config opController *schedule.OperatorController learnerChecker *LearnerChecker @@ -54,7 +54,7 @@ type Controller struct { } // NewController create a new Controller. 
-func NewController(ctx context.Context, cluster scheduling.ClusterInformer, conf config.Config, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler, opController *schedule.OperatorController) *Controller { +func NewController(ctx context.Context, cluster sche.ClusterInformer, conf config.Config, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler, opController *schedule.OperatorController) *Controller { regionWaitingList := cache.NewDefaultCache(DefaultCacheSize) return &Controller{ cluster: cluster, diff --git a/pkg/schedule/checker/joint_state_checker.go b/pkg/schedule/checker/joint_state_checker.go index c2ef2af41ae..fdd24a5f3cd 100644 --- a/pkg/schedule/checker/joint_state_checker.go +++ b/pkg/schedule/checker/joint_state_checker.go @@ -19,14 +19,14 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" - "github.com/tikv/pd/pkg/schedule/scheduling" ) // JointStateChecker ensures region is in joint state will leave. type JointStateChecker struct { PauseController - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer } const jointStateCheckerName = "joint_state_checker" @@ -41,7 +41,7 @@ var ( ) // NewJointStateChecker creates a joint state checker. -func NewJointStateChecker(cluster scheduling.ClusterInformer) *JointStateChecker { +func NewJointStateChecker(cluster sche.ClusterInformer) *JointStateChecker { return &JointStateChecker{ cluster: cluster, } diff --git a/pkg/schedule/checker/learner_checker.go b/pkg/schedule/checker/learner_checker.go index dba310fe501..52132ca7018 100644 --- a/pkg/schedule/checker/learner_checker.go +++ b/pkg/schedule/checker/learner_checker.go @@ -18,14 +18,14 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" - "github.com/tikv/pd/pkg/schedule/scheduling" ) // LearnerChecker ensures region has a learner will be promoted. type LearnerChecker struct { PauseController - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer } var ( @@ -34,7 +34,7 @@ var ( ) // NewLearnerChecker creates a learner checker. -func NewLearnerChecker(cluster scheduling.ClusterInformer) *LearnerChecker { +func NewLearnerChecker(cluster sche.ClusterInformer) *LearnerChecker { return &LearnerChecker{ cluster: cluster, } diff --git a/pkg/schedule/checker/merge_checker.go b/pkg/schedule/checker/merge_checker.go index ec95660b3b6..b84a3b09d11 100644 --- a/pkg/schedule/checker/merge_checker.go +++ b/pkg/schedule/checker/merge_checker.go @@ -26,11 +26,11 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule/config" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/logutil" ) @@ -76,14 +76,14 @@ var ( // MergeChecker ensures region to merge with adjacent region when size is small type MergeChecker struct { PauseController - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer conf config.Config splitCache *cache.TTLUint64 startTime time.Time // it's used to judge whether server recently start. } // NewMergeChecker creates a merge checker. 
-func NewMergeChecker(ctx context.Context, cluster scheduling.ClusterInformer, conf config.Config) *MergeChecker { +func NewMergeChecker(ctx context.Context, cluster sche.ClusterInformer, conf config.Config) *MergeChecker { splitCache := cache.NewIDTTL(ctx, time.Minute, conf.GetSplitMergeInterval()) return &MergeChecker{ cluster: cluster, @@ -250,7 +250,7 @@ func (m *MergeChecker) checkTarget(region, adjacent *core.RegionInfo) bool { } // AllowMerge returns true if two regions can be merged according to the key type. -func AllowMerge(cluster scheduling.ClusterInformer, region, adjacent *core.RegionInfo) bool { +func AllowMerge(cluster sche.ClusterInformer, region, adjacent *core.RegionInfo) bool { var start, end []byte if bytes.Equal(region.GetEndKey(), adjacent.GetStartKey()) && len(region.GetEndKey()) != 0 { start, end = region.GetStartKey(), adjacent.GetEndKey() @@ -306,7 +306,7 @@ func isTableIDSame(region, adjacent *core.RegionInfo) bool { // Check whether there is a peer of the adjacent region on an offline store, // while the source region has no peer on it. This is to prevent from bringing // any other peer into an offline store to slow down the offline process. -func checkPeerStore(cluster scheduling.ClusterInformer, region, adjacent *core.RegionInfo) bool { +func checkPeerStore(cluster sche.ClusterInformer, region, adjacent *core.RegionInfo) bool { regionStoreIDs := region.GetStoreIDs() for _, peer := range adjacent.GetPeers() { storeID := peer.GetStoreId() diff --git a/pkg/schedule/checker/priority_inspector.go b/pkg/schedule/checker/priority_inspector.go index 3913af809ee..adb94707033 100644 --- a/pkg/schedule/checker/priority_inspector.go +++ b/pkg/schedule/checker/priority_inspector.go @@ -20,8 +20,8 @@ import ( "github.com/tikv/pd/pkg/cache" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule/config" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/schedule/scheduling" ) // the default value of priority queue size @@ -29,13 +29,13 @@ const defaultPriorityQueueSize = 1280 // PriorityInspector ensures high priority region should run first type PriorityInspector struct { - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer conf config.Config queue *cache.PriorityQueue } // NewPriorityInspector creates a priority inspector. -func NewPriorityInspector(cluster scheduling.ClusterInformer, conf config.Config) *PriorityInspector { +func NewPriorityInspector(cluster sche.ClusterInformer, conf config.Config) *PriorityInspector { return &PriorityInspector{ cluster: cluster, conf: conf, diff --git a/pkg/schedule/checker/replica_checker.go b/pkg/schedule/checker/replica_checker.go index f8a36c99c06..f944993940f 100644 --- a/pkg/schedule/checker/replica_checker.go +++ b/pkg/schedule/checker/replica_checker.go @@ -24,8 +24,8 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule/config" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" - "github.com/tikv/pd/pkg/schedule/scheduling" "go.uber.org/zap" ) @@ -61,13 +61,13 @@ var ( // Location management, mainly used for cross data center deployment. type ReplicaChecker struct { PauseController - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer conf config.Config regionWaitingList cache.Cache } // NewReplicaChecker creates a replica checker. 
-func NewReplicaChecker(cluster scheduling.ClusterInformer, conf config.Config, regionWaitingList cache.Cache) *ReplicaChecker { +func NewReplicaChecker(cluster sche.ClusterInformer, conf config.Config, regionWaitingList cache.Cache) *ReplicaChecker { return &ReplicaChecker{ cluster: cluster, conf: conf, diff --git a/pkg/schedule/checker/replica_strategy.go b/pkg/schedule/checker/replica_strategy.go index 03b35606ce7..27e6301c3dc 100644 --- a/pkg/schedule/checker/replica_strategy.go +++ b/pkg/schedule/checker/replica_strategy.go @@ -18,8 +18,8 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" - "github.com/tikv/pd/pkg/schedule/scheduling" "go.uber.org/zap" ) @@ -27,7 +27,7 @@ import ( // exists to allow replica_checker and rule_checker to reuse common logics. type ReplicaStrategy struct { checkerName string // replica-checker / rule-checker - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer locationLabels []string isolationLevel string region *core.RegionInfo diff --git a/pkg/schedule/checker/rule_checker.go b/pkg/schedule/checker/rule_checker.go index 45509df9ad9..7c1e542cc7f 100644 --- a/pkg/schedule/checker/rule_checker.go +++ b/pkg/schedule/checker/rule_checker.go @@ -27,10 +27,10 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/versioninfo" "go.uber.org/zap" ) @@ -81,7 +81,7 @@ var ( // RuleChecker fix/improve region by placement rules. type RuleChecker struct { PauseController - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer ruleManager *placement.RuleManager name string regionWaitingList cache.Cache @@ -91,7 +91,7 @@ type RuleChecker struct { } // NewRuleChecker creates a checker instance. -func NewRuleChecker(ctx context.Context, cluster scheduling.ClusterInformer, ruleManager *placement.RuleManager, regionWaitingList cache.Cache) *RuleChecker { +func NewRuleChecker(ctx context.Context, cluster sche.ClusterInformer, ruleManager *placement.RuleManager, regionWaitingList cache.Cache) *RuleChecker { return &RuleChecker{ cluster: cluster, ruleManager: ruleManager, @@ -572,7 +572,7 @@ func (o *recorder) incOfflineLeaderCount(storeID uint64) { // Offline is triggered manually and only appears when the node makes some adjustments. here is an operator timeout / 2. var offlineCounterTTL = 5 * time.Minute -func (o *recorder) refresh(cluster scheduling.ClusterInformer) { +func (o *recorder) refresh(cluster sche.ClusterInformer) { // re-count the offlineLeaderCounter if the store is already tombstone or store is gone. 
if len(o.offlineLeaderCounter) > 0 && time.Since(o.lastUpdateTime) > offlineCounterTTL { needClean := false diff --git a/pkg/schedule/checker/split_checker.go b/pkg/schedule/checker/split_checker.go index 02db5514f0f..f3d0422e875 100644 --- a/pkg/schedule/checker/split_checker.go +++ b/pkg/schedule/checker/split_checker.go @@ -19,16 +19,16 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/schedule/scheduling" ) // SplitChecker splits regions when the key range spans across rule/label boundary. type SplitChecker struct { PauseController - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer ruleManager *placement.RuleManager labeler *labeler.RegionLabeler } @@ -42,7 +42,7 @@ var ( ) // NewSplitChecker creates a new SplitChecker. -func NewSplitChecker(cluster scheduling.ClusterInformer, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler) *SplitChecker { +func NewSplitChecker(cluster sche.ClusterInformer, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler) *SplitChecker { return &SplitChecker{ cluster: cluster, ruleManager: ruleManager, diff --git a/pkg/schedule/scheduling/cluster_informer.go b/pkg/schedule/core/cluster_informer.go similarity index 96% rename from pkg/schedule/scheduling/cluster_informer.go rename to pkg/schedule/core/cluster_informer.go index 4280edb7117..8588f184469 100644 --- a/pkg/schedule/scheduling/cluster_informer.go +++ b/pkg/schedule/core/cluster_informer.go @@ -1,4 +1,4 @@ -// Copyright 2020 TiKV Project Authors. +// Copyright 2017 TiKV Project Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package scheduling +package core import ( "github.com/tikv/pd/pkg/core" diff --git a/pkg/schedule/filter/healthy.go b/pkg/schedule/filter/healthy.go index c249bd3c432..ba4f196dc0b 100644 --- a/pkg/schedule/filter/healthy.go +++ b/pkg/schedule/filter/healthy.go @@ -16,7 +16,7 @@ package filter import ( "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/pkg/schedule/scheduling" + sche "github.com/tikv/pd/pkg/schedule/core" ) // IsRegionHealthy checks if a region is healthy for scheduling. It requires the @@ -42,17 +42,17 @@ func hasDownPeers(region *core.RegionInfo) bool { // IsRegionReplicated checks if a region is fully replicated. When placement // rules is enabled, its peers should fit corresponding rules. When placement // rules is disabled, it should have enough replicas and no any learner peer. 
-func IsRegionReplicated(cluster scheduling.RegionHealthCluster, region *core.RegionInfo) bool { +func IsRegionReplicated(cluster sche.RegionHealthCluster, region *core.RegionInfo) bool { if cluster.GetOpts().IsPlacementRulesEnabled() { return isRegionPlacementRuleSatisfied(cluster, region) } return isRegionReplicasSatisfied(cluster, region) } -func isRegionPlacementRuleSatisfied(cluster scheduling.RegionHealthCluster, region *core.RegionInfo) bool { +func isRegionPlacementRuleSatisfied(cluster sche.RegionHealthCluster, region *core.RegionInfo) bool { return cluster.GetRuleManager().FitRegion(cluster, region).IsSatisfied() } -func isRegionReplicasSatisfied(cluster scheduling.RegionHealthCluster, region *core.RegionInfo) bool { +func isRegionReplicasSatisfied(cluster sche.RegionHealthCluster, region *core.RegionInfo) bool { return len(region.GetLearners()) == 0 && len(region.GetPeers()) == cluster.GetOpts().GetMaxReplicas() } diff --git a/pkg/schedule/filter/region_filters.go b/pkg/schedule/filter/region_filters.go index 38b25dbb14f..88fd13dc78e 100644 --- a/pkg/schedule/filter/region_filters.go +++ b/pkg/schedule/filter/region_filters.go @@ -18,9 +18,9 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/core/storelimit" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/slice" ) @@ -100,12 +100,12 @@ func (f *regionDownFilter) Select(region *core.RegionInfo) *plan.Status { // RegionReplicatedFilter filters all unreplicated regions. type RegionReplicatedFilter struct { - cluster scheduling.RegionHealthCluster + cluster sche.RegionHealthCluster fit *placement.RegionFit } // NewRegionReplicatedFilter creates a RegionFilter that filters all unreplicated regions. -func NewRegionReplicatedFilter(cluster scheduling.RegionHealthCluster) RegionFilter { +func NewRegionReplicatedFilter(cluster sche.RegionHealthCluster) RegionFilter { return &RegionReplicatedFilter{cluster: cluster} } @@ -132,11 +132,11 @@ func (f *RegionReplicatedFilter) Select(region *core.RegionInfo) *plan.Status { } type regionEmptyFilter struct { - cluster scheduling.RegionHealthCluster + cluster sche.RegionHealthCluster } // NewRegionEmptyFilter returns creates a RegionFilter that filters all empty regions. -func NewRegionEmptyFilter(cluster scheduling.RegionHealthCluster) RegionFilter { +func NewRegionEmptyFilter(cluster sche.RegionHealthCluster) RegionFilter { return ®ionEmptyFilter{cluster: cluster} } @@ -148,7 +148,7 @@ func (f *regionEmptyFilter) Select(region *core.RegionInfo) *plan.Status { } // isEmptyRegionAllowBalance returns true if the region is not empty or the number of regions is too small. 
-func isEmptyRegionAllowBalance(cluster scheduling.RegionHealthCluster, region *core.RegionInfo) bool { +func isEmptyRegionAllowBalance(cluster sche.RegionHealthCluster, region *core.RegionInfo) bool { return region.GetApproximateSize() > core.EmptyRegionApproximateSize || cluster.GetRegionCount() < core.InitClusterRegionThreshold } diff --git a/pkg/schedule/operator/builder.go b/pkg/schedule/operator/builder.go index d3668fcec67..bde464d02ff 100644 --- a/pkg/schedule/operator/builder.go +++ b/pkg/schedule/operator/builder.go @@ -21,9 +21,9 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" "github.com/tikv/pd/pkg/core" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/pkg/versioninfo" ) @@ -40,7 +40,7 @@ import ( // according to various constraints. type Builder struct { // basic info - scheduling.ClusterInformer + sche.ClusterInformer desc string regionID uint64 regionEpoch *metapb.RegionEpoch @@ -92,7 +92,7 @@ func SkipPlacementRulesCheck(b *Builder) { } // NewBuilder creates a Builder. -func NewBuilder(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, opts ...BuilderOption) *Builder { +func NewBuilder(desc string, ci sche.ClusterInformer, region *core.RegionInfo, opts ...BuilderOption) *Builder { b := &Builder{ desc: desc, ClusterInformer: ci, diff --git a/pkg/schedule/operator/create_operator.go b/pkg/schedule/operator/create_operator.go index e1137e14aeb..b80bee2fb80 100644 --- a/pkg/schedule/operator/create_operator.go +++ b/pkg/schedule/operator/create_operator.go @@ -24,42 +24,42 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/logutil" "go.uber.org/zap" ) // CreateAddPeerOperator creates an operator that adds a new peer. -func CreateAddPeerOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer, kind OpKind) (*Operator, error) { +func CreateAddPeerOperator(desc string, ci sche.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region). AddPeer(peer). Build(kind) } // CreateDemoteVoterOperator creates an operator that demotes a voter -func CreateDemoteVoterOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { +func CreateDemoteVoterOperator(desc string, ci sche.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). DemoteVoter(peer.GetStoreId()). Build(0) } // CreatePromoteLearnerOperator creates an operator that promotes a learner. -func CreatePromoteLearnerOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { +func CreatePromoteLearnerOperator(desc string, ci sche.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). PromoteLearner(peer.GetStoreId()). Build(0) } // CreateRemovePeerOperator creates an operator that removes a peer from region. 
-func CreateRemovePeerOperator(desc string, ci scheduling.ClusterInformer, kind OpKind, region *core.RegionInfo, storeID uint64) (*Operator, error) { +func CreateRemovePeerOperator(desc string, ci sche.ClusterInformer, kind OpKind, region *core.RegionInfo, storeID uint64) (*Operator, error) { return NewBuilder(desc, ci, region). RemovePeer(storeID). Build(kind) } // CreateTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store. -func CreateTransferLeaderOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, targetStoreIDs []uint64, kind OpKind) (*Operator, error) { +func CreateTransferLeaderOperator(desc string, ci sche.ClusterInformer, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, targetStoreIDs []uint64, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region, SkipOriginJointStateCheck). SetLeader(targetStoreID). SetLeaders(targetStoreIDs). @@ -67,7 +67,7 @@ func CreateTransferLeaderOperator(desc string, ci scheduling.ClusterInformer, re } // CreateForceTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store forcible. -func CreateForceTransferLeaderOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, kind OpKind) (*Operator, error) { +func CreateForceTransferLeaderOperator(desc string, ci sche.ClusterInformer, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region, SkipOriginJointStateCheck, SkipPlacementRulesCheck). SetLeader(targetStoreID). EnableForceTargetLeader(). @@ -75,7 +75,7 @@ func CreateForceTransferLeaderOperator(desc string, ci scheduling.ClusterInforme } // CreateMoveRegionOperator creates an operator that moves a region to specified stores. -func CreateMoveRegionOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, kind OpKind, roles map[uint64]placement.PeerRoleType) (*Operator, error) { +func CreateMoveRegionOperator(desc string, ci sche.ClusterInformer, region *core.RegionInfo, kind OpKind, roles map[uint64]placement.PeerRoleType) (*Operator, error) { // construct the peers from roles oldPeers := region.GetPeers() peers := make(map[uint64]*metapb.Peer) @@ -97,7 +97,7 @@ func CreateMoveRegionOperator(desc string, ci scheduling.ClusterInformer, region } // CreateMovePeerOperator creates an operator that replaces an old peer with a new peer. -func CreateMovePeerOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer) (*Operator, error) { +func CreateMovePeerOperator(desc string, ci sche.ClusterInformer, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). RemovePeer(oldStore). AddPeer(peer). @@ -105,7 +105,7 @@ func CreateMovePeerOperator(desc string, ci scheduling.ClusterInformer, region * } // CreateMoveWitnessOperator creates an operator that replaces an old witness with a new witness. -func CreateMoveWitnessOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64) (*Operator, error) { +func CreateMoveWitnessOperator(desc string, ci sche.ClusterInformer, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64) (*Operator, error) { return NewBuilder(desc, ci, region). 
BecomeNonWitness(sourceStoreID). BecomeWitness(targetStoreID). @@ -113,7 +113,7 @@ func CreateMoveWitnessOperator(desc string, ci scheduling.ClusterInformer, regio } // CreateReplaceLeaderPeerOperator creates an operator that replaces an old peer with a new peer, and move leader from old store firstly. -func CreateReplaceLeaderPeerOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer, leader *metapb.Peer) (*Operator, error) { +func CreateReplaceLeaderPeerOperator(desc string, ci sche.ClusterInformer, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer, leader *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). RemovePeer(oldStore). AddPeer(peer). @@ -122,7 +122,7 @@ func CreateReplaceLeaderPeerOperator(desc string, ci scheduling.ClusterInformer, } // CreateMoveLeaderOperator creates an operator that replaces an old leader with a new leader. -func CreateMoveLeaderOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer) (*Operator, error) { +func CreateMoveLeaderOperator(desc string, ci sche.ClusterInformer, region *core.RegionInfo, kind OpKind, oldStore uint64, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). RemovePeer(oldStore). AddPeer(peer). @@ -157,7 +157,7 @@ func CreateSplitRegionOperator(desc string, region *core.RegionInfo, kind OpKind } // CreateMergeRegionOperator creates an operator that merge two region into one. -func CreateMergeRegionOperator(desc string, ci scheduling.ClusterInformer, source *core.RegionInfo, target *core.RegionInfo, kind OpKind) ([]*Operator, error) { +func CreateMergeRegionOperator(desc string, ci sche.ClusterInformer, source *core.RegionInfo, target *core.RegionInfo, kind OpKind) ([]*Operator, error) { if core.IsInJointState(source.GetPeers()...) || core.IsInJointState(target.GetPeers()...) { return nil, errors.Errorf("cannot merge regions which are in joint state") } @@ -215,7 +215,7 @@ func isRegionMatch(a, b *core.RegionInfo) bool { } // CreateScatterRegionOperator creates an operator that scatters the specified region. -func CreateScatterRegionOperator(desc string, ci scheduling.ClusterInformer, origin *core.RegionInfo, targetPeers map[uint64]*metapb.Peer, targetLeader uint64) (*Operator, error) { +func CreateScatterRegionOperator(desc string, ci sche.ClusterInformer, origin *core.RegionInfo, targetPeers map[uint64]*metapb.Peer, targetLeader uint64) (*Operator, error) { // randomly pick a leader. var ids []uint64 for id, peer := range targetPeers { @@ -243,7 +243,7 @@ func CreateScatterRegionOperator(desc string, ci scheduling.ClusterInformer, ori const OpDescLeaveJointState = "leave-joint-state" // CreateLeaveJointStateOperator creates an operator that let region leave joint state. -func CreateLeaveJointStateOperator(desc string, ci scheduling.ClusterInformer, origin *core.RegionInfo) (*Operator, error) { +func CreateLeaveJointStateOperator(desc string, ci sche.ClusterInformer, origin *core.RegionInfo) (*Operator, error) { b := NewBuilder(desc, ci, origin, SkipOriginJointStateCheck, SkipPlacementRulesCheck) if b.err == nil && !core.IsInJointState(origin.GetPeers()...) 
{ @@ -303,14 +303,14 @@ func CreateLeaveJointStateOperator(desc string, ci scheduling.ClusterInformer, o } // CreateWitnessPeerOperator creates an operator that set a follower or learner peer with witness -func CreateWitnessPeerOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { +func CreateWitnessPeerOperator(desc string, ci sche.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). BecomeWitness(peer.GetStoreId()). Build(OpWitness) } // CreateNonWitnessPeerOperator creates an operator that set a peer with non-witness -func CreateNonWitnessPeerOperator(desc string, ci scheduling.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { +func CreateNonWitnessPeerOperator(desc string, ci sche.ClusterInformer, region *core.RegionInfo, peer *metapb.Peer) (*Operator, error) { return NewBuilder(desc, ci, region). BecomeNonWitness(peer.GetStoreId()). Build(OpWitness) diff --git a/pkg/schedule/operator/step.go b/pkg/schedule/operator/step.go index 9cc24d5de33..c7af12c8763 100644 --- a/pkg/schedule/operator/step.go +++ b/pkg/schedule/operator/step.go @@ -27,7 +27,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/storelimit" - "github.com/tikv/pd/pkg/schedule/scheduling" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/utils/typeutil" "go.uber.org/zap" ) @@ -54,7 +54,7 @@ type OpStep interface { fmt.Stringer ConfVerChanged(region *core.RegionInfo) uint64 IsFinish(region *core.RegionInfo) bool - CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error + CheckInProgress(ci sche.ClusterInformer, region *core.RegionInfo) error Influence(opInfluence OpInfluence, region *core.RegionInfo) Timeout(regionSize int64) time.Duration GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *pdpb.RegionHeartbeatResponse @@ -88,7 +88,7 @@ func (tl TransferLeader) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (tl TransferLeader) CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error { +func (tl TransferLeader) CheckInProgress(ci sche.ClusterInformer, region *core.RegionInfo) error { errList := make([]error, 0, len(tl.ToStores)+1) for _, storeID := range append(tl.ToStores, tl.ToStore) { peer := region.GetStorePeer(tl.ToStore) @@ -193,7 +193,7 @@ func (ap AddPeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) { } // CheckInProgress checks if the step is in the progress of advancing. -func (ap AddPeer) CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error { +func (ap AddPeer) CheckInProgress(ci sche.ClusterInformer, region *core.RegionInfo) error { if err := validateStore(ci, ap.ToStore); err != nil { return err } @@ -247,7 +247,7 @@ func (bw BecomeWitness) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (bw BecomeWitness) CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error { +func (bw BecomeWitness) CheckInProgress(ci sche.ClusterInformer, region *core.RegionInfo) error { if err := validateStore(ci, bw.StoreID); err != nil { return err } @@ -309,7 +309,7 @@ func (bn BecomeNonWitness) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. 
-func (bn BecomeNonWitness) CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error { +func (bn BecomeNonWitness) CheckInProgress(ci sche.ClusterInformer, region *core.RegionInfo) error { if err := validateStore(ci, bn.StoreID); err != nil { return err } @@ -395,7 +395,7 @@ func (bsw BatchSwitchWitness) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (bsw BatchSwitchWitness) CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error { +func (bsw BatchSwitchWitness) CheckInProgress(ci sche.ClusterInformer, region *core.RegionInfo) error { for _, w := range bsw.ToWitnesses { if err := w.CheckInProgress(ci, region); err != nil { return err @@ -478,7 +478,7 @@ func (al AddLearner) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (al AddLearner) CheckInProgress(ci scheduling.ClusterInformer, region *core.RegionInfo) error { +func (al AddLearner) CheckInProgress(ci sche.ClusterInformer, region *core.RegionInfo) error { if err := validateStore(ci, al.ToStore); err != nil { return err } @@ -564,7 +564,7 @@ func (pl PromoteLearner) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (pl PromoteLearner) CheckInProgress(_ scheduling.ClusterInformer, region *core.RegionInfo) error { +func (pl PromoteLearner) CheckInProgress(_ sche.ClusterInformer, region *core.RegionInfo) error { peer := region.GetStorePeer(pl.ToStore) if peer.GetId() != pl.PeerID { return errors.New("peer does not exist") @@ -615,7 +615,7 @@ func (rp RemovePeer) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (rp RemovePeer) CheckInProgress(_ scheduling.ClusterInformer, region *core.RegionInfo) error { +func (rp RemovePeer) CheckInProgress(_ sche.ClusterInformer, region *core.RegionInfo) error { if rp.FromStore == region.GetLeader().GetStoreId() { return errors.New("cannot remove leader peer") } @@ -685,7 +685,7 @@ func (mr MergeRegion) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (mr MergeRegion) CheckInProgress(_ scheduling.ClusterInformer, _ *core.RegionInfo) error { +func (mr MergeRegion) CheckInProgress(_ sche.ClusterInformer, _ *core.RegionInfo) error { return nil } @@ -753,7 +753,7 @@ func (sr SplitRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo } // CheckInProgress checks if the step is in the progress of advancing. -func (sr SplitRegion) CheckInProgress(_ scheduling.ClusterInformer, _ *core.RegionInfo) error { +func (sr SplitRegion) CheckInProgress(_ sche.ClusterInformer, _ *core.RegionInfo) error { return nil } @@ -878,7 +878,7 @@ func (cpe ChangePeerV2Enter) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (cpe ChangePeerV2Enter) CheckInProgress(_ scheduling.ClusterInformer, region *core.RegionInfo) error { +func (cpe ChangePeerV2Enter) CheckInProgress(_ sche.ClusterInformer, region *core.RegionInfo) error { inJointState, notInJointState := false, false for _, pl := range cpe.PromoteLearners { peer := region.GetStorePeer(pl.ToStore) @@ -1007,7 +1007,7 @@ func (cpl ChangePeerV2Leave) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. 
-func (cpl ChangePeerV2Leave) CheckInProgress(_ scheduling.ClusterInformer, region *core.RegionInfo) error { +func (cpl ChangePeerV2Leave) CheckInProgress(_ sche.ClusterInformer, region *core.RegionInfo) error { inJointState, notInJointState, demoteLeader := false, false, false leaderStoreID := region.GetLeader().GetStoreId() @@ -1085,7 +1085,7 @@ func (cpl ChangePeerV2Leave) GetCmd(region *core.RegionInfo, useConfChangeV2 boo } } -func validateStore(ci scheduling.ClusterInformer, id uint64) error { +func validateStore(ci sche.ClusterInformer, id uint64) error { store := ci.GetBasicCluster().GetStore(id) if store == nil { return errors.New("target store does not exist") diff --git a/pkg/schedule/operator_controller.go b/pkg/schedule/operator_controller.go index 5ff7e699646..25fa334422b 100644 --- a/pkg/schedule/operator_controller.go +++ b/pkg/schedule/operator_controller.go @@ -29,9 +29,9 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/core/storelimit" "github.com/tikv/pd/pkg/errs" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/hbstream" "github.com/tikv/pd/pkg/schedule/operator" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/versioninfo" "go.uber.org/zap" @@ -59,7 +59,7 @@ var ( type OperatorController struct { syncutil.RWMutex ctx context.Context - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer operators map[uint64]*operator.Operator hbStreams *hbstream.HeartbeatStreams fastOperators *cache.TTLUint64 @@ -71,7 +71,7 @@ type OperatorController struct { } // NewOperatorController creates a OperatorController. -func NewOperatorController(ctx context.Context, cluster scheduling.ClusterInformer, hbStreams *hbstream.HeartbeatStreams) *OperatorController { +func NewOperatorController(ctx context.Context, cluster sche.ClusterInformer, hbStreams *hbstream.HeartbeatStreams) *OperatorController { return &OperatorController{ ctx: ctx, cluster: cluster, @@ -93,7 +93,7 @@ func (oc *OperatorController) Ctx() context.Context { } // GetCluster exports cluster to evict-scheduler for check store status. -func (oc *OperatorController) GetCluster() scheduling.ClusterInformer { +func (oc *OperatorController) GetCluster() sche.ClusterInformer { oc.RLock() defer oc.RUnlock() return oc.cluster @@ -716,7 +716,7 @@ func (oc *OperatorController) OperatorCount(kind operator.OpKind) uint64 { } // GetOpInfluence gets OpInfluence. 
-func (oc *OperatorController) GetOpInfluence(cluster scheduling.ClusterInformer) operator.OpInfluence { +func (oc *OperatorController) GetOpInfluence(cluster sche.ClusterInformer) operator.OpInfluence { influence := operator.OpInfluence{ StoresInfluence: make(map[uint64]*operator.StoreInfluence), } @@ -734,7 +734,7 @@ func (oc *OperatorController) GetOpInfluence(cluster scheduling.ClusterInformer) } // GetFastOpInfluence get fast finish operator influence -func (oc *OperatorController) GetFastOpInfluence(cluster scheduling.ClusterInformer, influence operator.OpInfluence) { +func (oc *OperatorController) GetFastOpInfluence(cluster sche.ClusterInformer, influence operator.OpInfluence) { for _, id := range oc.fastOperators.GetAllID() { value, ok := oc.fastOperators.Get(id) if !ok { @@ -749,13 +749,13 @@ func (oc *OperatorController) GetFastOpInfluence(cluster scheduling.ClusterInfor } // AddOpInfluence add operator influence for cluster -func AddOpInfluence(op *operator.Operator, influence operator.OpInfluence, cluster scheduling.ClusterInformer) { +func AddOpInfluence(op *operator.Operator, influence operator.OpInfluence, cluster sche.ClusterInformer) { region := cluster.GetRegion(op.RegionID()) op.TotalInfluence(influence, region) } // NewTotalOpInfluence creates a OpInfluence. -func NewTotalOpInfluence(operators []*operator.Operator, cluster scheduling.ClusterInformer) operator.OpInfluence { +func NewTotalOpInfluence(operators []*operator.Operator, cluster sche.ClusterInformer) operator.OpInfluence { influence := *operator.NewOpInfluence() for _, op := range operators { diff --git a/pkg/schedule/range_cluster.go b/pkg/schedule/range_cluster.go index 6eb5117290c..841d14982e4 100644 --- a/pkg/schedule/range_cluster.go +++ b/pkg/schedule/range_cluster.go @@ -17,19 +17,19 @@ package schedule import ( "github.com/docker/go-units" "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/pkg/schedule/scheduling" + sche "github.com/tikv/pd/pkg/schedule/core" ) // RangeCluster isolates the cluster by range. type RangeCluster struct { - scheduling.ClusterInformer + sche.ClusterInformer subCluster *core.BasicCluster // Collect all regions belong to the range. tolerantSizeRatio float64 } // GenRangeCluster gets a range cluster by specifying start key and end key. // The cluster can only know the regions within [startKey, endKey]. 
-func GenRangeCluster(cluster scheduling.ClusterInformer, startKey, endKey []byte) *RangeCluster { +func GenRangeCluster(cluster sche.ClusterInformer, startKey, endKey []byte) *RangeCluster { subCluster := core.NewBasicCluster() for _, r := range cluster.ScanRegions(startKey, endKey, -1) { origin, overlaps, rangeChanged := subCluster.SetRegion(r) diff --git a/pkg/schedule/region_scatterer.go b/pkg/schedule/region_scatterer.go index 920885269fe..090cf8dfce8 100644 --- a/pkg/schedule/region_scatterer.go +++ b/pkg/schedule/region_scatterer.go @@ -29,10 +29,10 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/utils/typeutil" "go.uber.org/zap" @@ -132,7 +132,7 @@ func (s *selectedStores) getDistributionByGroupLocked(group string) (map[uint64] type RegionScatterer struct { ctx context.Context name string - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer ordinaryEngine engineContext specialEngines sync.Map opController *OperatorController @@ -140,7 +140,7 @@ type RegionScatterer struct { // NewRegionScatterer creates a region scatterer. // RegionScatter is used for the `Lightning`, it will scatter the specified regions before import data. -func NewRegionScatterer(ctx context.Context, cluster scheduling.ClusterInformer, opController *OperatorController) *RegionScatterer { +func NewRegionScatterer(ctx context.Context, cluster sche.ClusterInformer, opController *OperatorController) *RegionScatterer { return &RegionScatterer{ ctx: ctx, name: regionScatterName, diff --git a/pkg/schedule/region_splitter.go b/pkg/schedule/region_splitter.go index d58a636c416..8cf5402dbb0 100644 --- a/pkg/schedule/region_splitter.go +++ b/pkg/schedule/region_splitter.go @@ -25,9 +25,9 @@ import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/typeutil" "go.uber.org/zap" @@ -45,7 +45,7 @@ type SplitRegionsHandler interface { } // NewSplitRegionsHandler return SplitRegionsHandler -func NewSplitRegionsHandler(cluster scheduling.ClusterInformer, oc *OperatorController) SplitRegionsHandler { +func NewSplitRegionsHandler(cluster sche.ClusterInformer, oc *OperatorController) SplitRegionsHandler { return &splitRegionsHandler{ cluster: cluster, oc: oc, @@ -54,12 +54,12 @@ func NewSplitRegionsHandler(cluster scheduling.ClusterInformer, oc *OperatorCont // RegionSplitter handles split regions type RegionSplitter struct { - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer handler SplitRegionsHandler } // NewRegionSplitter return a region splitter -func NewRegionSplitter(cluster scheduling.ClusterInformer, handler SplitRegionsHandler) *RegionSplitter { +func NewRegionSplitter(cluster sche.ClusterInformer, handler SplitRegionsHandler) *RegionSplitter { return &RegionSplitter{ cluster: cluster, handler: handler, @@ -178,7 +178,7 @@ func (r *RegionSplitter) checkRegionValid(region *core.RegionInfo) bool { } type splitRegionsHandler struct { - cluster scheduling.ClusterInformer 
+ cluster sche.ClusterInformer oc *OperatorController } diff --git a/pkg/schedule/scheduler.go b/pkg/schedule/scheduler.go index 49eb9c35e3a..5a857ff3583 100644 --- a/pkg/schedule/scheduler.go +++ b/pkg/schedule/scheduler.go @@ -24,9 +24,9 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule/config" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "go.uber.org/zap" ) @@ -40,10 +40,10 @@ type Scheduler interface { EncodeConfig() ([]byte, error) GetMinInterval() time.Duration GetNextInterval(interval time.Duration) time.Duration - Prepare(cluster scheduling.ClusterInformer) error - Cleanup(cluster scheduling.ClusterInformer) - Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) - IsScheduleAllowed(cluster scheduling.ClusterInformer) bool + Prepare(cluster sche.ClusterInformer) error + Cleanup(cluster sche.ClusterInformer) + Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) + IsScheduleAllowed(cluster sche.ClusterInformer) bool } // EncodeConfig encode the custom config for each scheduler. diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index 3ca644fb056..3b8244a9a3e 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -29,10 +29,10 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/reflectutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -227,7 +227,7 @@ func (l *balanceLeaderScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(l.conf) } -func (l *balanceLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (l *balanceLeaderScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { allowed := l.opController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpLeader.String()).Inc() @@ -327,7 +327,7 @@ func (cs *candidateStores) resortStoreWithPos(pos int) { } } -func (l *balanceLeaderScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (l *balanceLeaderScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { l.conf.mu.RLock() defer l.conf.mu.RUnlock() basePlan := NewBalanceSchedulerPlan() diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index 74a9217db2d..745ed402e68 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -24,10 +24,10 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "go.uber.org/zap" ) @@ -116,7 
+116,7 @@ func (s *balanceRegionScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *balanceRegionScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *balanceRegionScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { allowed := s.opController.OperatorCount(operator.OpRegion) < cluster.GetOpts().GetRegionScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc() @@ -124,7 +124,7 @@ func (s *balanceRegionScheduler) IsScheduleAllowed(cluster scheduling.ClusterInf return allowed } -func (s *balanceRegionScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *balanceRegionScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { basePlan := NewBalanceSchedulerPlan() var collector *plan.Collector if dryRun { diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index be71233fb65..edbca9f5c0d 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ b/pkg/schedule/schedulers/balance_witness.go @@ -29,10 +29,10 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/reflectutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -212,7 +212,7 @@ func (b *balanceWitnessScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(b.conf) } -func (b *balanceWitnessScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (b *balanceWitnessScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { allowed := b.opController.OperatorCount(operator.OpWitness) < cluster.GetOpts().GetWitnessScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(b.GetType(), operator.OpWitness.String()).Inc() @@ -220,7 +220,7 @@ func (b *balanceWitnessScheduler) IsScheduleAllowed(cluster scheduling.ClusterIn return allowed } -func (b *balanceWitnessScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (b *balanceWitnessScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { b.conf.mu.RLock() defer b.conf.mu.RUnlock() basePlan := NewBalanceSchedulerPlan() diff --git a/pkg/schedule/schedulers/base_scheduler.go b/pkg/schedule/schedulers/base_scheduler.go index db1bebba6b4..c005e1346f2 100644 --- a/pkg/schedule/schedulers/base_scheduler.go +++ b/pkg/schedule/schedulers/base_scheduler.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" - "github.com/tikv/pd/pkg/schedule/scheduling" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/utils/typeutil" ) @@ -88,7 +88,7 @@ func (s *BaseScheduler) GetNextInterval(interval time.Duration) time.Duration { } // Prepare does some prepare work -func (s *BaseScheduler) Prepare(cluster scheduling.ClusterInformer) error { return nil } +func (s *BaseScheduler) Prepare(cluster sche.ClusterInformer) error { return nil } // Cleanup does some cleanup work -func (s *BaseScheduler) Cleanup(cluster scheduling.ClusterInformer) {} 
+func (s *BaseScheduler) Cleanup(cluster sche.ClusterInformer) {} diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index 803963ea11e..8b3cba3d2de 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -26,10 +26,10 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -60,7 +60,7 @@ type evictLeaderSchedulerConfig struct { mu syncutil.RWMutex storage endpoint.ConfigStorage StoreIDWithRanges map[uint64][]core.KeyRange `json:"store-id-ranges"` - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer } func (conf *evictLeaderSchedulerConfig) getStores() []uint64 { @@ -204,7 +204,7 @@ func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *evictLeaderScheduler) Prepare(cluster scheduling.ClusterInformer) error { +func (s *evictLeaderScheduler) Prepare(cluster sche.ClusterInformer) error { s.conf.mu.RLock() defer s.conf.mu.RUnlock() var res error @@ -216,7 +216,7 @@ func (s *evictLeaderScheduler) Prepare(cluster scheduling.ClusterInformer) error return res } -func (s *evictLeaderScheduler) Cleanup(cluster scheduling.ClusterInformer) { +func (s *evictLeaderScheduler) Cleanup(cluster sche.ClusterInformer) { s.conf.mu.RLock() defer s.conf.mu.RUnlock() for id := range s.conf.StoreIDWithRanges { @@ -224,7 +224,7 @@ func (s *evictLeaderScheduler) Cleanup(cluster scheduling.ClusterInformer) { } } -func (s *evictLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() @@ -232,7 +232,7 @@ func (s *evictLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInfor return allowed } -func (s *evictLeaderScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictLeaderScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { evictLeaderCounter.Inc() return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize), nil } @@ -257,7 +257,7 @@ type evictLeaderStoresConf interface { getKeyRangesByID(id uint64) []core.KeyRange } -func scheduleEvictLeaderBatch(name, typ string, cluster scheduling.ClusterInformer, conf evictLeaderStoresConf, batchSize int) []*operator.Operator { +func scheduleEvictLeaderBatch(name, typ string, cluster sche.ClusterInformer, conf evictLeaderStoresConf, batchSize int) []*operator.Operator { var ops []*operator.Operator for i := 0; i < batchSize; i++ { once := scheduleEvictLeaderOnce(name, typ, cluster, conf) @@ -274,7 +274,7 @@ func scheduleEvictLeaderBatch(name, typ string, cluster scheduling.ClusterInform return ops } -func scheduleEvictLeaderOnce(name, typ string, cluster scheduling.ClusterInformer, conf evictLeaderStoresConf) []*operator.Operator { +func 
scheduleEvictLeaderOnce(name, typ string, cluster sche.ClusterInformer, conf evictLeaderStoresConf) []*operator.Operator { stores := conf.getStores() ops := make([]*operator.Operator, 0, len(stores)) for _, storeID := range stores { diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index e0ceee57741..6fac1704bf0 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -20,9 +20,9 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "go.uber.org/zap" ) @@ -110,7 +110,7 @@ func (s *evictSlowStoreScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *evictSlowStoreScheduler) Prepare(cluster scheduling.ClusterInformer) error { +func (s *evictSlowStoreScheduler) Prepare(cluster sche.ClusterInformer) error { evictStore := s.conf.evictStore() if evictStore != 0 { return cluster.SlowStoreEvicted(evictStore) @@ -118,11 +118,11 @@ func (s *evictSlowStoreScheduler) Prepare(cluster scheduling.ClusterInformer) er return nil } -func (s *evictSlowStoreScheduler) Cleanup(cluster scheduling.ClusterInformer) { +func (s *evictSlowStoreScheduler) Cleanup(cluster sche.ClusterInformer) { s.cleanupEvictLeader(cluster) } -func (s *evictSlowStoreScheduler) prepareEvictLeader(cluster scheduling.ClusterInformer, storeID uint64) error { +func (s *evictSlowStoreScheduler) prepareEvictLeader(cluster sche.ClusterInformer, storeID uint64) error { err := s.conf.setStoreAndPersist(storeID) if err != nil { log.Info("evict-slow-store-scheduler persist config failed", zap.Uint64("store-id", storeID)) @@ -132,7 +132,7 @@ func (s *evictSlowStoreScheduler) prepareEvictLeader(cluster scheduling.ClusterI return cluster.SlowStoreEvicted(storeID) } -func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster scheduling.ClusterInformer) { +func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster sche.ClusterInformer) { evictSlowStore, err := s.conf.clearAndPersist() if err != nil { log.Info("evict-slow-store-scheduler persist config failed", zap.Uint64("store-id", evictSlowStore)) @@ -143,11 +143,11 @@ func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster scheduling.ClusterI cluster.SlowStoreRecovered(evictSlowStore) } -func (s *evictSlowStoreScheduler) schedulerEvictLeader(cluster scheduling.ClusterInformer) []*operator.Operator { +func (s *evictSlowStoreScheduler) schedulerEvictLeader(cluster sche.ClusterInformer) []*operator.Operator { return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize) } -func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { if s.conf.evictStore() != 0 { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { @@ -158,7 +158,7 @@ func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster scheduling.ClusterIn return true } -func (s *evictSlowStoreScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictSlowStoreScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, 
[]plan.Plan) { evictSlowStoreCounter.Inc() var ops []*operator.Operator diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index b8ec5a8732d..14d35c21a24 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -18,17 +18,16 @@ import ( "strconv" "time" - "go.uber.org/zap" - "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" + "go.uber.org/zap" ) const ( @@ -109,7 +108,7 @@ func (conf *evictSlowTrendSchedulerConfig) setStoreAndPersist(id uint64) error { return conf.Persist() } -func (conf *evictSlowTrendSchedulerConfig) clearAndPersist(cluster scheduling.ClusterInformer) (oldID uint64, err error) { +func (conf *evictSlowTrendSchedulerConfig) clearAndPersist(cluster sche.ClusterInformer) (oldID uint64, err error) { oldID = conf.evictedStore() if oldID == 0 { return @@ -141,7 +140,7 @@ func (s *evictSlowTrendScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *evictSlowTrendScheduler) Prepare(cluster scheduling.ClusterInformer) error { +func (s *evictSlowTrendScheduler) Prepare(cluster sche.ClusterInformer) error { evictedStoreID := s.conf.evictedStore() if evictedStoreID == 0 { return nil @@ -149,11 +148,11 @@ func (s *evictSlowTrendScheduler) Prepare(cluster scheduling.ClusterInformer) er return cluster.SlowTrendEvicted(evictedStoreID) } -func (s *evictSlowTrendScheduler) Cleanup(cluster scheduling.ClusterInformer) { +func (s *evictSlowTrendScheduler) Cleanup(cluster sche.ClusterInformer) { s.cleanupEvictLeader(cluster) } -func (s *evictSlowTrendScheduler) prepareEvictLeader(cluster scheduling.ClusterInformer, storeID uint64) error { +func (s *evictSlowTrendScheduler) prepareEvictLeader(cluster sche.ClusterInformer, storeID uint64) error { err := s.conf.setStoreAndPersist(storeID) if err != nil { log.Info("evict-slow-trend-scheduler persist config failed", zap.Uint64("store-id", storeID)) @@ -162,7 +161,7 @@ func (s *evictSlowTrendScheduler) prepareEvictLeader(cluster scheduling.ClusterI return cluster.SlowTrendEvicted(storeID) } -func (s *evictSlowTrendScheduler) cleanupEvictLeader(cluster scheduling.ClusterInformer) { +func (s *evictSlowTrendScheduler) cleanupEvictLeader(cluster sche.ClusterInformer) { evictedStoreID, err := s.conf.clearAndPersist(cluster) if err != nil { log.Info("evict-slow-trend-scheduler persist config failed", zap.Uint64("store-id", evictedStoreID)) @@ -172,7 +171,7 @@ func (s *evictSlowTrendScheduler) cleanupEvictLeader(cluster scheduling.ClusterI } } -func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster scheduling.ClusterInformer) []*operator.Operator { +func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster sche.ClusterInformer) []*operator.Operator { store := cluster.GetStore(s.conf.evictedStore()) if store == nil { return nil @@ -181,7 +180,7 @@ func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster scheduling.Cluster return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize) } -func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster 
sche.ClusterInformer) bool { if s.conf.evictedStore() == 0 { return true } @@ -192,7 +191,7 @@ func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster scheduling.ClusterIn return allowed } -func (s *evictSlowTrendScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictSlowTrendScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { schedulerCounter.WithLabelValues(s.GetName(), "schedule").Inc() var ops []*operator.Operator @@ -272,7 +271,7 @@ func newEvictSlowTrendScheduler(opController *schedule.OperatorController, conf } } -func chooseEvictCandidate(cluster scheduling.ClusterInformer) (slowStore *core.StoreInfo) { +func chooseEvictCandidate(cluster sche.ClusterInformer) (slowStore *core.StoreInfo) { stores := cluster.GetStores() if len(stores) < 3 { storeSlowTrendActionStatusGauge.WithLabelValues("cand.none:too-few").Inc() @@ -332,7 +331,7 @@ func chooseEvictCandidate(cluster scheduling.ClusterInformer) (slowStore *core.S return store } -func checkStoresAreUpdated(cluster scheduling.ClusterInformer, slowStoreID uint64, slowStoreRecordTS time.Time) bool { +func checkStoresAreUpdated(cluster sche.ClusterInformer, slowStoreID uint64, slowStoreRecordTS time.Time) bool { stores := cluster.GetStores() if len(stores) <= 1 { return false @@ -361,7 +360,7 @@ func checkStoresAreUpdated(cluster scheduling.ClusterInformer, slowStoreID uint6 return updatedStores >= expected } -func checkStoreSlowerThanOthers(cluster scheduling.ClusterInformer, target *core.StoreInfo) bool { +func checkStoreSlowerThanOthers(cluster sche.ClusterInformer, target *core.StoreInfo) bool { stores := cluster.GetStores() expected := (len(stores)*2 + 1) / 3 targetSlowTrend := target.GetSlowTrend() @@ -392,7 +391,7 @@ func checkStoreSlowerThanOthers(cluster scheduling.ClusterInformer, target *core return slowerThanStoresNum >= expected } -func checkStoreCanRecover(cluster scheduling.ClusterInformer, target *core.StoreInfo) bool { +func checkStoreCanRecover(cluster sche.ClusterInformer, target *core.StoreInfo) bool { /* // // This might not be necessary, @@ -415,7 +414,7 @@ func checkStoreCanRecover(cluster scheduling.ClusterInformer, target *core.Store return checkStoreFasterThanOthers(cluster, target) } -func checkStoreFasterThanOthers(cluster scheduling.ClusterInformer, target *core.StoreInfo) bool { +func checkStoreFasterThanOthers(cluster sche.ClusterInformer, target *core.StoreInfo) bool { stores := cluster.GetStores() expected := (len(stores) + 1) / 2 targetSlowTrend := target.GetSlowTrend() diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index d2b72aae30b..e5fa152bed7 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -26,10 +26,10 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/storage/endpoint" @@ -55,7 +55,7 @@ var ( type grantHotRegionSchedulerConfig struct { mu syncutil.RWMutex storage endpoint.ConfigStorage - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer StoreIDs []uint64 `json:"store-id"` StoreLeaderID 
uint64 `json:"store-leader-id"` } @@ -152,7 +152,7 @@ func (s *grantHotRegionScheduler) EncodeConfig() ([]byte, error) { // IsScheduleAllowed returns whether the scheduler is allowed to schedule. // TODO it should check if there is any scheduler such as evict or hot region scheduler -func (s *grantHotRegionScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *grantHotRegionScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { regionAllowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetOpts().GetRegionScheduleLimit() leaderAllowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !regionAllowed { @@ -226,14 +226,14 @@ func newGrantHotRegionHandler(config *grantHotRegionSchedulerConfig) http.Handle return router } -func (s *grantHotRegionScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *grantHotRegionScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { grantHotRegionCounter.Inc() rw := s.randomRWType() s.prepareForBalance(rw, cluster) return s.dispatch(rw, cluster), nil } -func (s *grantHotRegionScheduler) dispatch(typ statistics.RWType, cluster scheduling.ClusterInformer) []*operator.Operator { +func (s *grantHotRegionScheduler) dispatch(typ statistics.RWType, cluster sche.ClusterInformer) []*operator.Operator { stLoadInfos := s.stLoadInfos[buildResourceType(typ, constant.RegionKind)] infos := make([]*statistics.StoreLoadDetail, len(stLoadInfos)) index := 0 @@ -247,7 +247,7 @@ func (s *grantHotRegionScheduler) dispatch(typ statistics.RWType, cluster schedu return s.randomSchedule(cluster, infos) } -func (s *grantHotRegionScheduler) randomSchedule(cluster scheduling.ClusterInformer, srcStores []*statistics.StoreLoadDetail) (ops []*operator.Operator) { +func (s *grantHotRegionScheduler) randomSchedule(cluster sche.ClusterInformer, srcStores []*statistics.StoreLoadDetail) (ops []*operator.Operator) { isLeader := s.r.Int()%2 == 1 for _, srcStore := range srcStores { srcStoreID := srcStore.GetID() @@ -278,7 +278,7 @@ func (s *grantHotRegionScheduler) randomSchedule(cluster scheduling.ClusterInfor return nil } -func (s *grantHotRegionScheduler) transfer(cluster scheduling.ClusterInformer, regionID uint64, srcStoreID uint64, isLeader bool) (op *operator.Operator, err error) { +func (s *grantHotRegionScheduler) transfer(cluster sche.ClusterInformer, regionID uint64, srcStoreID uint64, isLeader bool) (op *operator.Operator, err error) { srcRegion := cluster.GetRegion(regionID) if srcRegion == nil || len(srcRegion.GetDownPeers()) != 0 || len(srcRegion.GetPendingPeers()) != 0 { return nil, errs.ErrRegionRuleNotFound diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go index 160bd05cc3f..dab40d174cc 100644 --- a/pkg/schedule/schedulers/grant_leader.go +++ b/pkg/schedule/schedulers/grant_leader.go @@ -25,10 +25,10 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -53,7 +53,7 @@ type grantLeaderSchedulerConfig struct { mu syncutil.RWMutex storage 
endpoint.ConfigStorage StoreIDWithRanges map[uint64][]core.KeyRange `json:"store-id-ranges"` - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer } func (conf *grantLeaderSchedulerConfig) BuildWithArgs(args []string) error { @@ -178,7 +178,7 @@ func (s *grantLeaderScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *grantLeaderScheduler) Prepare(cluster scheduling.ClusterInformer) error { +func (s *grantLeaderScheduler) Prepare(cluster sche.ClusterInformer) error { s.conf.mu.RLock() defer s.conf.mu.RUnlock() var res error @@ -190,7 +190,7 @@ func (s *grantLeaderScheduler) Prepare(cluster scheduling.ClusterInformer) error return res } -func (s *grantLeaderScheduler) Cleanup(cluster scheduling.ClusterInformer) { +func (s *grantLeaderScheduler) Cleanup(cluster sche.ClusterInformer) { s.conf.mu.RLock() defer s.conf.mu.RUnlock() for id := range s.conf.StoreIDWithRanges { @@ -198,7 +198,7 @@ func (s *grantLeaderScheduler) Cleanup(cluster scheduling.ClusterInformer) { } } -func (s *grantLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *grantLeaderScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() @@ -206,7 +206,7 @@ func (s *grantLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInfor return allowed } -func (s *grantLeaderScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *grantLeaderScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { grantLeaderCounter.Inc() s.conf.mu.RLock() defer s.conf.mu.RUnlock() diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index 22edf79cbf7..c4e2c3d90c7 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -33,10 +33,10 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/utils/keyutil" @@ -119,7 +119,7 @@ func newBaseHotScheduler(opController *schedule.OperatorController) *baseHotSche // prepareForBalance calculate the summary of pending Influence for each store and prepare the load detail for // each store, only update read or write load detail -func (h *baseHotScheduler) prepareForBalance(rw statistics.RWType, cluster scheduling.ClusterInformer) { +func (h *baseHotScheduler) prepareForBalance(rw statistics.RWType, cluster sche.ClusterInformer) { h.stInfos = statistics.SummaryStoreInfos(cluster.GetStores()) h.summaryPendingInfluence(cluster) h.storesLoads = cluster.GetStoresLoads() @@ -158,7 +158,7 @@ func (h *baseHotScheduler) prepareForBalance(rw statistics.RWType, cluster sched // summaryPendingInfluence calculate the summary of pending Influence for each store // and clean the region from regionInfluence if they have ended operator. // It makes each dim rate or count become `weight` times to the origin value. 
-func (h *baseHotScheduler) summaryPendingInfluence(cluster scheduling.ClusterInformer) { +func (h *baseHotScheduler) summaryPendingInfluence(cluster sche.ClusterInformer) { for id, p := range h.regionPendings { from := h.stInfos[p.from] to := h.stInfos[p.to] @@ -259,7 +259,7 @@ func (h *hotScheduler) GetNextInterval(interval time.Duration) time.Duration { return intervalGrow(h.GetMinInterval(), maxHotScheduleInterval, exponentialGrowth) } -func (h *hotScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (h *hotScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { allowed := h.OpController.OperatorCount(operator.OpHotRegion) < cluster.GetOpts().GetHotRegionScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(h.GetType(), operator.OpHotRegion.String()).Inc() @@ -267,13 +267,13 @@ func (h *hotScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) boo return allowed } -func (h *hotScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (h *hotScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { hotSchedulerCounter.Inc() rw := h.randomRWType() return h.dispatch(rw, cluster), nil } -func (h *hotScheduler) dispatch(typ statistics.RWType, cluster scheduling.ClusterInformer) []*operator.Operator { +func (h *hotScheduler) dispatch(typ statistics.RWType, cluster sche.ClusterInformer) []*operator.Operator { h.Lock() defer h.Unlock() h.prepareForBalance(typ, cluster) @@ -307,7 +307,7 @@ func (h *hotScheduler) tryAddPendingInfluence(op *operator.Operator, srcStore, d return true } -func (h *hotScheduler) balanceHotReadRegions(cluster scheduling.ClusterInformer) []*operator.Operator { +func (h *hotScheduler) balanceHotReadRegions(cluster sche.ClusterInformer) []*operator.Operator { leaderSolver := newBalanceSolver(h, cluster, statistics.Read, transferLeader) leaderOps := leaderSolver.solve() peerSolver := newBalanceSolver(h, cluster, statistics.Read, movePeer) @@ -350,7 +350,7 @@ func (h *hotScheduler) balanceHotReadRegions(cluster scheduling.ClusterInformer) return nil } -func (h *hotScheduler) balanceHotWriteRegions(cluster scheduling.ClusterInformer) []*operator.Operator { +func (h *hotScheduler) balanceHotWriteRegions(cluster sche.ClusterInformer) []*operator.Operator { // prefer to balance by peer s := h.r.Intn(100) switch { @@ -439,7 +439,7 @@ func isAvailableV1(s *solution) bool { } type balanceSolver struct { - scheduling.ClusterInformer + sche.ClusterInformer sche *hotScheduler stLoadDetail map[uint64]*statistics.StoreLoadDetail rwTy statistics.RWType @@ -575,7 +575,7 @@ func (bs *balanceSolver) getPriorities() []string { return []string{} } -func newBalanceSolver(sche *hotScheduler, cluster scheduling.ClusterInformer, rwTy statistics.RWType, opTy opType) *balanceSolver { +func newBalanceSolver(sche *hotScheduler, cluster sche.ClusterInformer, rwTy statistics.RWType, opTy opType) *balanceSolver { bs := &balanceSolver{ ClusterInformer: cluster, sche: sche, diff --git a/pkg/schedule/schedulers/hot_region_config.go b/pkg/schedule/schedulers/hot_region_config.go index 875c0792f89..eb5683621bf 100644 --- a/pkg/schedule/schedulers/hot_region_config.go +++ b/pkg/schedule/schedulers/hot_region_config.go @@ -25,7 +25,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" - "github.com/tikv/pd/pkg/schedule/scheduling" + sche "github.com/tikv/pd/pkg/schedule/core" 
"github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/storage/endpoint" @@ -434,7 +434,7 @@ func (conf *hotRegionSchedulerConfig) persistLocked() error { return conf.storage.SaveScheduleConfig(HotRegionName, data) } -func (conf *hotRegionSchedulerConfig) checkQuerySupport(cluster scheduling.ClusterInformer) bool { +func (conf *hotRegionSchedulerConfig) checkQuerySupport(cluster sche.ClusterInformer) bool { querySupport := versioninfo.IsFeatureSupported(cluster.GetOpts().GetClusterVersion(), versioninfo.HotScheduleWithQuery) conf.Lock() defer conf.Unlock() diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go index 63375fed27d..2642c889adc 100644 --- a/pkg/schedule/schedulers/label.go +++ b/pkg/schedule/schedulers/label.go @@ -21,10 +21,10 @@ import ( "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/config" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "go.uber.org/zap" ) @@ -76,7 +76,7 @@ func (s *labelScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *labelScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *labelScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() @@ -84,7 +84,7 @@ func (s *labelScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) b return allowed } -func (s *labelScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *labelScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { labelCounter.Inc() stores := cluster.GetStores() rejectLeaderStores := make(map[uint64]struct{}) diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go index ca60909f1b1..cbf348a4e46 100644 --- a/pkg/schedule/schedulers/random_merge.go +++ b/pkg/schedule/schedulers/random_merge.go @@ -23,10 +23,10 @@ import ( "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" "github.com/tikv/pd/pkg/schedule/checker" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" ) const ( @@ -78,7 +78,7 @@ func (s *randomMergeScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *randomMergeScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *randomMergeScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpMerge) < cluster.GetOpts().GetMergeScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpMerge.String()).Inc() @@ -86,7 +86,7 @@ func (s *randomMergeScheduler) IsScheduleAllowed(cluster scheduling.ClusterInfor return allowed } -func (s *randomMergeScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *randomMergeScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) 
([]*operator.Operator, []plan.Plan) { randomMergeCounter.Inc() store := filter.NewCandidates(cluster.GetStores()). @@ -129,7 +129,7 @@ func (s *randomMergeScheduler) Schedule(cluster scheduling.ClusterInformer, dryR return ops, nil } -func (s *randomMergeScheduler) allowMerge(cluster scheduling.ClusterInformer, region, target *core.RegionInfo) bool { +func (s *randomMergeScheduler) allowMerge(cluster sche.ClusterInformer, region, target *core.RegionInfo) bool { if !filter.IsRegionHealthy(region) || !filter.IsRegionHealthy(target) { return false } diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go index 1b81335b286..fef250fac29 100644 --- a/pkg/schedule/schedulers/scatter_range.go +++ b/pkg/schedule/schedulers/scatter_range.go @@ -23,9 +23,9 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -169,11 +169,11 @@ func (l *scatterRangeScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(l.config) } -func (l *scatterRangeScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (l *scatterRangeScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { return l.allowBalanceLeader(cluster) || l.allowBalanceRegion(cluster) } -func (l *scatterRangeScheduler) allowBalanceLeader(cluster scheduling.ClusterInformer) bool { +func (l *scatterRangeScheduler) allowBalanceLeader(cluster sche.ClusterInformer) bool { allowed := l.OpController.OperatorCount(operator.OpRange) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpLeader.String()).Inc() @@ -181,7 +181,7 @@ func (l *scatterRangeScheduler) allowBalanceLeader(cluster scheduling.ClusterInf return allowed } -func (l *scatterRangeScheduler) allowBalanceRegion(cluster scheduling.ClusterInformer) bool { +func (l *scatterRangeScheduler) allowBalanceRegion(cluster sche.ClusterInformer) bool { allowed := l.OpController.OperatorCount(operator.OpRange) < cluster.GetOpts().GetRegionScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpRegion.String()).Inc() @@ -189,7 +189,7 @@ func (l *scatterRangeScheduler) allowBalanceRegion(cluster scheduling.ClusterInf return allowed } -func (l *scatterRangeScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (l *scatterRangeScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { scatterRangeCounter.Inc() // isolate a new cluster according to the key range c := schedule.GenRangeCluster(cluster, l.config.GetStartKey(), l.config.GetEndKey()) diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go index e73e48eb184..75525ae25e0 100644 --- a/pkg/schedule/schedulers/shuffle_hot_region.go +++ b/pkg/schedule/schedulers/shuffle_hot_region.go @@ -20,10 +20,10 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" 
"github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/statistics" "go.uber.org/zap" ) @@ -78,7 +78,7 @@ func (s *shuffleHotRegionScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { hotRegionAllowed := s.OpController.OperatorCount(operator.OpHotRegion) < s.conf.Limit regionAllowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetOpts().GetRegionScheduleLimit() leaderAllowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() @@ -94,7 +94,7 @@ func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster scheduling.Cluster return hotRegionAllowed && regionAllowed && leaderAllowed } -func (s *shuffleHotRegionScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleHotRegionScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { shuffleHotRegionCounter.Inc() rw := s.randomRWType() s.prepareForBalance(rw, cluster) @@ -102,7 +102,7 @@ func (s *shuffleHotRegionScheduler) Schedule(cluster scheduling.ClusterInformer, return operators, nil } -func (s *shuffleHotRegionScheduler) randomSchedule(cluster scheduling.ClusterInformer, loadDetail map[uint64]*statistics.StoreLoadDetail) []*operator.Operator { +func (s *shuffleHotRegionScheduler) randomSchedule(cluster sche.ClusterInformer, loadDetail map[uint64]*statistics.StoreLoadDetail) []*operator.Operator { for _, detail := range loadDetail { if len(detail.HotPeers) < 1 { continue diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go index 3ee8f4f04e6..f3c0b16bde1 100644 --- a/pkg/schedule/schedulers/shuffle_leader.go +++ b/pkg/schedule/schedulers/shuffle_leader.go @@ -20,10 +20,10 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" ) const ( @@ -79,7 +79,7 @@ func (s *shuffleLeaderScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() @@ -87,7 +87,7 @@ func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInf return allowed } -func (s *shuffleLeaderScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleLeaderScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { // We shuffle leaders between stores by: // 1. random select a valid store. // 2. transfer a leader to the store. 
diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go index 8fed838aab1..31f62be4414 100644 --- a/pkg/schedule/schedulers/shuffle_region.go +++ b/pkg/schedule/schedulers/shuffle_region.go @@ -21,10 +21,10 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" ) const ( @@ -81,7 +81,7 @@ func (s *shuffleRegionScheduler) EncodeConfig() ([]byte, error) { return s.conf.EncodeConfig() } -func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetOpts().GetRegionScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc() @@ -89,7 +89,7 @@ func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster scheduling.ClusterInf return allowed } -func (s *shuffleRegionScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleRegionScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { shuffleRegionCounter.Inc() region, oldPeer := s.scheduleRemovePeer(cluster) if region == nil { @@ -113,7 +113,7 @@ func (s *shuffleRegionScheduler) Schedule(cluster scheduling.ClusterInformer, dr return []*operator.Operator{op}, nil } -func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster scheduling.ClusterInformer) (*core.RegionInfo, *metapb.Peer) { +func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster sche.ClusterInformer) (*core.RegionInfo, *metapb.Peer) { candidates := filter.NewCandidates(cluster.GetStores()). FilterSource(cluster.GetOpts(), nil, nil, s.filters...). Shuffle() @@ -145,7 +145,7 @@ func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster scheduling.ClusterIn return nil, nil } -func (s *shuffleRegionScheduler) scheduleAddPeer(cluster scheduling.ClusterInformer, region *core.RegionInfo, oldPeer *metapb.Peer) *metapb.Peer { +func (s *shuffleRegionScheduler) scheduleAddPeer(cluster sche.ClusterInformer, region *core.RegionInfo, oldPeer *metapb.Peer) *metapb.Peer { store := cluster.GetStore(oldPeer.GetStoreId()) if store == nil { return nil diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index 03d9d8ca232..69881887623 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -25,9 +25,9 @@ import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/statistics/buckets" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/syncutil" @@ -167,7 +167,7 @@ func (s *splitBucketScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) } // IsScheduleAllowed return true if the sum of executing opSplit operator is less . 
-func (s *splitBucketScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *splitBucketScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { if !cluster.GetStoreConfig().IsEnableRegionBucket() { splitBucketDisableCounter.Inc() return false @@ -182,13 +182,13 @@ func (s *splitBucketScheduler) IsScheduleAllowed(cluster scheduling.ClusterInfor type splitBucketPlan struct { hotBuckets map[uint64][]*buckets.BucketStat - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer conf *splitBucketSchedulerConfig hotRegionSplitSize int64 } // Schedule return operators if some bucket is too hot. -func (s *splitBucketScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *splitBucketScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { splitBucketScheduleCounter.Inc() conf := s.conf.Clone() plan := &splitBucketPlan{ diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go index 6af0545feac..c66d467de42 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader.go +++ b/pkg/schedule/schedulers/transfer_witness_leader.go @@ -21,10 +21,10 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - "github.com/tikv/pd/pkg/schedule/scheduling" ) const ( @@ -68,16 +68,16 @@ func (s *trasferWitnessLeaderScheduler) GetType() string { return TransferWitnessLeaderType } -func (s *trasferWitnessLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *trasferWitnessLeaderScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { return true } -func (s *trasferWitnessLeaderScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *trasferWitnessLeaderScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { transferWitnessLeaderCounter.Inc() return s.scheduleTransferWitnessLeaderBatch(s.GetName(), s.GetType(), cluster, transferWitnessLeaderBatchSize), nil } -func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name, typ string, cluster scheduling.ClusterInformer, batchSize int) []*operator.Operator { +func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name, typ string, cluster sche.ClusterInformer, batchSize int) []*operator.Operator { var ops []*operator.Operator for i := 0; i < batchSize; i++ { select { @@ -99,7 +99,7 @@ func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name, return ops } -func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeader(name, typ string, cluster scheduling.ClusterInformer, region *core.RegionInfo) (*operator.Operator, error) { +func (s *trasferWitnessLeaderScheduler) scheduleTransferWitnessLeader(name, typ string, cluster sche.ClusterInformer, region *core.RegionInfo) (*operator.Operator, error) { var filters []filter.Filter unhealthyPeerStores := make(map[uint64]struct{}) for _, peer := range region.GetDownPeers() { diff --git a/pkg/schedule/schedulers/utils.go b/pkg/schedule/schedulers/utils.go index a6626be2d62..fa636d30d2f 100644 --- a/pkg/schedule/schedulers/utils.go +++ b/pkg/schedule/schedulers/utils.go @@ -24,9 +24,9 @@ 
import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/statistics" "go.uber.org/zap" ) @@ -44,7 +44,7 @@ const ( type solver struct { *balanceSchedulerPlan - scheduling.ClusterInformer + sche.ClusterInformer kind constant.ScheduleKind opInfluence operator.OpInfluence tolerantSizeRatio float64 @@ -55,7 +55,7 @@ type solver struct { targetScore float64 } -func newSolver(basePlan *balanceSchedulerPlan, kind constant.ScheduleKind, cluster scheduling.ClusterInformer, opInfluence operator.OpInfluence) *solver { +func newSolver(basePlan *balanceSchedulerPlan, kind constant.ScheduleKind, cluster sche.ClusterInformer, opInfluence operator.OpInfluence) *solver { return &solver{ balanceSchedulerPlan: basePlan, ClusterInformer: cluster, @@ -182,7 +182,7 @@ func (p *solver) getTolerantResource() int64 { return p.tolerantSource } -func adjustTolerantRatio(cluster scheduling.ClusterInformer, kind constant.ScheduleKind) float64 { +func adjustTolerantRatio(cluster sche.ClusterInformer, kind constant.ScheduleKind) float64 { var tolerantSizeRatio float64 switch c := cluster.(type) { case *schedule.RangeCluster: diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go index 0cbd1a2e95f..ee8f0ffed81 100644 --- a/plugin/scheduler_example/evict_leader.go +++ b/plugin/scheduler_example/evict_leader.go @@ -26,11 +26,11 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" "github.com/tikv/pd/pkg/schedule/schedulers" - "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -96,7 +96,7 @@ type evictLeaderSchedulerConfig struct { mu syncutil.RWMutex storage endpoint.ConfigStorage StoreIDWitRanges map[uint64][]core.KeyRange `json:"store-id-ranges"` - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer } func (conf *evictLeaderSchedulerConfig) BuildWithArgs(args []string) error { @@ -187,7 +187,7 @@ func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) { return schedule.EncodeConfig(s.conf) } -func (s *evictLeaderScheduler) Prepare(cluster scheduling.ClusterInformer) error { +func (s *evictLeaderScheduler) Prepare(cluster sche.ClusterInformer) error { s.conf.mu.RLock() defer s.conf.mu.RUnlock() var res error @@ -199,7 +199,7 @@ func (s *evictLeaderScheduler) Prepare(cluster scheduling.ClusterInformer) error return res } -func (s *evictLeaderScheduler) Cleanup(cluster scheduling.ClusterInformer) { +func (s *evictLeaderScheduler) Cleanup(cluster sche.ClusterInformer) { s.conf.mu.RLock() defer s.conf.mu.RUnlock() for id := range s.conf.StoreIDWitRanges { @@ -207,7 +207,7 @@ func (s *evictLeaderScheduler) Cleanup(cluster scheduling.ClusterInformer) { } } -func (s *evictLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInformer) bool { +func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.ClusterInformer) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetOpts().GetLeaderScheduleLimit() if !allowed { 
operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() @@ -215,7 +215,7 @@ func (s *evictLeaderScheduler) IsScheduleAllowed(cluster scheduling.ClusterInfor return allowed } -func (s *evictLeaderScheduler) Schedule(cluster scheduling.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictLeaderScheduler) Schedule(cluster sche.ClusterInformer, dryRun bool) ([]*operator.Operator, []plan.Plan) { ops := make([]*operator.Operator, 0, len(s.conf.StoreIDWitRanges)) s.conf.mu.RLock() defer s.conf.mu.RUnlock() diff --git a/server/cluster/coordinator_test.go b/server/cluster/coordinator_test.go index 363084add54..fd4337750f0 100644 --- a/server/cluster/coordinator_test.go +++ b/server/cluster/coordinator_test.go @@ -33,11 +33,11 @@ import ( "github.com/tikv/pd/pkg/core/storelimit" "github.com/tikv/pd/pkg/mock/mockhbstream" "github.com/tikv/pd/pkg/schedule" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/hbstream" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/schedulers" - sche "github.com/tikv/pd/pkg/schedule/scheduling" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" diff --git a/server/replication/replication_mode.go b/server/replication/replication_mode.go index 183c496d6cf..ba3aef9453e 100644 --- a/server/replication/replication_mode.go +++ b/server/replication/replication_mode.go @@ -29,7 +29,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/schedule/scheduling" + sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/logutil" @@ -72,7 +72,7 @@ type ModeManager struct { syncutil.RWMutex config config.ReplicationModeConfig storage endpoint.ReplicationStatusStorage - cluster scheduling.ClusterInformer + cluster sche.ClusterInformer fileReplicater FileReplicater replicatedMembers []uint64 @@ -91,7 +91,7 @@ type ModeManager struct { } // NewReplicationModeManager creates the replicate mode manager. -func NewReplicationModeManager(config config.ReplicationModeConfig, storage endpoint.ReplicationStatusStorage, cluster scheduling.ClusterInformer, fileReplicater FileReplicater) (*ModeManager, error) { +func NewReplicationModeManager(config config.ReplicationModeConfig, storage endpoint.ReplicationStatusStorage, cluster sche.ClusterInformer, fileReplicater FileReplicater) (*ModeManager, error) { m := &ModeManager{ initTime: time.Now(), config: config,