From cbba49ab32ddd82d86194abd8cf9c25d1db0c667 Mon Sep 17 00:00:00 2001
From: Ray Yan
Date: Tue, 16 Jul 2024 17:40:29 +0800
Subject: [PATCH 01/20] grpc: allow clients to send consecutive pings every 5 seconds (#8120)

close tikv/pd#8119

grpc: allow clients to send consecutive pings every 5 seconds

Signed-off-by: yongman

Co-authored-by: Hu#
---
 pkg/mcs/utils/util.go | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/pkg/mcs/utils/util.go b/pkg/mcs/utils/util.go
index fb78f0b4be3..844cf17fde4 100644
--- a/pkg/mcs/utils/util.go
+++ b/pkg/mcs/utils/util.go
@@ -42,6 +42,7 @@ import (
 	"go.etcd.io/etcd/pkg/types"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/keepalive"
 )

 const (
@@ -229,7 +230,14 @@ func StartGRPCAndHTTPServers(s server, serverReadyChan chan<- struct{}, l net.Li
 		httpListener = mux.Match(cmux.HTTP1())
 	}

-	grpcServer := grpc.NewServer()
+	grpcServer := grpc.NewServer(
+		// Allow clients to send consecutive pings every 5 seconds.
+		// The default value of MinTime is 5 minutes,
+		// which is too long compared with the 10-second keepalive time of TiKV's PD client.
+		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+			MinTime: 5 * time.Second,
+		}),
+	)
 	s.SetGRPCServer(grpcServer)
 	s.RegisterGRPCService(grpcServer)
 	diagnosticspb.RegisterDiagnosticsServer(grpcServer, s)

From ecaef0232a6a341531aef82dd8a98d062903ae01 Mon Sep 17 00:00:00 2001
From: Hu#
Date: Wed, 17 Jul 2024 11:34:30 +0800
Subject: [PATCH 02/20] middleware: fix panic in redirector waitForLeader (#8407)

close tikv/pd#8406

Signed-off-by: husharp
---
 pkg/utils/apiutil/serverapi/middleware.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/utils/apiutil/serverapi/middleware.go b/pkg/utils/apiutil/serverapi/middleware.go
index 0718702b5a5..9af8d234b34 100644
--- a/pkg/utils/apiutil/serverapi/middleware.go
+++ b/pkg/utils/apiutil/serverapi/middleware.go
@@ -270,7 +270,7 @@ func (h *redirector) waitForLeader(r *http.Request) (leader *pdpb.Member) {
 		}
 	case <-r.Context().Done():
 		return
-	case <-h.s.LoopContext().Done():
+	case <-h.s.Context().Done():
 		return
 	}
 }

From 5ec6af4030195f9e69d786193085c4326a4baeeb Mon Sep 17 00:00:00 2001
From: JmPotato
Date: Wed, 17 Jul 2024 13:37:28 +0800
Subject: [PATCH 03/20] rg/controller: optimize the fallback mechanism of the controller (#8402)

close tikv/pd#8388

This PR implements a more robust fallback method, which uses an independent
default group controller for each tombstone group.

Signed-off-by: JmPotato
Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com>
---
 .../resource_group/controller/controller.go   | 123 ++++++++++++------
 .../controller/controller_test.go             |  60 +++++----
 .../resourcemanager/resource_manager_test.go  |  32 ++---
 3 files changed, 137 insertions(+), 78 deletions(-)

diff --git a/client/resource_group/controller/controller.go b/client/resource_group/controller/controller.go
index 8d57c46e855..cc18817d9c5 100644
--- a/client/resource_group/controller/controller.go
+++ b/client/resource_group/controller/controller.go
@@ -357,33 +357,38 @@ func (c *ResourceGroupsController) Start(ctx context.Context) {
 					if err = proto.Unmarshal(item.Kv.Value, group); err != nil {
 						continue
 					}
-					if gc, ok := c.loadGroupController(group.Name); ok {
+					name := group.GetName()
+					gc, ok := c.loadGroupController(name)
+					if !ok {
+						continue
+					}
+					if !gc.tombstone.Load() {
 						gc.modifyMeta(group)
-						// If the resource group is marked as tombstone before, set it as active again.
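The PUT path above now rebuilds the controller and swaps it in atomically instead of flipping the tombstone flag back (the branch removed below). A minimal, self-contained sketch of the sync.Map.CompareAndSwap semantics the new code relies on, using hypothetical types rather than PD's own (requires Go 1.20+):

package main

import (
	"fmt"
	"sync"
)

type gcEntry struct{ tombstone bool }

func main() {
	var m sync.Map
	old := &gcEntry{tombstone: true}
	m.Store("rg1", old)
	// The swap succeeds only if the key still maps to `old`, so a racing
	// writer that already replaced the entry is never overwritten.
	if m.CompareAndSwap("rg1", old, &gcEntry{}) {
		fmt.Println("re-created controller for rg1")
	}
}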
- if swapped := gc.tombstone.CompareAndSwap(true, false); swapped { - resourceGroupStatusGauge.WithLabelValues(group.Name, gc.name).Set(1) - log.Info("[resource group controller] mark resource group as active", zap.String("name", group.Name)) - } + continue + } + // If the resource group is marked as tombstone before, re-create the resource group controller. + newGC, err := newGroupCostController(group, c.ruConfig, c.lowTokenNotifyChan, c.tokenBucketUpdateChan) + if err != nil { + log.Warn("[resource group controller] re-create resource group cost controller for tombstone failed", + zap.String("name", name), zap.Error(err)) + continue + } + if c.groupsController.CompareAndSwap(name, gc, newGC) { + log.Info("[resource group controller] re-create resource group cost controller for tombstone", + zap.String("name", name)) } case meta_storagepb.Event_DELETE: - if item.PrevKv != nil { - if err = proto.Unmarshal(item.PrevKv.Value, group); err != nil { - continue - } - // Do not delete the resource group immediately, just mark it as tombstone. - // For the requests that are still in progress, fallback to the default resource group. - if gc, ok := c.loadGroupController(group.Name); ok { - gc.tombstone.Store(true) - resourceGroupStatusGauge.DeleteLabelValues(group.Name, group.Name) - resourceGroupStatusGauge.WithLabelValues(group.Name, defaultResourceGroupName).Set(1) - log.Info("[resource group controller] mark resource group as tombstone", zap.String("name", group.Name)) - } - } else { - // Prev-kv is compacted means there must have been a delete event before this event, - // which means that this is just a duplicated event, so we can just ignore it. + // Prev-kv is compacted means there must have been a delete event before this event, + // which means that this is just a duplicated event, so we can just ignore it. + if item.PrevKv == nil { log.Info("[resource group controller] previous key-value pair has been compacted", zap.String("required-key", string(item.Kv.Key)), zap.String("value", string(item.Kv.Value))) + continue + } + if err = proto.Unmarshal(item.PrevKv.Value, group); err != nil { + continue } + c.tombstoneGroupCostController(group.GetName()) } } case resp, ok := <-watchConfigChannel: @@ -446,15 +451,23 @@ func (c *ResourceGroupsController) loadOrStoreGroupController(name string, gc *g return tmp.(*groupCostController), loaded } -// tryGetResourceGroup will try to get the resource group controller from local cache first, -// if the local cache misses, it will then call gRPC to fetch the resource group info from server. -func (c *ResourceGroupsController) tryGetResourceGroup(ctx context.Context, name string) (*groupCostController, error) { +// NewResourceGroupNotExistErr returns a new error that indicates the resource group does not exist. +// It's exported for testing. +func NewResourceGroupNotExistErr(name string) error { + return errors.Errorf("%s does not exist", name) +} + +// tryGetResourceGroupController will try to get the resource group controller from local cache first. +// If the local cache misses, it will then call gRPC to fetch the resource group info from the remote server. +// If `useTombstone` is true, it will return the resource group controller even if it is marked as tombstone. +func (c *ResourceGroupsController) tryGetResourceGroupController( + ctx context.Context, name string, useTombstone bool, +) (*groupCostController, error) { // Get from the local cache first. 
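The lookup below is the first leg of a double-checked creation pattern: probe the cache, fetch the group meta from the server on a miss, then LoadOrStore so concurrent callers converge on a single controller. A minimal standalone sketch of that pattern, with a stand-in for the gRPC fetch (names are illustrative, not PD code):

package main

import (
	"fmt"
	"sync"
)

var groups sync.Map

// fetchMeta stands in for the remote gRPC lookup.
func fetchMeta(name string) string { return "meta:" + name }

func getOrCreate(name string) string {
	if v, ok := groups.Load(name); ok {
		return v.(string) // fast path: already cached
	}
	meta := fetchMeta(name)
	// If two goroutines raced past the Load, only one stored value wins.
	v, _ := groups.LoadOrStore(name, meta)
	return v.(string)
}

func main() { fmt.Println(getOrCreate("rg1")) }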
gc, ok := c.loadGroupController(name) if ok { - // If the resource group is marked as tombstone, fallback to the default resource group. - if gc.tombstone.Load() && name != defaultResourceGroupName { - return c.tryGetResourceGroup(ctx, defaultResourceGroupName) + if !useTombstone && gc.tombstone.Load() { + return nil, NewResourceGroupNotExistErr(name) } return gc, nil } @@ -464,7 +477,7 @@ func (c *ResourceGroupsController) tryGetResourceGroup(ctx context.Context, name return nil, err } if group == nil { - return nil, errors.Errorf("%s does not exists", name) + return nil, NewResourceGroupNotExistErr(name) } // Check again to prevent initializing the same resource group concurrently. if gc, ok = c.loadGroupController(name); ok { @@ -476,14 +489,51 @@ func (c *ResourceGroupsController) tryGetResourceGroup(ctx context.Context, name return nil, err } // Check again to prevent initializing the same resource group concurrently. - gc, loaded := c.loadOrStoreGroupController(group.Name, gc) + gc, loaded := c.loadOrStoreGroupController(name, gc) if !loaded { resourceGroupStatusGauge.WithLabelValues(name, group.Name).Set(1) - log.Info("[resource group controller] create resource group cost controller", zap.String("name", group.GetName())) + log.Info("[resource group controller] create resource group cost controller", zap.String("name", name)) } return gc, nil } +// Do not delete the resource group immediately to prevent from interrupting the ongoing request, +// mark it as tombstone and create a default resource group controller for it. +func (c *ResourceGroupsController) tombstoneGroupCostController(name string) { + _, ok := c.loadGroupController(name) + if !ok { + return + } + // The default resource group controller should never be deleted. + if name == defaultResourceGroupName { + return + } + // Try to get the default group meta first. + defaultGC, err := c.tryGetResourceGroupController(c.loopCtx, defaultResourceGroupName, false) + if err != nil || defaultGC == nil { + log.Warn("[resource group controller] get default resource group meta for tombstone failed", + zap.String("name", name), zap.Error(err)) + // Directly delete the resource group controller if the default group is not available. + c.groupsController.Delete(name) + return + } + // Create a default resource group controller for the tombstone resource group independently. + gc, err := newGroupCostController(defaultGC.getMeta(), c.ruConfig, c.lowTokenNotifyChan, c.tokenBucketUpdateChan) + if err != nil { + log.Warn("[resource group controller] create default resource group cost controller for tombstone failed", + zap.String("name", name), zap.Error(err)) + // Directly delete the resource group controller if the default group controller cannot be created. + c.groupsController.Delete(name) + return + } + gc.tombstone.Store(true) + c.groupsController.Store(name, gc) + // Its metrics will be deleted in the cleanup process. 
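The gauge writes in this function, paired with DeleteLabelValues in the cleanup pass, follow the standard lifecycle for labeled Prometheus series: a child series appears on first WithLabelValues and must be removed explicitly. A minimal sketch assuming the client_golang API; the metric name here is hypothetical:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	status := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "resource_group_status", // hypothetical metric name
		Help: "status of each resource group controller",
	}, []string{"name", "group"})
	prometheus.MustRegister(status)

	status.WithLabelValues("rg1", "rg1").Set(2) // e.g. 2 = tombstone
	status.DeleteLabelValues("rg1", "rg1")      // drop the series during cleanup
}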
+	resourceGroupStatusGauge.WithLabelValues(name, name).Set(2)
+	log.Info("[resource group controller] default resource group cost controller created for tombstone",
+		zap.String("name", name))
+}
+
 func (c *ResourceGroupsController) cleanUpResourceGroup() {
 	c.groupsController.Range(func(key, value any) bool {
 		resourceGroupName := key.(string)
@@ -496,7 +546,6 @@ func (c *ResourceGroupsController) cleanUpResourceGroup() {
 		if gc.inactive || gc.tombstone.Load() {
 			c.groupsController.Delete(resourceGroupName)
 			resourceGroupStatusGauge.DeleteLabelValues(resourceGroupName, resourceGroupName)
-			resourceGroupStatusGauge.DeleteLabelValues(resourceGroupName, defaultResourceGroupName)
 			return true
 		}
 		gc.inactive = true
@@ -589,7 +638,7 @@ func (c *ResourceGroupsController) sendTokenBucketRequests(ctx context.Context,
 func (c *ResourceGroupsController) OnRequestWait(
 	ctx context.Context, resourceGroupName string, info RequestInfo,
 ) (*rmpb.Consumption, *rmpb.Consumption, time.Duration, uint32, error) {
-	gc, err := c.tryGetResourceGroup(ctx, resourceGroupName)
+	gc, err := c.tryGetResourceGroupController(ctx, resourceGroupName, true)
 	if err != nil {
 		return nil, nil, time.Duration(0), 0, err
 	}
@@ -605,17 +654,13 @@ func (c *ResourceGroupsController) OnResponse(
 		log.Warn("[resource group controller] resource group name does not exist",
 			zap.String("name", resourceGroupName))
 		return &rmpb.Consumption{}, nil
 	}
-	// If the resource group is marked as tombstone, fallback to the default resource group.
-	if gc.tombstone.Load() && resourceGroupName != defaultResourceGroupName {
-		return c.OnResponse(defaultResourceGroupName, req, resp)
-	}
 	return gc.onResponse(req, resp)
 }

 // IsBackgroundRequest If the resource group has background jobs, we should not record consumption and wait for it.
 func (c *ResourceGroupsController) IsBackgroundRequest(ctx context.Context,
 	resourceGroupName, requestResource string) bool {
-	gc, err := c.tryGetResourceGroup(ctx, resourceGroupName)
+	gc, err := c.tryGetResourceGroupController(ctx, resourceGroupName, false)
 	if err != nil {
 		return false
 	}
@@ -626,7 +671,7 @@ func (c *ResourceGroupsController) checkBackgroundSettings(ctx context.Context, bg *rmpb.BackgroundSettings, requestResource string) bool {
 	// fallback to default resource group.
 	if bg == nil {
-		gc, err := c.tryGetResourceGroup(ctx, defaultResourceGroupName)
+		gc, err := c.tryGetResourceGroupController(ctx, defaultResourceGroupName, false)
 		if err != nil {
 			return false
 		}
@@ -646,7 +691,7 @@ func (c *ResourceGroupsController) checkBackgroundSettings(ctx context.Context,

 // GetResourceGroup returns the meta setting of the given resource group name.
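cleanUpResourceGroup above is a two-sweep eviction: the first pass marks a controller inactive, and any controller still marked on the next pass gets deleted, so a full interval of disuse is required before removal. A minimal standalone sketch of the idea, with hypothetical types:

package main

import (
	"fmt"
	"sync"
)

type entry struct{ inactive bool }

func sweep(m *sync.Map) {
	m.Range(func(k, v any) bool {
		e := v.(*entry)
		if e.inactive { // untouched since the last sweep
			m.Delete(k)
			return true
		}
		e.inactive = true // any use before the next sweep should reset this
		return true
	})
}

func main() {
	var m sync.Map
	m.Store("rg1", &entry{})
	sweep(m) // marks rg1 inactive
	sweep(m) // deletes rg1
	_, ok := m.Load("rg1")
	fmt.Println(ok) // false
}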
func (c *ResourceGroupsController) GetResourceGroup(resourceGroupName string) (*rmpb.ResourceGroup, error) { - gc, err := c.tryGetResourceGroup(c.loopCtx, resourceGroupName) + gc, err := c.tryGetResourceGroupController(c.loopCtx, resourceGroupName, false) if err != nil { return nil, err } diff --git a/client/resource_group/controller/controller_test.go b/client/resource_group/controller/controller_test.go index e198effb2d8..821364c292f 100644 --- a/client/resource_group/controller/controller_test.go +++ b/client/resource_group/controller/controller_test.go @@ -211,11 +211,11 @@ func TestControllerWithTwoGroupRequestConcurrency(t *testing.T) { mockProvider.On("GetResourceGroup", mock.Anything, defaultResourceGroupName, mock.Anything).Return(defaultResourceGroup, nil) mockProvider.On("GetResourceGroup", mock.Anything, "test-group", mock.Anything).Return(testResourceGroup, nil) - c1, err := controller.tryGetResourceGroup(ctx, defaultResourceGroupName) + c1, err := controller.tryGetResourceGroupController(ctx, defaultResourceGroupName, false) re.NoError(err) re.Equal(defaultResourceGroup, c1.meta) - c2, err := controller.tryGetResourceGroup(ctx, "test-group") + c2, err := controller.tryGetResourceGroupController(ctx, "test-group", false) re.NoError(err) re.Equal(testResourceGroup, c2.meta) @@ -271,7 +271,7 @@ func TestControllerWithTwoGroupRequestConcurrency(t *testing.T) { } } -func TestGetController(t *testing.T) { +func TestTryGetController(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -286,39 +286,51 @@ func TestGetController(t *testing.T) { mockProvider.On("GetResourceGroup", mock.Anything, "test-group", mock.Anything).Return(testResourceGroup, nil) mockProvider.On("GetResourceGroup", mock.Anything, "test-group-non-existent", mock.Anything).Return((*rmpb.ResourceGroup)(nil), nil) - c, err := controller.GetResourceGroup("test-group-non-existent") + gc, err := controller.tryGetResourceGroupController(ctx, "test-group-non-existent", false) re.Error(err) - re.Nil(c) - c, err = controller.GetResourceGroup(defaultResourceGroupName) + re.Nil(gc) + gc, err = controller.tryGetResourceGroupController(ctx, defaultResourceGroupName, false) re.NoError(err) - re.Equal(defaultResourceGroup, c) - c, err = controller.GetResourceGroup("test-group") + re.Equal(defaultResourceGroup, gc.getMeta()) + gc, err = controller.tryGetResourceGroupController(ctx, "test-group", false) re.NoError(err) - re.Equal(testResourceGroup, c) - _, _, _, _, err = controller.OnRequestWait(ctx, "test-group", &TestRequestInfo{}) + re.Equal(testResourceGroup, gc.getMeta()) + requestInfo, responseInfo := NewTestRequestInfo(true, 1, 1), NewTestResponseInfo(1, time.Millisecond, true) + _, _, _, _, err = controller.OnRequestWait(ctx, "test-group", requestInfo) re.NoError(err) - _, err = controller.OnResponse("test-group", &TestRequestInfo{}, &TestResponseInfo{}) + consumption, err := controller.OnResponse("test-group", requestInfo, responseInfo) re.NoError(err) + re.NotEmpty(consumption) // Mark the tombstone manually to test the fallback case. 
- gc, err := controller.tryGetResourceGroup(ctx, "test-group") + gc, err = controller.tryGetResourceGroupController(ctx, "test-group", false) re.NoError(err) - gc.tombstone.Store(true) - c, err = controller.GetResourceGroup("test-group") + re.NotNil(gc) + controller.tombstoneGroupCostController("test-group") + gc, err = controller.tryGetResourceGroupController(ctx, "test-group", false) + re.Error(err) + re.Nil(gc) + gc, err = controller.tryGetResourceGroupController(ctx, "test-group", true) + re.NoError(err) + re.Equal(defaultResourceGroup, gc.getMeta()) + _, _, _, _, err = controller.OnRequestWait(ctx, "test-group", requestInfo) re.NoError(err) - re.Equal(defaultResourceGroup, c) - _, _, _, _, err = controller.OnRequestWait(ctx, "test-group", &TestRequestInfo{}) + consumption, err = controller.OnResponse("test-group", requestInfo, responseInfo) re.NoError(err) - _, err = controller.OnResponse("test-group", &TestRequestInfo{}, &TestResponseInfo{}) + re.NotEmpty(consumption) + // Test the default group protection. + gc, err = controller.tryGetResourceGroupController(ctx, defaultResourceGroupName, false) re.NoError(err) - // Mark the default group tombstone manually to test the fallback case. - gc, err = controller.tryGetResourceGroup(ctx, defaultResourceGroupName) + re.Equal(defaultResourceGroup, gc.getMeta()) + controller.tombstoneGroupCostController(defaultResourceGroupName) + gc, err = controller.tryGetResourceGroupController(ctx, defaultResourceGroupName, false) re.NoError(err) - gc.tombstone.Store(true) - c, err = controller.GetResourceGroup(defaultResourceGroupName) + re.Equal(defaultResourceGroup, gc.getMeta()) + gc, err = controller.tryGetResourceGroupController(ctx, defaultResourceGroupName, true) re.NoError(err) - re.Equal(defaultResourceGroup, c) - _, _, _, _, err = controller.OnRequestWait(ctx, defaultResourceGroupName, &TestRequestInfo{}) + re.Equal(defaultResourceGroup, gc.getMeta()) + _, _, _, _, err = controller.OnRequestWait(ctx, defaultResourceGroupName, requestInfo) re.NoError(err) - _, err = controller.OnResponse(defaultResourceGroupName, &TestRequestInfo{}, &TestResponseInfo{}) + consumption, err = controller.OnResponse(defaultResourceGroupName, requestInfo, responseInfo) re.NoError(err) + re.NotEmpty(consumption) } diff --git a/tests/integrations/mcs/resourcemanager/resource_manager_test.go b/tests/integrations/mcs/resourcemanager/resource_manager_test.go index 635cb17b822..10b1a0b4520 100644 --- a/tests/integrations/mcs/resourcemanager/resource_manager_test.go +++ b/tests/integrations/mcs/resourcemanager/resource_manager_test.go @@ -403,9 +403,9 @@ func (suite *resourceManagerClientTestSuite) TestResourceGroupController() { CPUMsCost: 1, } - controller, _ := controller.NewResourceGroupController(suite.ctx, 1, cli, cfg) - controller.Start(suite.ctx) - defer controller.Stop() + rgsController, _ := controller.NewResourceGroupController(suite.ctx, 1, cli, cfg) + rgsController.Start(suite.ctx) + defer rgsController.Stop() testCases := []struct { resourceGroupName string @@ -445,13 +445,13 @@ func (suite *resourceManagerClientTestSuite) TestResourceGroupController() { rres := cas.tcs[i].makeReadResponse() wres := cas.tcs[i].makeWriteResponse() startTime := time.Now() - _, _, _, _, err := controller.OnRequestWait(suite.ctx, cas.resourceGroupName, rreq) + _, _, _, _, err := rgsController.OnRequestWait(suite.ctx, cas.resourceGroupName, rreq) re.NoError(err) - _, _, _, _, err = controller.OnRequestWait(suite.ctx, cas.resourceGroupName, wreq) + _, _, _, _, err = 
rgsController.OnRequestWait(suite.ctx, cas.resourceGroupName, wreq) re.NoError(err) sum += time.Since(startTime) - controller.OnResponse(cas.resourceGroupName, rreq, rres) - controller.OnResponse(cas.resourceGroupName, wreq, wres) + rgsController.OnResponse(cas.resourceGroupName, rreq, rres) + rgsController.OnResponse(cas.resourceGroupName, wreq, wres) time.Sleep(1000 * time.Microsecond) } re.LessOrEqual(sum, buffDuration+cas.tcs[i].waitDuration) @@ -464,11 +464,11 @@ func (suite *resourceManagerClientTestSuite) TestResourceGroupController() { re.NoError(failpoint.Enable("github.com/tikv/pd/client/resource_group/controller/triggerUpdate", "return(true)")) tcs := tokenConsumptionPerSecond{rruTokensAtATime: 1, wruTokensAtATime: 900000000, times: 1, waitDuration: 0} wreq := tcs.makeWriteRequest() - _, _, _, _, err = controller.OnRequestWait(suite.ctx, rg.Name, wreq) + _, _, _, _, err = rgsController.OnRequestWait(suite.ctx, rg.Name, wreq) re.Error(err) re.NoError(failpoint.Disable("github.com/tikv/pd/client/resource_group/controller/triggerUpdate")) - group, err := controller.GetResourceGroup(rg.Name) + group, err := rgsController.GetResourceGroup(rg.Name) re.NoError(err) re.Equal(rg, group) // Delete the resource group and make sure it is tombstone. @@ -476,19 +476,21 @@ func (suite *resourceManagerClientTestSuite) TestResourceGroupController() { re.NoError(err) re.Contains(resp, "Success!") // Make sure the resource group is watched by the controller and marked as tombstone. + expectedErr := controller.NewResourceGroupNotExistErr(rg.Name) testutil.Eventually(re, func() bool { - gc, err := controller.GetResourceGroup(rg.Name) - re.NoError(err) - return gc.GetName() == "default" + gc, err := rgsController.GetResourceGroup(rg.Name) + return err.Error() == expectedErr.Error() && gc == nil }, testutil.WithTickInterval(50*time.Millisecond)) // Add the resource group again. resp, err = cli.AddResourceGroup(suite.ctx, rg) re.NoError(err) re.Contains(resp, "Success!") - // Make sure the resource group can be set to active again. + // Make sure the resource group can be get by the controller again. testutil.Eventually(re, func() bool { - gc, err := controller.GetResourceGroup(rg.Name) - re.NoError(err) + gc, err := rgsController.GetResourceGroup(rg.Name) + if err != nil { + re.EqualError(err, expectedErr.Error()) + } return gc.GetName() == rg.Name }, testutil.WithTickInterval(50*time.Millisecond)) } From 1ad446e575f54b73598091b1a400e57c5a80af01 Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Wed, 17 Jul 2024 13:52:30 +0800 Subject: [PATCH 04/20] schedule: fix split-merge-interval update (#8405) close tikv/pd#8404 Signed-off-by: lhy1024 Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/schedule/checker/merge_checker.go | 9 +++++++-- pkg/schedule/checker/merge_checker_test.go | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/pkg/schedule/checker/merge_checker.go b/pkg/schedule/checker/merge_checker.go index d7a28ad0ff8..1a7548a1084 100644 --- a/pkg/schedule/checker/merge_checker.go +++ b/pkg/schedule/checker/merge_checker.go @@ -46,6 +46,8 @@ const ( mergeOptionValueDeny = "deny" ) +var gcInterval = time.Minute + // MergeChecker ensures region to merge with adjacent region when size is small type MergeChecker struct { PauseController @@ -57,7 +59,7 @@ type MergeChecker struct { // NewMergeChecker creates a merge checker. 
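The fix in this patch moves splitCache.UpdateTTL ahead of Check's early-return paths, so a changed split-merge-interval reaches the cache even while the checker is still in its warm-up window. A minimal standalone sketch of why refresh-before-return matters, using a hand-rolled TTL map rather than the pd cache package:

package main

import (
	"fmt"
	"time"
)

// ttlCache is a hand-rolled stand-in for the real IDTTL cache.
type ttlCache struct {
	ttl   time.Duration
	items map[uint64]time.Time
}

func (c *ttlCache) UpdateTTL(ttl time.Duration) { c.ttl = ttl }
func (c *ttlCache) Put(id uint64)               { c.items[id] = time.Now() }
func (c *ttlCache) Exists(id uint64) bool {
	at, ok := c.items[id]
	return ok && time.Since(at) < c.ttl
}

func main() {
	c := &ttlCache{ttl: time.Hour, items: map[uint64]time.Time{}}
	c.Put(1) // a region was just split
	// Refreshing the TTL first is the point of the fix: if Check returned
	// early (warm-up) before calling UpdateTTL, a shortened interval from
	// the config would never reach the cache.
	c.UpdateTTL(time.Millisecond)
	time.Sleep(5 * time.Millisecond)
	fmt.Println(c.Exists(1)) // false: expired under the refreshed TTL
}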
func NewMergeChecker(ctx context.Context, cluster sche.CheckerCluster, conf config.CheckerConfigProvider) *MergeChecker {
-	splitCache := cache.NewIDTTL(ctx, time.Minute, conf.GetSplitMergeInterval())
+	splitCache := cache.NewIDTTL(ctx, gcInterval, conf.GetSplitMergeInterval())
 	return &MergeChecker{
 		cluster:    cluster,
 		conf:       conf,
@@ -88,13 +90,16 @@ func (m *MergeChecker) Check(region *core.RegionInfo) []*operator.Operator {
 		return nil
 	}

+	// Update the split cache.
+	// This must happen before the merge checker logic below.
+	m.splitCache.UpdateTTL(m.conf.GetSplitMergeInterval())
+
 	expireTime := m.startTime.Add(m.conf.GetSplitMergeInterval())
 	if time.Now().Before(expireTime) {
 		mergeCheckerRecentlyStartCounter.Inc()
 		return nil
 	}

-	m.splitCache.UpdateTTL(m.conf.GetSplitMergeInterval())
 	if m.splitCache.Exists(region.GetID()) {
 		mergeCheckerRecentlySplitCounter.Inc()
 		return nil
diff --git a/pkg/schedule/checker/merge_checker_test.go b/pkg/schedule/checker/merge_checker_test.go
index 06e8d468de3..61b8cd579df 100644
--- a/pkg/schedule/checker/merge_checker_test.go
+++ b/pkg/schedule/checker/merge_checker_test.go
@@ -56,6 +56,7 @@ func TestMergeCheckerTestSuite(t *testing.T) {

 func (suite *mergeCheckerTestSuite) SetupTest() {
 	cfg := mockconfig.NewTestOptions()
+	gcInterval = 100 * time.Millisecond
 	suite.ctx, suite.cancel = context.WithCancel(context.Background())
 	suite.cluster = mockcluster.NewCluster(suite.ctx, cfg)
 	suite.cluster.SetMaxMergeRegionSize(2)
@@ -84,6 +85,7 @@ func (suite *mergeCheckerTestSuite) SetupTest() {
 }

 func (suite *mergeCheckerTestSuite) TearDownTest() {
+	gcInterval = time.Minute
 	suite.cancel()
 }

@@ -234,6 +236,7 @@ func (suite *mergeCheckerTestSuite) TestBasic() {
 	ops = suite.mc.Check(suite.regions[3])
 	re.Nil(ops)

+	// issue #4616
 	suite.cluster.SetSplitMergeInterval(500 * time.Millisecond)
 	ops = suite.mc.Check(suite.regions[2])
 	re.Nil(ops)
@@ -245,6 +248,19 @@ func (suite *mergeCheckerTestSuite) TestBasic() {
 	re.NotNil(ops)
 	ops = suite.mc.Check(suite.regions[3])
 	re.NotNil(ops)
+
+	// issue #8405
+	suite.mc.startTime = time.Now()
+	suite.cluster.SetSplitMergeInterval(time.Second)
+	suite.cluster.SetSplitMergeInterval(time.Hour)
+	suite.mc.RecordRegionSplit([]uint64{suite.regions[2].GetID()})
+	suite.cluster.SetSplitMergeInterval(time.Second)
+	suite.mc.Check(suite.regions[2]) // trigger the config update
+	time.Sleep(time.Second)          // wait for the cache to gc
+	ops = suite.mc.Check(suite.regions[2])
+	re.NotNil(ops)
+	ops = suite.mc.Check(suite.regions[3])
+	re.NotNil(ops)
 }

 func (suite *mergeCheckerTestSuite) TestMatchPeers() {

From 0b5ed0fce6620c0824c9d82fa6493d643003092e Mon Sep 17 00:00:00 2001
From: okJiang <819421878@qq.com>
Date: Wed, 17 Jul 2024 14:10:30 +0800
Subject: [PATCH 05/20] client: report an error when scanning regions encounters a hole region (#8375)

close tikv/pd#8358

client: report an error when scanning regions encounters a hole region

- add an input parameter (OutputMustContainAllKeyRange) to the
  BatchScanRegionsRequest
- when the new parameter is enabled and the result does not cover all the
  input key ranges, return an error to the user
- add a Merge() method to merge continuous KeyRanges
- pull out the scanRegion function and add unit tests for it

Signed-off-by: okJiang <819421878@qq.com>
Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com>
---
 client/client.go                              |  19 ++--
 client/go.mod                                 |   2 +-
 client/go.sum                                 |   4 +-
 go.mod                                        |   4 +-
 go.sum                                        |   4 +-
 pkg/core/basic_cluster.go                     |  55 +++++++++-
 pkg/core/basic_cluster_test.go                |  93 +++++++++++++++++
pkg/core/region.go | 100 ++++++++++++++---- pkg/core/region_test.go | 60 +++++++++++ server/grpc_service.go | 18 +++- tests/integrations/client/client_test.go | 127 ++++++++++++++++------- tests/integrations/go.mod | 2 +- tests/integrations/go.sum | 4 +- tools/go.mod | 2 +- tools/go.sum | 4 +- 15 files changed, 417 insertions(+), 81 deletions(-) create mode 100644 pkg/core/basic_cluster_test.go diff --git a/client/client.go b/client/client.go index 8c8299daeab..aafe4aba77f 100644 --- a/client/client.go +++ b/client/client.go @@ -214,8 +214,9 @@ func WithSkipStoreLimit() RegionsOption { // GetRegionOp represents available options when getting regions. type GetRegionOp struct { - needBuckets bool - allowFollowerHandle bool + needBuckets bool + allowFollowerHandle bool + outputMustContainAllKeyRange bool } // GetRegionOption configures GetRegionOp. @@ -231,6 +232,11 @@ func WithAllowFollowerHandle() GetRegionOption { return func(op *GetRegionOp) { op.allowFollowerHandle = true } } +// WithOutputMustContainAllKeyRange means the output must contain all key ranges. +func WithOutputMustContainAllKeyRange() GetRegionOption { + return func(op *GetRegionOp) { op.outputMustContainAllKeyRange = true } +} + var ( // errUnmatchedClusterID is returned when found a PD with a different cluster ID. errUnmatchedClusterID = errors.New("[pd] unmatched cluster id") @@ -1193,10 +1199,11 @@ func (c *client) BatchScanRegions(ctx context.Context, ranges []KeyRange, limit pbRanges = append(pbRanges, &pdpb.KeyRange{StartKey: r.StartKey, EndKey: r.EndKey}) } req := &pdpb.BatchScanRegionsRequest{ - Header: c.requestHeader(), - NeedBuckets: options.needBuckets, - Ranges: pbRanges, - Limit: int32(limit), + Header: c.requestHeader(), + NeedBuckets: options.needBuckets, + Ranges: pbRanges, + Limit: int32(limit), + ContainAllKeyRange: options.outputMustContainAllKeyRange, } serviceClient, cctx := c.getRegionAPIClientAndContext(scanCtx, options.allowFollowerHandle && c.option.getEnableFollowerHandle()) if serviceClient == nil { diff --git a/client/go.mod b/client/go.mod index 7c782695539..8dc706a4540 100644 --- a/client/go.mod +++ b/client/go.mod @@ -10,7 +10,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 - github.com/pingcap/kvproto v0.0.0-20240620063548-118a4cab53e4 + github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/prometheus/client_golang v1.18.0 github.com/stretchr/testify v1.8.2 diff --git a/client/go.sum b/client/go.sum index 8f85f5ce7ed..20c154c30dc 100644 --- a/client/go.sum +++ b/client/go.sum @@ -46,8 +46,8 @@ github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTm github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= -github.com/pingcap/kvproto v0.0.0-20240620063548-118a4cab53e4 h1:6aIKNB2YGAec4IUDLw6G2eDECiGiufZcgEbZSCELBx0= -github.com/pingcap/kvproto v0.0.0-20240620063548-118a4cab53e4/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= +github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7 h1:V9XS3FQ/P6u+kFaoSyY5DBswIA774BMpIOLDBMrpxKc= +github.com/pingcap/kvproto 
v0.0.0-20240716095229-5f7ffec83ea7/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/go.mod b/go.mod index 1ef14f416e8..5b8074d285b 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/pingcap/errcode v0.3.0 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 - github.com/pingcap/kvproto v0.0.0-20240620063548-118a4cab53e4 + github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 github.com/pingcap/tidb-dashboard v0.0.0-20240612100141-91f6c281e441 @@ -173,7 +173,7 @@ require ( go.etcd.io/bbolt v1.3.9 // indirect go.uber.org/dig v1.9.0 // indirect go.uber.org/fx v1.12.0 // indirect - go.uber.org/multierr v1.11.0 // indirect + go.uber.org/multierr v1.11.0 golang.org/x/arch v0.3.0 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/image v0.10.0 // indirect diff --git a/go.sum b/go.sum index 659cd116e9c..baffeb0d6b7 100644 --- a/go.sum +++ b/go.sum @@ -371,8 +371,8 @@ github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ue github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20240620063548-118a4cab53e4 h1:6aIKNB2YGAec4IUDLw6G2eDECiGiufZcgEbZSCELBx0= -github.com/pingcap/kvproto v0.0.0-20240620063548-118a4cab53e4/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= +github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7 h1:V9XS3FQ/P6u+kFaoSyY5DBswIA774BMpIOLDBMrpxKc= +github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= diff --git a/pkg/core/basic_cluster.go b/pkg/core/basic_cluster.go index ea78c4ccf9c..f0b23bd6434 100644 --- a/pkg/core/basic_cluster.go +++ b/pkg/core/basic_cluster.go @@ -14,6 +14,8 @@ package core +import "bytes" + // BasicCluster provides basic data member and interface for a tikv cluster. 
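The BatchScanRegions options added to this file below follow Go's functional-options pattern: each option is a closure that mutates an options struct the callee assembles. A minimal standalone sketch of how such options compose; the names only mirror WithLimit and WithOutputMustContainAllKeyRange:

package main

import "fmt"

type scanOpts struct {
	limit      int
	containAll bool
}

type scanOpt func(*scanOpts)

func withLimit(n int) scanOpt { return func(o *scanOpts) { o.limit = n } }

func withContainAll() scanOpt { return func(o *scanOpts) { o.containAll = true } }

func newScanOpts(opts ...scanOpt) scanOpts {
	var o scanOpts
	for _, f := range opts {
		f(&o)
	}
	return o
}

func main() {
	fmt.Printf("%+v\n", newScanOpts(withLimit(10), withContainAll()))
	// Output: {limit:10 containAll:true}
}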
type BasicCluster struct { *StoresInfo @@ -97,7 +99,29 @@ type RegionSetInformer interface { GetAdjacentRegions(region *RegionInfo) (*RegionInfo, *RegionInfo) ScanRegions(startKey, endKey []byte, limit int) []*RegionInfo GetRegionByKey(regionKey []byte) *RegionInfo - BatchScanRegions(keyRanges *KeyRanges, limit int) []*RegionInfo + BatchScanRegions(keyRanges *KeyRanges, opts ...BatchScanRegionsOptionFunc) ([]*RegionInfo, error) +} + +type batchScanRegionsOptions struct { + limit int + outputMustContainAllKeyRange bool +} + +// BatchScanRegionsOptionFunc is the option function for BatchScanRegions. +type BatchScanRegionsOptionFunc func(*batchScanRegionsOptions) + +// WithLimit is an option for batchScanRegionsOptions. +func WithLimit(limit int) BatchScanRegionsOptionFunc { + return func(opt *batchScanRegionsOptions) { + opt.limit = limit + } +} + +// WithOutputMustContainAllKeyRange is an option for batchScanRegionsOptions. +func WithOutputMustContainAllKeyRange() BatchScanRegionsOptionFunc { + return func(opt *batchScanRegionsOptions) { + opt.outputMustContainAllKeyRange = true + } } // StoreSetInformer provides access to a shared informer of stores. @@ -136,7 +160,7 @@ func NewKeyRange(startKey, endKey string) KeyRange { } } -// KeyRanges is a slice of KeyRange. +// KeyRanges is a slice of monotonically increasing KeyRange. type KeyRanges struct { krs []*KeyRange } @@ -163,3 +187,30 @@ func (rs *KeyRanges) Ranges() []*KeyRange { } return rs.krs } + +// Merge merges the continuous KeyRanges. +func (rs *KeyRanges) Merge() { + if len(rs.krs) == 0 { + return + } + merged := make([]*KeyRange, 0, len(rs.krs)) + start := rs.krs[0].StartKey + end := rs.krs[0].EndKey + for _, kr := range rs.krs[1:] { + if bytes.Equal(end, kr.StartKey) { + end = kr.EndKey + } else { + merged = append(merged, &KeyRange{ + StartKey: start, + EndKey: end, + }) + start = kr.StartKey + end = kr.EndKey + } + } + merged = append(merged, &KeyRange{ + StartKey: start, + EndKey: end, + }) + rs.krs = merged +} diff --git a/pkg/core/basic_cluster_test.go b/pkg/core/basic_cluster_test.go new file mode 100644 index 00000000000..3d74dd49eea --- /dev/null +++ b/pkg/core/basic_cluster_test.go @@ -0,0 +1,93 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMergeKeyRanges(t *testing.T) { + re := require.New(t) + + testCases := []struct { + name string + input []*KeyRange + expect []*KeyRange + }{ + { + name: "empty", + input: []*KeyRange{}, + expect: []*KeyRange{}, + }, + { + name: "single", + input: []*KeyRange{ + {StartKey: []byte("a"), EndKey: []byte("b")}, + }, + expect: []*KeyRange{ + {StartKey: []byte("a"), EndKey: []byte("b")}, + }, + }, + { + name: "non-overlapping", + input: []*KeyRange{ + {StartKey: []byte("a"), EndKey: []byte("b")}, + {StartKey: []byte("c"), EndKey: []byte("d")}, + }, + expect: []*KeyRange{ + {StartKey: []byte("a"), EndKey: []byte("b")}, + {StartKey: []byte("c"), EndKey: []byte("d")}, + }, + }, + { + name: "continuous", + input: []*KeyRange{ + {StartKey: []byte("a"), EndKey: []byte("b")}, + {StartKey: []byte("b"), EndKey: []byte("c")}, + }, + expect: []*KeyRange{ + {StartKey: []byte("a"), EndKey: []byte("c")}, + }, + }, + { + name: "boundless 1", + input: []*KeyRange{ + {StartKey: nil, EndKey: []byte("b")}, + {StartKey: []byte("b"), EndKey: []byte("c")}, + }, + expect: []*KeyRange{ + {StartKey: nil, EndKey: []byte("c")}, + }, + }, + { + name: "boundless 2", + input: []*KeyRange{ + {StartKey: []byte("a"), EndKey: []byte("b")}, + {StartKey: []byte("b"), EndKey: nil}, + }, + expect: []*KeyRange{ + {StartKey: []byte("a"), EndKey: nil}, + }, + }, + } + + for _, tc := range testCases { + rs := &KeyRanges{krs: tc.input} + rs.Merge() + re.Equal(tc.expect, rs.Ranges(), tc.name) + } +} diff --git a/pkg/core/region.go b/pkg/core/region.go index f0c78f443bd..eb8b89aecff 100644 --- a/pkg/core/region.go +++ b/pkg/core/region.go @@ -1826,37 +1826,91 @@ func (r *RegionsInfo) ScanRegions(startKey, endKey []byte, limit int) []*RegionI // BatchScanRegions scans regions in given key pairs, returns at most `limit` regions. // limit <= 0 means no limit. // The given key pairs should be non-overlapping. -func (r *RegionsInfo) BatchScanRegions(keyRanges *KeyRanges, limit int) []*RegionInfo { - r.t.RLock() - defer r.t.RUnlock() - +func (r *RegionsInfo) BatchScanRegions(keyRanges *KeyRanges, opts ...BatchScanRegionsOptionFunc) ([]*RegionInfo, error) { + keyRanges.Merge() krs := keyRanges.Ranges() res := make([]*RegionInfo, 0, len(krs)) - var lastRegion *RegionInfo + + scanOptions := &batchScanRegionsOptions{} + for _, opt := range opts { + opt(scanOptions) + } + + r.t.RLock() + defer r.t.RUnlock() for _, keyRange := range krs { - if limit > 0 && len(res) >= limit { - return res + if scanOptions.limit > 0 && len(res) >= scanOptions.limit { + res = res[:scanOptions.limit] + return res, nil } - if lastRegion != nil { - if lastRegion.Contains(keyRange.EndKey) { - continue - } else if lastRegion.Contains(keyRange.StartKey) { - keyRange.StartKey = lastRegion.GetEndKey() - } + + regions, err := scanRegion(r.tree, keyRange, scanOptions.limit, scanOptions.outputMustContainAllKeyRange) + if err != nil { + return nil, err } - r.tree.scanRange(keyRange.StartKey, func(region *RegionInfo) bool { - if len(keyRange.EndKey) > 0 && bytes.Compare(region.GetStartKey(), keyRange.EndKey) >= 0 { - return false - } - if limit > 0 && len(res) >= limit { + if len(res) > 0 && len(regions) > 0 && res[len(res)-1].meta.Id == regions[0].meta.Id { + // skip the region that has been scanned + regions = regions[1:] + } + res = append(res, regions...) 
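The skip above guards against double-counting a region that straddles two scanned key ranges, since it can return as the tail of one batch and the head of the next. A minimal standalone sketch of the same dedup condition, assuming region IDs are unique:

package main

import "fmt"

type region struct{ id uint64 }

func appendDedup(res, batch []region) []region {
	// A region spanning the boundary between two key ranges shows up as the
	// tail of one batch and the head of the next; drop the duplicated head.
	if len(res) > 0 && len(batch) > 0 && res[len(res)-1].id == batch[0].id {
		batch = batch[1:]
	}
	return append(res, batch...)
}

func main() {
	res := []region{{1}, {2}}
	fmt.Println(appendDedup(res, []region{{2}, {3}})) // [{1} {2} {3}]
}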
+ } + return res, nil +} + +func scanRegion(regionTree *regionTree, keyRange *KeyRange, limit int, outputMustContainAllKeyRange bool) ([]*RegionInfo, error) { + var ( + res []*RegionInfo + lastRegion = &RegionInfo{ + meta: &metapb.Region{EndKey: keyRange.StartKey}, + } + exceedLimit = func() bool { return limit > 0 && len(res) >= limit } + err error + ) + regionTree.scanRange(keyRange.StartKey, func(region *RegionInfo) bool { + if len(keyRange.EndKey) > 0 && len(region.GetStartKey()) > 0 && + bytes.Compare(region.GetStartKey(), keyRange.EndKey) >= 0 { + return false + } + if exceedLimit() { + return false + } + if len(lastRegion.GetEndKey()) > 0 && len(region.GetStartKey()) > 0 && + bytes.Compare(region.GetStartKey(), lastRegion.GetEndKey()) > 0 { + err = errs.ErrRegionNotAdjacent.FastGen( + "key range[%x, %x) found a hole region between region[%x, %x) and region[%x, %x)", + keyRange.StartKey, keyRange.EndKey, + lastRegion.GetStartKey(), lastRegion.GetEndKey(), + region.GetStartKey(), region.GetEndKey()) + log.Warn("scan regions failed", zap.Bool("outputMustContainAllKeyRange", + outputMustContainAllKeyRange), zap.Error(err)) + if outputMustContainAllKeyRange { return false } - lastRegion = region - res = append(res, region) - return true - }) + } + + lastRegion = region + res = append(res, region) + return true + }) + if outputMustContainAllKeyRange && err != nil { + return nil, err } - return res + + if !(exceedLimit()) && len(keyRange.EndKey) > 0 && len(lastRegion.GetEndKey()) > 0 && + bytes.Compare(lastRegion.GetEndKey(), keyRange.EndKey) < 0 { + err = errs.ErrRegionNotAdjacent.FastGen( + "key range[%x, %x) found a hole region in the last, the last scanned region is [%x, %x), [%x, %x) is missing", + keyRange.StartKey, keyRange.EndKey, + lastRegion.GetStartKey(), lastRegion.GetEndKey(), + lastRegion.GetEndKey(), keyRange.EndKey) + log.Warn("scan regions failed", zap.Bool("outputMustContainAllKeyRange", + outputMustContainAllKeyRange), zap.Error(err)) + if outputMustContainAllKeyRange { + return nil, err + } + } + + return res, nil } // ScanRegionWithIterator scans from the first region containing or behind start key, diff --git a/pkg/core/region_test.go b/pkg/core/region_test.go index 816bba4efae..845944780e4 100644 --- a/pkg/core/region_test.go +++ b/pkg/core/region_test.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/stretchr/testify/require" + "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/id" "github.com/tikv/pd/pkg/mock/mockid" ) @@ -1141,3 +1142,62 @@ func TestCntRefAfterResetRegionCache(t *testing.T) { regions.CheckAndPutRegion(region) re.Equal(int32(2), region.GetRef()) } + +func TestScanRegion(t *testing.T) { + var ( + re = require.New(t) + tree = newRegionTree() + needContainAllRanges = true + regions []*RegionInfo + err error + ) + scanError := func(startKey, endKey []byte, limit int) { + regions, err = scanRegion(tree, &KeyRange{StartKey: startKey, EndKey: endKey}, limit, needContainAllRanges) + re.Error(err) + } + scanNoError := func(startKey, endKey []byte, limit int) []*RegionInfo { + regions, err = scanRegion(tree, &KeyRange{StartKey: startKey, EndKey: endKey}, limit, needContainAllRanges) + re.NoError(err) + return regions + } + // region1 + // [a, b) + updateNewItem(tree, NewTestRegionInfo(1, 1, []byte("a"), []byte("b"))) + re.Len(scanNoError([]byte("a"), []byte("b"), 0), 1) + scanError([]byte("a"), []byte("c"), 0) + re.Len(scanNoError([]byte("a"), []byte("c"), 1), 1) + + // region1 | 
region2 + // [a, b) | [b, c) + updateNewItem(tree, NewTestRegionInfo(2, 1, []byte("b"), []byte("c"))) + re.Len(scanNoError([]byte("a"), []byte("c"), 0), 2) + re.Len(scanNoError([]byte("a"), []byte("c"), 1), 1) + + // region1 | region2 | region3 + // [a, b) | [b, c) | [d, f) + updateNewItem(tree, NewTestRegionInfo(3, 1, []byte("d"), []byte("f"))) + scanError([]byte("a"), []byte("e"), 0) + scanError([]byte("c"), []byte("e"), 0) + + // region1 | region2 | region3 | region4 + // [a, b) | [b, c) | [d, f) | [f, i) + updateNewItem(tree, NewTestRegionInfo(4, 1, []byte("f"), []byte("i"))) + scanError([]byte("c"), []byte("g"), 0) + re.Len(scanNoError([]byte("g"), []byte("h"), 0), 1) + re.Equal(uint64(4), regions[0].GetID()) + // test error type + scanError([]byte(string('a'-1)), []byte("g"), 0) + re.True(errs.ErrRegionNotAdjacent.Equal(err)) + + // region1 | region2 | region3 | region4 | region5 | region6 + // [a, b) | [b, c) | [d, f) | [f, i) | [j, k) | [l, +∞)] + updateNewItem(tree, NewTestRegionInfo(6, 1, []byte("l"), nil)) + // test boundless + re.Len(scanNoError([]byte("m"), nil, 0), 1) + + // ********** needContainAllRanges = false ********** + // Tests that previously reported errors will no longer report errors. + needContainAllRanges = false + re.Len(scanNoError([]byte("a"), []byte("e"), 0), 3) + re.Len(scanNoError([]byte("c"), []byte("e"), 0), 1) +} diff --git a/server/grpc_service.go b/server/grpc_service.go index d3f58dfe1ab..7b18be47fde 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -48,6 +48,7 @@ import ( "github.com/tikv/pd/pkg/versioninfo" "github.com/tikv/pd/server/cluster" "go.etcd.io/etcd/clientv3" + "go.uber.org/multierr" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -1680,7 +1681,22 @@ func (s *GrpcServer) BatchScanRegions(ctx context.Context, request *pdpb.BatchSc } keyRanges.Append(reqRange.StartKey, reqRange.EndKey) } - res := rc.BatchScanRegions(keyRanges, int(limit)) + + scanOptions := []core.BatchScanRegionsOptionFunc{core.WithLimit(int(limit))} + if request.ContainAllKeyRange { + scanOptions = append(scanOptions, core.WithOutputMustContainAllKeyRange()) + } + res, err := rc.BatchScanRegions(keyRanges, scanOptions...) + if err != nil { + if errs.ErrRegionNotAdjacent.Equal(multierr.Errors(err)[0]) { + return &pdpb.BatchScanRegionsResponse{ + Header: s.wrapErrorToHeader(pdpb.ErrorType_REGIONS_NOT_CONTAIN_ALL_KEY_RANGE, err.Error()), + }, nil + } + return &pdpb.BatchScanRegionsResponse{ + Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + }, nil + } regions := make([]*pdpb.Region, 0, len(res)) for _, r := range res { leader := r.GetLeader() diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go index 2e51c7080e9..e2cb2758b78 100644 --- a/tests/integrations/client/client_test.go +++ b/tests/integrations/client/client_test.go @@ -2001,9 +2001,13 @@ func waitLeaderChange(re *require.Assertions, cluster *tests.TestCluster, old st } func (suite *clientTestSuite) TestBatchScanRegions() { - re := suite.Require() - regionLen := 10 - regions := make([]*metapb.Region, 0, regionLen) + var ( + re = suite.Require() + ctx = context.Background() + regionLen = 10 + regions = make([]*metapb.Region, 0, regionLen) + ) + for i := 0; i < regionLen; i++ { regionID := regionIDAllocator.alloc() r := &metapb.Region{ @@ -2028,7 +2032,7 @@ func (suite *clientTestSuite) TestBatchScanRegions() { // Wait for region heartbeats. 
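testutil.Eventually below simply retries until the server has applied the heartbeats. A minimal standalone sketch of the same poll-until-deadline shape, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"time"
)

// eventually polls cond until it holds or the deadline passes.
func eventually(cond func() bool, timeout, tick time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(tick)
	}
	return false
}

func main() {
	start := time.Now()
	ok := eventually(func() bool { return time.Since(start) > 20*time.Millisecond },
		time.Second, 5*time.Millisecond)
	fmt.Println(ok) // true
}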
testutil.Eventually(re, func() bool { - scanRegions, err := suite.client.BatchScanRegions(context.Background(), []pd.KeyRange{{StartKey: []byte{0}, EndKey: nil}}, 10) + scanRegions, err := suite.client.BatchScanRegions(ctx, []pd.KeyRange{{StartKey: []byte{0}, EndKey: nil}}, 10) return err == nil && len(scanRegions) == 10 }) @@ -2049,39 +2053,45 @@ func (suite *clientTestSuite) TestBatchScanRegions() { suite.srv.GetRaftCluster().HandleRegionHeartbeat(region6) t := suite.T() + var outputMustContainAllKeyRangeOptions []bool check := func(ranges []pd.KeyRange, limit int, expect []*metapb.Region) { for _, bucket := range []bool{false, true} { - var opts []pd.GetRegionOption - if bucket { - opts = append(opts, pd.WithBuckets()) - } - scanRegions, err := suite.client.BatchScanRegions(context.Background(), ranges, limit, opts...) - re.NoError(err) - re.Len(scanRegions, len(expect)) - t.Log("scanRegions", scanRegions) - t.Log("expect", expect) - for i := range expect { - re.Equal(expect[i], scanRegions[i].Meta) - - if scanRegions[i].Meta.GetId() == region3.GetID() { - re.Equal(&metapb.Peer{}, scanRegions[i].Leader) - } else { - re.Equal(expect[i].Peers[0], scanRegions[i].Leader) + for _, outputMustContainAllKeyRange := range outputMustContainAllKeyRangeOptions { + var opts []pd.GetRegionOption + if bucket { + opts = append(opts, pd.WithBuckets()) } - - if scanRegions[i].Meta.GetId() == region4.GetID() { - re.Equal([]*metapb.Peer{expect[i].Peers[1]}, scanRegions[i].DownPeers) + if outputMustContainAllKeyRange { + opts = append(opts, pd.WithOutputMustContainAllKeyRange()) } + scanRegions, err := suite.client.BatchScanRegions(ctx, ranges, limit, opts...) + re.NoError(err) + t.Log("scanRegions", scanRegions) + t.Log("expect", expect) + re.Len(scanRegions, len(expect)) + for i := range expect { + re.Equal(expect[i], scanRegions[i].Meta) + + if scanRegions[i].Meta.GetId() == region3.GetID() { + re.Equal(&metapb.Peer{}, scanRegions[i].Leader) + } else { + re.Equal(expect[i].Peers[0], scanRegions[i].Leader) + } - if scanRegions[i].Meta.GetId() == region5.GetID() { - re.Equal([]*metapb.Peer{expect[i].Peers[1], expect[i].Peers[2]}, scanRegions[i].PendingPeers) - } + if scanRegions[i].Meta.GetId() == region4.GetID() { + re.Equal([]*metapb.Peer{expect[i].Peers[1]}, scanRegions[i].DownPeers) + } - if scanRegions[i].Meta.GetId() == region6.GetID() { - if !bucket { - re.Nil(scanRegions[i].Buckets) - } else { - re.Equal(scanRegions[i].Buckets, region6.GetBuckets()) + if scanRegions[i].Meta.GetId() == region5.GetID() { + re.Equal([]*metapb.Peer{expect[i].Peers[1], expect[i].Peers[2]}, scanRegions[i].PendingPeers) + } + + if scanRegions[i].Meta.GetId() == region6.GetID() { + if !bucket { + re.Nil(scanRegions[i].Buckets) + } else { + re.Equal(scanRegions[i].Buckets, region6.GetBuckets()) + } } } } @@ -2089,6 +2099,7 @@ func (suite *clientTestSuite) TestBatchScanRegions() { } // valid ranges + outputMustContainAllKeyRangeOptions = []bool{false, true} check([]pd.KeyRange{{StartKey: []byte{0}, EndKey: nil}}, 10, regions) check([]pd.KeyRange{{StartKey: []byte{1}, EndKey: nil}}, 5, regions[1:6]) check([]pd.KeyRange{ @@ -2105,6 +2116,8 @@ func (suite *clientTestSuite) TestBatchScanRegions() { {StartKey: []byte{6}, EndKey: []byte{7}}, {StartKey: []byte{8}, EndKey: []byte{9}}, }, 3, []*metapb.Region{regions[0], regions[2], regions[4]}) + + outputMustContainAllKeyRangeOptions = []bool{false} check([]pd.KeyRange{ {StartKey: []byte{0}, EndKey: []byte{0, 1}}, // non-continuous ranges in a region {StartKey: []byte{0, 2}, EndKey: 
[]byte{0, 3}}, @@ -2112,14 +2125,56 @@ func (suite *clientTestSuite) TestBatchScanRegions() { {StartKey: []byte{0, 5}, EndKey: []byte{0, 6}}, {StartKey: []byte{0, 7}, EndKey: []byte{3}}, {StartKey: []byte{4}, EndKey: []byte{5}}, - }, 2, []*metapb.Region{regions[0], regions[1]}) + }, 10, []*metapb.Region{regions[0], regions[1], regions[2], regions[4]}) + outputMustContainAllKeyRangeOptions = []bool{false} + check([]pd.KeyRange{ + {StartKey: []byte{9}, EndKey: []byte{10, 1}}, + }, 10, []*metapb.Region{regions[9]}) // invalid ranges - _, err := suite.client.BatchScanRegions(context.Background(), []pd.KeyRange{{StartKey: []byte{1}, EndKey: []byte{0}}}, 10) - re.Error(err, "invalid key range, start key > end key") - _, err = suite.client.BatchScanRegions(context.Background(), []pd.KeyRange{ + _, err := suite.client.BatchScanRegions( + ctx, + []pd.KeyRange{{StartKey: []byte{1}, EndKey: []byte{0}}}, + 10, + pd.WithOutputMustContainAllKeyRange(), + ) + re.ErrorContains(err, "invalid key range, start key > end key") + _, err = suite.client.BatchScanRegions(ctx, []pd.KeyRange{ {StartKey: []byte{0}, EndKey: []byte{2}}, {StartKey: []byte{1}, EndKey: []byte{3}}, }, 10) - re.Error(err, "invalid key range, ranges overlapped") + re.ErrorContains(err, "invalid key range, ranges overlapped") + _, err = suite.client.BatchScanRegions( + ctx, + []pd.KeyRange{{StartKey: []byte{9}, EndKey: []byte{10, 1}}}, + 10, + pd.WithOutputMustContainAllKeyRange(), + ) + re.ErrorContains(err, "found a hole region in the last") + req := &pdpb.RegionHeartbeatRequest{ + Header: newHeader(suite.srv), + Region: &metapb.Region{ + Id: 100, + RegionEpoch: &metapb.RegionEpoch{ + ConfVer: 1, + Version: 1, + }, + StartKey: []byte{100}, + EndKey: []byte{101}, + Peers: peers, + }, + Leader: peers[0], + } + re.NoError(suite.regionHeartbeat.Send(req)) + + // Wait for region heartbeats. 
+ testutil.Eventually(re, func() bool { + _, err = suite.client.BatchScanRegions( + ctx, + []pd.KeyRange{{StartKey: []byte{9}, EndKey: []byte{101}}}, + 10, + pd.WithOutputMustContainAllKeyRange(), + ) + return err != nil && strings.Contains(err.Error(), "found a hole region between") + }) } diff --git a/tests/integrations/go.mod b/tests/integrations/go.mod index 8a570d52458..a9f996417a4 100644 --- a/tests/integrations/go.mod +++ b/tests/integrations/go.mod @@ -14,7 +14,7 @@ require ( github.com/go-sql-driver/mysql v1.7.0 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c - github.com/pingcap/kvproto v0.0.0-20240620063548-118a4cab53e4 + github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/prometheus/client_golang v1.19.0 github.com/prometheus/client_model v0.6.0 diff --git a/tests/integrations/go.sum b/tests/integrations/go.sum index c88919f6571..b46c01e77cc 100644 --- a/tests/integrations/go.sum +++ b/tests/integrations/go.sum @@ -368,8 +368,8 @@ github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ue github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c h1:CgbKAHto5CQgWM9fSBIvaxsJHuGP0uM74HXtv3MyyGQ= github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20240620063548-118a4cab53e4 h1:6aIKNB2YGAec4IUDLw6G2eDECiGiufZcgEbZSCELBx0= -github.com/pingcap/kvproto v0.0.0-20240620063548-118a4cab53e4/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= +github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7 h1:V9XS3FQ/P6u+kFaoSyY5DBswIA774BMpIOLDBMrpxKc= +github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= diff --git a/tools/go.mod b/tools/go.mod index f424f12458e..af187b4999c 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -22,7 +22,7 @@ require ( github.com/mattn/go-shellwords v1.0.12 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 - github.com/pingcap/kvproto v0.0.0-20240620063548-118a4cab53e4 + github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.19.0 diff --git a/tools/go.sum b/tools/go.sum index c2656b3e656..f508ca92384 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -365,8 +365,8 @@ github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ue github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20240620063548-118a4cab53e4 
h1:6aIKNB2YGAec4IUDLw6G2eDECiGiufZcgEbZSCELBx0= -github.com/pingcap/kvproto v0.0.0-20240620063548-118a4cab53e4/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= +github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7 h1:V9XS3FQ/P6u+kFaoSyY5DBswIA774BMpIOLDBMrpxKc= +github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= From 8cc53a51d62207a37913e958b400faace48097e1 Mon Sep 17 00:00:00 2001 From: okJiang <819421878@qq.com> Date: Wed, 17 Jul 2024 14:17:59 +0800 Subject: [PATCH 06/20] scheduler: add scheduler type and move metrics file (#8393) ref tikv/pd#8379 Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/schedule/schedulers/balance_leader.go | 11 -- pkg/schedule/schedulers/balance_region.go | 12 -- pkg/schedule/schedulers/evict_leader.go | 9 - pkg/schedule/schedulers/evict_slow_store.go | 3 - pkg/schedule/schedulers/grant_hot_region.go | 6 - pkg/schedule/schedulers/grant_leader.go | 7 - pkg/schedule/schedulers/hot_region.go | 38 ---- pkg/schedule/schedulers/label.go | 9 - pkg/schedule/schedulers/metrics.go | 182 +++++++++++++++++- pkg/schedule/schedulers/random_merge.go | 10 - pkg/schedule/schedulers/scatter_range.go | 10 - pkg/schedule/schedulers/shuffle_hot_region.go | 7 - pkg/schedule/schedulers/shuffle_leader.go | 8 - pkg/schedule/schedulers/shuffle_region.go | 10 - pkg/schedule/schedulers/split_bucket.go | 14 -- .../schedulers/transfer_witness_leader.go | 7 - pkg/schedule/type/type.go | 36 ++++ 17 files changed, 217 insertions(+), 162 deletions(-) diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index c4b9cd6ab5e..80755fbdbe5 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -55,17 +55,6 @@ const ( transferOut = "transfer-out" ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. - balanceLeaderScheduleCounter = schedulerCounter.WithLabelValues(BalanceLeaderName, "schedule") - balanceLeaderNoLeaderRegionCounter = schedulerCounter.WithLabelValues(BalanceLeaderName, "no-leader-region") - balanceLeaderRegionHotCounter = schedulerCounter.WithLabelValues(BalanceLeaderName, "region-hot") - balanceLeaderNoTargetStoreCounter = schedulerCounter.WithLabelValues(BalanceLeaderName, "no-target-store") - balanceLeaderNoFollowerRegionCounter = schedulerCounter.WithLabelValues(BalanceLeaderName, "no-follower-region") - balanceLeaderSkipCounter = schedulerCounter.WithLabelValues(BalanceLeaderName, "skip") - balanceLeaderNewOpCounter = schedulerCounter.WithLabelValues(BalanceLeaderName, "new-operator") -) - type balanceLeaderSchedulerConfig struct { syncutil.RWMutex storage endpoint.ConfigStorage diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index bfc1a236481..488b7635b77 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -36,18 +36,6 @@ const ( BalanceRegionType = "balance-region" ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. 
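Every block deleted in this patch had cached the child returned by WithLabelValues once at package init, since resolving a label set on every event is comparatively costly; the pattern itself survives, centralized in metrics.go. A minimal standalone sketch of the pre-binding idea, with a hypothetical metric name:

package main

import "github.com/prometheus/client_golang/prometheus"

var (
	scheduleCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "scheduler_event_total", // hypothetical name
	}, []string{"type", "name"})

	// Bound once: the label lookup (hashing the label set) happens here,
	// not on every event.
	balanceRegionScheduleCounter = scheduleCounter.WithLabelValues("balance-region", "schedule")
)

func main() {
	prometheus.MustRegister(scheduleCounter)
	balanceRegionScheduleCounter.Inc()
}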
- balanceRegionScheduleCounter = schedulerCounter.WithLabelValues(BalanceRegionName, "schedule") - balanceRegionNoRegionCounter = schedulerCounter.WithLabelValues(BalanceRegionName, "no-region") - balanceRegionHotCounter = schedulerCounter.WithLabelValues(BalanceRegionName, "region-hot") - balanceRegionNoLeaderCounter = schedulerCounter.WithLabelValues(BalanceRegionName, "no-leader") - balanceRegionNewOpCounter = schedulerCounter.WithLabelValues(BalanceRegionName, "new-operator") - balanceRegionSkipCounter = schedulerCounter.WithLabelValues(BalanceRegionName, "skip") - balanceRegionCreateOpFailCounter = schedulerCounter.WithLabelValues(BalanceRegionName, "create-operator-fail") - balanceRegionNoReplacementCounter = schedulerCounter.WithLabelValues(BalanceRegionName, "no-replacement") -) - type balanceRegionSchedulerConfig struct { Name string `json:"name"` Ranges []core.KeyRange `json:"ranges"` diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index 8f56643f384..6cbd04ee671 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -47,15 +47,6 @@ const ( lastStoreDeleteInfo = "The last store has been deleted" ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. - evictLeaderCounter = schedulerCounter.WithLabelValues(EvictLeaderName, "schedule") - evictLeaderNoLeaderCounter = schedulerCounter.WithLabelValues(EvictLeaderName, "no-leader") - evictLeaderPickUnhealthyCounter = schedulerCounter.WithLabelValues(EvictLeaderName, "pick-unhealthy-region") - evictLeaderNoTargetStoreCounter = schedulerCounter.WithLabelValues(EvictLeaderName, "no-target-store") - evictLeaderNewOperatorCounter = schedulerCounter.WithLabelValues(EvictLeaderName, "new-operator") -) - type evictLeaderSchedulerConfig struct { syncutil.RWMutex storage endpoint.ConfigStorage diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index 9b13e292c87..b4cc79e782a 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -43,9 +43,6 @@ const ( slowStoreRecoverThreshold = 1 ) -// WithLabelValues is a heavy operation, define variable to avoid call it every time. -var evictSlowStoreCounter = schedulerCounter.WithLabelValues(EvictSlowStoreName, "schedule") - type evictSlowStoreSchedulerConfig struct { syncutil.RWMutex cluster *core.BasicCluster diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index 4ab82be4cbe..a19a4e1bf4b 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -47,12 +47,6 @@ const ( GrantHotRegionType = "grant-hot-region" ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. - grantHotRegionCounter = schedulerCounter.WithLabelValues(GrantHotRegionName, "schedule") - grantHotRegionSkipCounter = schedulerCounter.WithLabelValues(GrantHotRegionName, "skip") -) - type grantHotRegionSchedulerConfig struct { syncutil.RWMutex storage endpoint.ConfigStorage diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go index 4752ef3e61d..21900fac85d 100644 --- a/pkg/schedule/schedulers/grant_leader.go +++ b/pkg/schedule/schedulers/grant_leader.go @@ -42,13 +42,6 @@ const ( GrantLeaderType = "grant-leader" ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. 
- grantLeaderCounter = schedulerCounter.WithLabelValues(GrantLeaderName, "schedule") - grantLeaderNoFollowerCounter = schedulerCounter.WithLabelValues(GrantLeaderName, "no-follower") - grantLeaderNewOperatorCounter = schedulerCounter.WithLabelValues(GrantLeaderName, "new-operator") -) - type grantLeaderSchedulerConfig struct { syncutil.RWMutex storage endpoint.ConfigStorage diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index d20473fb010..d7e83fd4fb2 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -71,44 +71,6 @@ var ( statisticsInterval = time.Second ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. - hotSchedulerCounter = schedulerCounter.WithLabelValues(HotRegionName, "schedule") - hotSchedulerSkipCounter = schedulerCounter.WithLabelValues(HotRegionName, "skip") - hotSchedulerSearchRevertRegionsCounter = schedulerCounter.WithLabelValues(HotRegionName, "search_revert_regions") - hotSchedulerNotSameEngineCounter = schedulerCounter.WithLabelValues(HotRegionName, "not_same_engine") - hotSchedulerNoRegionCounter = schedulerCounter.WithLabelValues(HotRegionName, "no_region") - hotSchedulerUnhealthyReplicaCounter = schedulerCounter.WithLabelValues(HotRegionName, "unhealthy_replica") - hotSchedulerAbnormalReplicaCounter = schedulerCounter.WithLabelValues(HotRegionName, "abnormal_replica") - hotSchedulerCreateOperatorFailedCounter = schedulerCounter.WithLabelValues(HotRegionName, "create_operator_failed") - hotSchedulerNewOperatorCounter = schedulerCounter.WithLabelValues(HotRegionName, "new_operator") - hotSchedulerSnapshotSenderLimitCounter = schedulerCounter.WithLabelValues(HotRegionName, "snapshot_sender_limit") - - // counter related with the split region - hotSchedulerNotFoundSplitKeysCounter = schedulerCounter.WithLabelValues(HotRegionName, "not_found_split_keys") - hotSchedulerRegionBucketsNotHotCounter = schedulerCounter.WithLabelValues(HotRegionName, "region_buckets_not_hot") - hotSchedulerOnlyOneBucketsHotCounter = schedulerCounter.WithLabelValues(HotRegionName, "only_one_buckets_hot") - hotSchedulerHotBucketNotValidCounter = schedulerCounter.WithLabelValues(HotRegionName, "hot_buckets_not_valid") - hotSchedulerRegionBucketsSingleHotSpotCounter = schedulerCounter.WithLabelValues(HotRegionName, "region_buckets_single_hot_spot") - hotSchedulerSplitSuccessCounter = schedulerCounter.WithLabelValues(HotRegionName, "split_success") - hotSchedulerNeedSplitBeforeScheduleCounter = schedulerCounter.WithLabelValues(HotRegionName, "need_split_before_move_peer") - hotSchedulerRegionTooHotNeedSplitCounter = schedulerCounter.WithLabelValues(HotRegionName, "region_is_too_hot_need_split") - - hotSchedulerMoveLeaderCounter = schedulerCounter.WithLabelValues(HotRegionName, moveLeader.String()) - hotSchedulerMovePeerCounter = schedulerCounter.WithLabelValues(HotRegionName, movePeer.String()) - hotSchedulerTransferLeaderCounter = schedulerCounter.WithLabelValues(HotRegionName, transferLeader.String()) - - readSkipAllDimUniformStoreCounter = schedulerCounter.WithLabelValues(HotRegionName, "read-skip-all-dim-uniform-store") - writeSkipAllDimUniformStoreCounter = schedulerCounter.WithLabelValues(HotRegionName, "write-skip-all-dim-uniform-store") - readSkipByteDimUniformStoreCounter = schedulerCounter.WithLabelValues(HotRegionName, "read-skip-byte-uniform-store") - writeSkipByteDimUniformStoreCounter = schedulerCounter.WithLabelValues(HotRegionName, 
"write-skip-byte-uniform-store") - readSkipKeyDimUniformStoreCounter = schedulerCounter.WithLabelValues(HotRegionName, "read-skip-key-uniform-store") - writeSkipKeyDimUniformStoreCounter = schedulerCounter.WithLabelValues(HotRegionName, "write-skip-key-uniform-store") - readSkipQueryDimUniformStoreCounter = schedulerCounter.WithLabelValues(HotRegionName, "read-skip-query-uniform-store") - writeSkipQueryDimUniformStoreCounter = schedulerCounter.WithLabelValues(HotRegionName, "write-skip-query-uniform-store") - pendingOpFailsStoreCounter = schedulerCounter.WithLabelValues(HotRegionName, "pending-op-fails") -) - type baseHotScheduler struct { *BaseScheduler // stLoadInfos contain store statistics information by resource type. diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go index 24875e3e26a..6b7a98f8d02 100644 --- a/pkg/schedule/schedulers/label.go +++ b/pkg/schedule/schedulers/label.go @@ -34,15 +34,6 @@ const ( LabelType = "label" ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. - labelCounter = schedulerCounter.WithLabelValues(LabelName, "schedule") - labelNewOperatorCounter = schedulerCounter.WithLabelValues(LabelName, "new-operator") - labelNoTargetCounter = schedulerCounter.WithLabelValues(LabelName, "no-target") - labelSkipCounter = schedulerCounter.WithLabelValues(LabelName, "skip") - labelNoRegionCounter = schedulerCounter.WithLabelValues(LabelName, "no-region") -) - type labelSchedulerConfig struct { Name string `json:"name"` Ranges []core.KeyRange `json:"ranges"` diff --git a/pkg/schedule/schedulers/metrics.go b/pkg/schedule/schedulers/metrics.go index 34e4606a7ce..f8bd2b4d686 100644 --- a/pkg/schedule/schedulers/metrics.go +++ b/pkg/schedule/schedulers/metrics.go @@ -14,7 +14,10 @@ package schedulers -import "github.com/prometheus/client_golang/prometheus" +import ( + "github.com/prometheus/client_golang/prometheus" + types "github.com/tikv/pd/pkg/schedule/type" +) var ( schedulerStatusGauge = prometheus.NewGaugeVec( @@ -161,3 +164,180 @@ func init() { prometheus.MustRegister(storeSlowTrendMiscGauge) prometheus.MustRegister(HotPendingSum) } + +func balanceLeaderCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.BalanceLeaderScheduler.String(), event) +} + +func balanceRegionCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.BalanceRegionScheduler.String(), event) +} + +func evictLeaderCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.EvictLeaderScheduler.String(), event) +} + +func grantHotRegionCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.GrantHotRegionScheduler.String(), event) +} + +func grantLeaderCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.GrantHotRegionScheduler.String(), event) +} + +func hotRegionCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.HotRegionScheduler.String(), event) +} + +func labelCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.LabelScheduler.String(), event) +} + +func randomMergeCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.RandomMergeScheduler.String(), event) +} + +func scatterRangeCounterWithEvent(event string) prometheus.Counter { + return 
schedulerCounter.WithLabelValues(types.ScatterRangeScheduler.String(), event) +} + +func shuffleHotRegionCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.ShuffleHotRegionScheduler.String(), event) +} + +func shuffleLeaderCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.ShuffleLeaderScheduler.String(), event) +} + +func shuffleRegionCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.ShuffleRegionScheduler.String(), event) +} + +func splitBucketCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.SplitBucketScheduler.String(), event) +} + +func transferWitnessLeaderCounterWithEvent(event string) prometheus.Counter { + return schedulerCounter.WithLabelValues(types.TransferWitnessLeaderScheduler.String(), event) +} + +// WithLabelValues is a heavy operation, define variable to avoid call it every time. +var ( + balanceLeaderScheduleCounter = balanceLeaderCounterWithEvent("schedule") + balanceLeaderNoLeaderRegionCounter = balanceLeaderCounterWithEvent("no-leader-region") + balanceLeaderRegionHotCounter = balanceLeaderCounterWithEvent("region-hot") + balanceLeaderNoTargetStoreCounter = balanceLeaderCounterWithEvent("no-target-store") + balanceLeaderNoFollowerRegionCounter = balanceLeaderCounterWithEvent("no-follower-region") + balanceLeaderSkipCounter = balanceLeaderCounterWithEvent("skip") + balanceLeaderNewOpCounter = balanceLeaderCounterWithEvent("new-operator") + + balanceRegionScheduleCounter = balanceRegionCounterWithEvent("schedule") + balanceRegionNoRegionCounter = balanceRegionCounterWithEvent("no-region") + balanceRegionHotCounter = balanceRegionCounterWithEvent("region-hot") + balanceRegionNoLeaderCounter = balanceRegionCounterWithEvent("no-leader") + balanceRegionNewOpCounter = balanceRegionCounterWithEvent("new-operator") + balanceRegionSkipCounter = balanceRegionCounterWithEvent("skip") + balanceRegionCreateOpFailCounter = balanceRegionCounterWithEvent("create-operator-fail") + balanceRegionNoReplacementCounter = balanceRegionCounterWithEvent("no-replacement") + + evictLeaderCounter = evictLeaderCounterWithEvent("schedule") + evictLeaderNoLeaderCounter = evictLeaderCounterWithEvent("no-leader") + evictLeaderPickUnhealthyCounter = evictLeaderCounterWithEvent("pick-unhealthy-region") + evictLeaderNoTargetStoreCounter = evictLeaderCounterWithEvent("no-target-store") + evictLeaderNewOperatorCounter = evictLeaderCounterWithEvent("new-operator") + + evictSlowStoreCounter = schedulerCounter.WithLabelValues(types.EvictSlowStoreScheduler.String(), "schedule") + + grantHotRegionCounter = grantHotRegionCounterWithEvent("schedule") + grantHotRegionSkipCounter = grantHotRegionCounterWithEvent("skip") + + grantLeaderCounter = grantLeaderCounterWithEvent("schedule") + grantLeaderNoFollowerCounter = grantLeaderCounterWithEvent("no-follower") + grantLeaderNewOperatorCounter = grantLeaderCounterWithEvent("new-operator") + + // counter related with the hot region + hotSchedulerCounter = hotRegionCounterWithEvent("schedule") + hotSchedulerSkipCounter = hotRegionCounterWithEvent("skip") + hotSchedulerSearchRevertRegionsCounter = hotRegionCounterWithEvent("search_revert_regions") + hotSchedulerNotSameEngineCounter = hotRegionCounterWithEvent("not_same_engine") + hotSchedulerNoRegionCounter = hotRegionCounterWithEvent("no_region") + hotSchedulerUnhealthyReplicaCounter = 
hotRegionCounterWithEvent("unhealthy_replica") + hotSchedulerAbnormalReplicaCounter = hotRegionCounterWithEvent("abnormal_replica") + hotSchedulerCreateOperatorFailedCounter = hotRegionCounterWithEvent("create_operator_failed") + hotSchedulerNewOperatorCounter = hotRegionCounterWithEvent("new_operator") + hotSchedulerSnapshotSenderLimitCounter = hotRegionCounterWithEvent("snapshot_sender_limit") + // hot region counter related with the split region + hotSchedulerNotFoundSplitKeysCounter = hotRegionCounterWithEvent("not_found_split_keys") + hotSchedulerRegionBucketsNotHotCounter = hotRegionCounterWithEvent("region_buckets_not_hot") + hotSchedulerOnlyOneBucketsHotCounter = hotRegionCounterWithEvent("only_one_buckets_hot") + hotSchedulerHotBucketNotValidCounter = hotRegionCounterWithEvent("hot_buckets_not_valid") + hotSchedulerRegionBucketsSingleHotSpotCounter = hotRegionCounterWithEvent("region_buckets_single_hot_spot") + hotSchedulerSplitSuccessCounter = hotRegionCounterWithEvent("split_success") + hotSchedulerNeedSplitBeforeScheduleCounter = hotRegionCounterWithEvent("need_split_before_move_peer") + hotSchedulerRegionTooHotNeedSplitCounter = hotRegionCounterWithEvent("region_is_too_hot_need_split") + // hot region counter related with the move peer + hotSchedulerMoveLeaderCounter = hotRegionCounterWithEvent(moveLeader.String()) + hotSchedulerMovePeerCounter = hotRegionCounterWithEvent(movePeer.String()) + hotSchedulerTransferLeaderCounter = hotRegionCounterWithEvent(transferLeader.String()) + // hot region counter related with reading and writing + readSkipAllDimUniformStoreCounter = hotRegionCounterWithEvent("read-skip-all-dim-uniform-store") + writeSkipAllDimUniformStoreCounter = hotRegionCounterWithEvent("write-skip-all-dim-uniform-store") + readSkipByteDimUniformStoreCounter = hotRegionCounterWithEvent("read-skip-byte-uniform-store") + writeSkipByteDimUniformStoreCounter = hotRegionCounterWithEvent("write-skip-byte-uniform-store") + readSkipKeyDimUniformStoreCounter = hotRegionCounterWithEvent("read-skip-key-uniform-store") + writeSkipKeyDimUniformStoreCounter = hotRegionCounterWithEvent("write-skip-key-uniform-store") + readSkipQueryDimUniformStoreCounter = hotRegionCounterWithEvent("read-skip-query-uniform-store") + writeSkipQueryDimUniformStoreCounter = hotRegionCounterWithEvent("write-skip-query-uniform-store") + pendingOpFailsStoreCounter = hotRegionCounterWithEvent("pending-op-fails") + + labelCounter = labelCounterWithEvent("schedule") + labelNewOperatorCounter = labelCounterWithEvent("new-operator") + labelNoTargetCounter = labelCounterWithEvent("no-target") + labelSkipCounter = labelCounterWithEvent("skip") + labelNoRegionCounter = labelCounterWithEvent("no-region") + + randomMergeCounter = randomMergeCounterWithEvent("schedule") + randomMergeNewOperatorCounter = randomMergeCounterWithEvent("new-operator") + randomMergeNoSourceStoreCounter = randomMergeCounterWithEvent("no-source-store") + randomMergeNoRegionCounter = randomMergeCounterWithEvent("no-region") + randomMergeNoTargetStoreCounter = randomMergeCounterWithEvent("no-target-store") + randomMergeNotAllowedCounter = randomMergeCounterWithEvent("not-allowed") + + scatterRangeCounter = scatterRangeCounterWithEvent("schedule") + scatterRangeNewOperatorCounter = scatterRangeCounterWithEvent("new-operator") + scatterRangeNewLeaderOperatorCounter = scatterRangeCounterWithEvent("new-leader-operator") + scatterRangeNewRegionOperatorCounter = scatterRangeCounterWithEvent("new-region-operator") + 
scatterRangeNoNeedBalanceRegionCounter = scatterRangeCounterWithEvent("no-need-balance-region") + scatterRangeNoNeedBalanceLeaderCounter = scatterRangeCounterWithEvent("no-need-balance-leader") + + shuffleHotRegionCounter = shuffleHotRegionCounterWithEvent("schedule") + shuffleHotRegionNewOperatorCounter = shuffleHotRegionCounterWithEvent("new-operator") + shuffleHotRegionSkipCounter = shuffleHotRegionCounterWithEvent("skip") + + shuffleLeaderCounter = shuffleLeaderCounterWithEvent("schedule") + shuffleLeaderNewOperatorCounter = shuffleLeaderCounterWithEvent("new-operator") + shuffleLeaderNoTargetStoreCounter = shuffleLeaderCounterWithEvent("no-target-store") + shuffleLeaderNoFollowerCounter = shuffleLeaderCounterWithEvent("no-follower") + + shuffleRegionCounter = shuffleRegionCounterWithEvent("schedule") + shuffleRegionNewOperatorCounter = shuffleRegionCounterWithEvent("new-operator") + shuffleRegionNoRegionCounter = shuffleRegionCounterWithEvent("no-region") + shuffleRegionNoNewPeerCounter = shuffleRegionCounterWithEvent("no-new-peer") + shuffleRegionCreateOperatorFailCounter = shuffleRegionCounterWithEvent("create-operator-fail") + shuffleRegionNoSourceStoreCounter = shuffleRegionCounterWithEvent("no-source-store") + + splitBucketDisableCounter = splitBucketCounterWithEvent("bucket-disable") + splitBuckerSplitLimitCounter = splitBucketCounterWithEvent("split-limit") + splitBucketScheduleCounter = splitBucketCounterWithEvent("schedule") + splitBucketNoRegionCounter = splitBucketCounterWithEvent("no-region") + splitBucketRegionTooSmallCounter = splitBucketCounterWithEvent("region-too-small") + splitBucketOperatorExistCounter = splitBucketCounterWithEvent("operator-exist") + splitBucketKeyRangeNotMatchCounter = splitBucketCounterWithEvent("key-range-not-match") + splitBucketNoSplitKeysCounter = splitBucketCounterWithEvent("no-split-keys") + splitBucketCreateOperatorFailCounter = splitBucketCounterWithEvent("create-operator-fail") + splitBucketNewOperatorCounter = splitBucketCounterWithEvent("new-operator") + + transferWitnessLeaderCounter = transferWitnessLeaderCounterWithEvent("schedule") + transferWitnessLeaderNewOperatorCounter = transferWitnessLeaderCounterWithEvent("new-operator") + transferWitnessLeaderNoTargetStoreCounter = transferWitnessLeaderCounterWithEvent("no-target-store") +) diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go index 7fec0bd9530..ff96afe03eb 100644 --- a/pkg/schedule/schedulers/random_merge.go +++ b/pkg/schedule/schedulers/random_merge.go @@ -35,16 +35,6 @@ const ( RandomMergeType = "random-merge" ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. 
- randomMergeCounter = schedulerCounter.WithLabelValues(RandomMergeName, "schedule") - randomMergeNewOperatorCounter = schedulerCounter.WithLabelValues(RandomMergeName, "new-operator") - randomMergeNoSourceStoreCounter = schedulerCounter.WithLabelValues(RandomMergeName, "no-source-store") - randomMergeNoRegionCounter = schedulerCounter.WithLabelValues(RandomMergeName, "no-region") - randomMergeNoTargetStoreCounter = schedulerCounter.WithLabelValues(RandomMergeName, "no-target-store") - randomMergeNotAllowedCounter = schedulerCounter.WithLabelValues(RandomMergeName, "not-allowed") -) - type randomMergeSchedulerConfig struct { Name string `json:"name"` Ranges []core.KeyRange `json:"ranges"` diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go index ebee66dc207..17c67a154ab 100644 --- a/pkg/schedule/schedulers/scatter_range.go +++ b/pkg/schedule/schedulers/scatter_range.go @@ -38,16 +38,6 @@ const ( ScatterRangeName = "scatter-range" ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. - scatterRangeCounter = schedulerCounter.WithLabelValues(ScatterRangeName, "schedule") - scatterRangeNewOperatorCounter = schedulerCounter.WithLabelValues(ScatterRangeName, "new-operator") - scatterRangeNewLeaderOperatorCounter = schedulerCounter.WithLabelValues(ScatterRangeName, "new-leader-operator") - scatterRangeNewRegionOperatorCounter = schedulerCounter.WithLabelValues(ScatterRangeName, "new-region-operator") - scatterRangeNoNeedBalanceRegionCounter = schedulerCounter.WithLabelValues(ScatterRangeName, "no-need-balance-region") - scatterRangeNoNeedBalanceLeaderCounter = schedulerCounter.WithLabelValues(ScatterRangeName, "no-need-balance-leader") -) - type scatterRangeSchedulerConfig struct { syncutil.RWMutex storage endpoint.ConfigStorage diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go index 4b5b5fd68bf..f4b566c56a4 100644 --- a/pkg/schedule/schedulers/shuffle_hot_region.go +++ b/pkg/schedule/schedulers/shuffle_hot_region.go @@ -41,13 +41,6 @@ const ( ShuffleHotRegionType = "shuffle-hot-region" ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. - shuffleHotRegionCounter = schedulerCounter.WithLabelValues(ShuffleHotRegionName, "schedule") - shuffleHotRegionNewOperatorCounter = schedulerCounter.WithLabelValues(ShuffleHotRegionName, "new-operator") - shuffleHotRegionSkipCounter = schedulerCounter.WithLabelValues(ShuffleHotRegionName, "skip") -) - type shuffleHotRegionSchedulerConfig struct { syncutil.RWMutex storage endpoint.ConfigStorage diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go index 5b3dfd9fd20..17b5fae6448 100644 --- a/pkg/schedule/schedulers/shuffle_leader.go +++ b/pkg/schedule/schedulers/shuffle_leader.go @@ -32,14 +32,6 @@ const ( ShuffleLeaderType = "shuffle-leader" ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. 
- shuffleLeaderCounter = schedulerCounter.WithLabelValues(ShuffleLeaderName, "schedule") - shuffleLeaderNewOperatorCounter = schedulerCounter.WithLabelValues(ShuffleLeaderName, "new-operator") - shuffleLeaderNoTargetStoreCounter = schedulerCounter.WithLabelValues(ShuffleLeaderName, "no-target-store") - shuffleLeaderNoFollowerCounter = schedulerCounter.WithLabelValues(ShuffleLeaderName, "no-follower") -) - type shuffleLeaderSchedulerConfig struct { Name string `json:"name"` Ranges []core.KeyRange `json:"ranges"` diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go index b1a100384ae..57f6c618962 100644 --- a/pkg/schedule/schedulers/shuffle_region.go +++ b/pkg/schedule/schedulers/shuffle_region.go @@ -33,16 +33,6 @@ const ( ShuffleRegionType = "shuffle-region" ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. - shuffleRegionCounter = schedulerCounter.WithLabelValues(ShuffleRegionName, "schedule") - shuffleRegionNewOperatorCounter = schedulerCounter.WithLabelValues(ShuffleRegionName, "new-operator") - shuffleRegionNoRegionCounter = schedulerCounter.WithLabelValues(ShuffleRegionName, "no-region") - shuffleRegionNoNewPeerCounter = schedulerCounter.WithLabelValues(ShuffleRegionName, "no-new-peer") - shuffleRegionCreateOperatorFailCounter = schedulerCounter.WithLabelValues(ShuffleRegionName, "create-operator-fail") - shuffleRegionNoSourceStoreCounter = schedulerCounter.WithLabelValues(ShuffleRegionName, "no-source-store") -) - type shuffleRegionScheduler struct { *BaseScheduler conf *shuffleRegionSchedulerConfig diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index 9b049bf6ba1..4516dfe4433 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -45,20 +45,6 @@ const ( defaultSplitLimit = 10 ) -var ( - // WithLabelValues is a heavy operation, define variable to avoid call it every time. 
-	splitBucketDisableCounter            = schedulerCounter.WithLabelValues(SplitBucketName, "bucket-disable")
-	splitBuckerSplitLimitCounter         = schedulerCounter.WithLabelValues(SplitBucketName, "split-limit")
-	splitBucketScheduleCounter           = schedulerCounter.WithLabelValues(SplitBucketName, "schedule")
-	splitBucketNoRegionCounter           = schedulerCounter.WithLabelValues(SplitBucketName, "no-region")
-	splitBucketRegionTooSmallCounter     = schedulerCounter.WithLabelValues(SplitBucketName, "region-too-small")
-	splitBucketOperatorExistCounter      = schedulerCounter.WithLabelValues(SplitBucketName, "operator-exist")
-	splitBucketKeyRangeNotMatchCounter   = schedulerCounter.WithLabelValues(SplitBucketName, "key-range-not-match")
-	splitBucketNoSplitKeysCounter        = schedulerCounter.WithLabelValues(SplitBucketName, "no-split-keys")
-	splitBucketCreateOperatorFailCounter = schedulerCounter.WithLabelValues(SplitBucketName, "create-operator-fail")
-	splitBucketNewOperatorCounter        = schedulerCounter.WithLabelValues(SplitBucketName, "new-operator")
-)
-
 func initSplitBucketConfig() *splitBucketSchedulerConfig {
 	return &splitBucketSchedulerConfig{
 		Degree: defaultHotDegree,
diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go
index 9ba78985d13..2050194b9ae 100644
--- a/pkg/schedule/schedulers/transfer_witness_leader.go
+++ b/pkg/schedule/schedulers/transfer_witness_leader.go
@@ -40,13 +40,6 @@ const (
 	transferWitnessLeaderRecvMaxRegionSize = 10000
 )
 
-var (
-	// WithLabelValues is a heavy operation, define variable to avoid call it every time.
-	transferWitnessLeaderCounter              = schedulerCounter.WithLabelValues(TransferWitnessLeaderName, "schedule")
-	transferWitnessLeaderNewOperatorCounter   = schedulerCounter.WithLabelValues(TransferWitnessLeaderName, "new-operator")
-	transferWitnessLeaderNoTargetStoreCounter = schedulerCounter.WithLabelValues(TransferWitnessLeaderName, "no-target-store")
-)
-
 type transferWitnessLeaderScheduler struct {
 	*BaseScheduler
 	regions chan *core.RegionInfo
diff --git a/pkg/schedule/type/type.go b/pkg/schedule/type/type.go
index d872bf0408c..65b2f0f682d 100644
--- a/pkg/schedule/type/type.go
+++ b/pkg/schedule/type/type.go
@@ -33,4 +33,40 @@ const (
 	RuleChecker CheckerSchedulerType = "rule-checker"
 	// SplitChecker is the name for split checker.
 	SplitChecker CheckerSchedulerType = "split-checker"
+
+	// BalanceLeaderScheduler is balance leader scheduler name.
+	BalanceLeaderScheduler CheckerSchedulerType = "balance-leader-scheduler"
+	// BalanceRegionScheduler is balance region scheduler name.
+	BalanceRegionScheduler CheckerSchedulerType = "balance-region-scheduler"
+	// BalanceWitnessScheduler is balance witness scheduler name.
+	BalanceWitnessScheduler CheckerSchedulerType = "balance-witness-scheduler"
+	// EvictLeaderScheduler is evict leader scheduler name.
+	EvictLeaderScheduler CheckerSchedulerType = "evict-leader-scheduler"
+	// EvictSlowStoreScheduler is evict slow store scheduler name.
+	EvictSlowStoreScheduler CheckerSchedulerType = "evict-slow-store-scheduler"
+	// EvictSlowTrendScheduler is evict leader by slow trend scheduler name.
+	EvictSlowTrendScheduler CheckerSchedulerType = "evict-slow-trend-scheduler"
+	// GrantLeaderScheduler is grant leader scheduler name.
+	GrantLeaderScheduler CheckerSchedulerType = "grant-leader-scheduler"
+	// GrantHotRegionScheduler is grant hot region scheduler name.
+	GrantHotRegionScheduler CheckerSchedulerType = "grant-hot-region-scheduler"
+	// HotRegionScheduler is balance hot region scheduler name.
+ HotRegionScheduler CheckerSchedulerType = "balance-hot-region-scheduler" + // RandomMergeScheduler is random merge scheduler name. + RandomMergeScheduler CheckerSchedulerType = "random-merge-scheduler" + // ScatterRangeScheduler is scatter range scheduler name. + // TODO: update to `scatter-range-scheduler` + ScatterRangeScheduler CheckerSchedulerType = "scatter-range" + // ShuffleHotRegionScheduler is shuffle hot region scheduler name. + ShuffleHotRegionScheduler CheckerSchedulerType = "shuffle-hot-region-scheduler" + // ShuffleLeaderScheduler is shuffle leader scheduler name. + ShuffleLeaderScheduler CheckerSchedulerType = "shuffle-leader-scheduler" + // ShuffleRegionScheduler is shuffle region scheduler name. + ShuffleRegionScheduler CheckerSchedulerType = "shuffle-region-scheduler" + // SplitBucketScheduler is the split bucket name. + SplitBucketScheduler CheckerSchedulerType = "split-bucket-scheduler" + // TransferWitnessLeaderScheduler is transfer witness leader scheduler name. + TransferWitnessLeaderScheduler CheckerSchedulerType = "transfer-witness-leader-scheduler" + // LabelScheduler is label scheduler name. + LabelScheduler CheckerSchedulerType = "label-scheduler" ) From 1a20c85284d5f4bf52be1566c89f3e3147a11ab5 Mon Sep 17 00:00:00 2001 From: zzm Date: Wed, 17 Jul 2024 16:33:00 +0800 Subject: [PATCH 07/20] show approximate_kv_size in pd-ctl (#8403) close tikv/pd#8412 Signed-off-by: zeminzhou --- pkg/response/region.go | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/pkg/response/region.go b/pkg/response/region.go index 153294c2861..6db7f135ad8 100644 --- a/pkg/response/region.go +++ b/pkg/response/region.go @@ -115,17 +115,18 @@ type RegionInfo struct { RegionEpoch *metapb.RegionEpoch `json:"epoch,omitempty"` Peers []MetaPeer `json:"peers,omitempty"` - Leader MetaPeer `json:"leader,omitempty"` - DownPeers []PDPeerStats `json:"down_peers,omitempty"` - PendingPeers []MetaPeer `json:"pending_peers,omitempty"` - CPUUsage uint64 `json:"cpu_usage"` - WrittenBytes uint64 `json:"written_bytes"` - ReadBytes uint64 `json:"read_bytes"` - WrittenKeys uint64 `json:"written_keys"` - ReadKeys uint64 `json:"read_keys"` - ApproximateSize int64 `json:"approximate_size"` - ApproximateKeys int64 `json:"approximate_keys"` - Buckets []string `json:"buckets,omitempty"` + Leader MetaPeer `json:"leader,omitempty"` + DownPeers []PDPeerStats `json:"down_peers,omitempty"` + PendingPeers []MetaPeer `json:"pending_peers,omitempty"` + CPUUsage uint64 `json:"cpu_usage"` + WrittenBytes uint64 `json:"written_bytes"` + ReadBytes uint64 `json:"read_bytes"` + WrittenKeys uint64 `json:"written_keys"` + ReadKeys uint64 `json:"read_keys"` + ApproximateSize int64 `json:"approximate_size"` + ApproximateKeys int64 `json:"approximate_keys"` + ApproximateKvSize int64 `json:"approximate_kv_size"` + Buckets []string `json:"buckets,omitempty"` ReplicationStatus *ReplicationStatus `json:"replication_status,omitempty"` } @@ -173,6 +174,7 @@ func InitRegion(r *core.RegionInfo, s *RegionInfo) *RegionInfo { s.ReadKeys = r.GetKeysRead() s.ApproximateSize = r.GetApproximateSize() s.ApproximateKeys = r.GetApproximateKeys() + s.ApproximateKvSize = r.GetApproximateKvSize() s.ReplicationStatus = fromPBReplicationStatus(r.GetReplicationStatus()) s.Buckets = nil From 3330a44fbea2b1fd19525b78da10240a746770e5 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Thu, 18 Jul 2024 15:39:01 +0800 Subject: [PATCH 08/20] scheduler: add batch config for evict leader scheduler (#8259) close tikv/pd#8265 
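
A rough usage sketch with pd-ctl (the "set batch" subcommand is added by
this patch; the JSON output shape below is abridged and illustrative):

    >> scheduler add evict-leader-scheduler 1
    >> scheduler config evict-leader-scheduler set batch 5
    >> scheduler config evict-leader-scheduler
    {
      "store-id-ranges": {"1": [...]},
      "batch": 5
    }

A single scheduling round now generates up to "batch" operators, validated
to be in [1, 10]; a newly created scheduler defaults to EvictLeaderBatchSize.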
Signed-off-by: Ryan Leung Co-authored-by: lhy1024 Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/schedule/schedulers/evict_leader.go | 274 +++++++++++------- pkg/schedule/schedulers/evict_leader_test.go | 43 ++- pkg/schedule/schedulers/evict_slow_store.go | 6 +- pkg/schedule/schedulers/evict_slow_trend.go | 6 +- pkg/schedule/schedulers/init.go | 1 + pkg/schedule/schedulers/scheduler_test.go | 4 +- tools/pd-ctl/pdctl/command/scheduler.go | 4 + .../pd-ctl/tests/scheduler/scheduler_test.go | 23 +- 8 files changed, 239 insertions(+), 122 deletions(-) diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index 6cbd04ee671..2adcfbe7e48 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -51,6 +51,8 @@ type evictLeaderSchedulerConfig struct { syncutil.RWMutex storage endpoint.ConfigStorage StoreIDWithRanges map[uint64][]core.KeyRange `json:"store-id-ranges"` + // Batch is used to generate multiple operators by one scheduling + Batch int `json:"batch"` cluster *core.BasicCluster removeSchedulerCb func(string) error } @@ -65,23 +67,10 @@ func (conf *evictLeaderSchedulerConfig) getStores() []uint64 { return stores } -func (conf *evictLeaderSchedulerConfig) BuildWithArgs(args []string) error { - if len(args) != 1 { - return errs.ErrSchedulerConfig.FastGenByArgs("id") - } - - id, err := strconv.ParseUint(args[0], 10, 64) - if err != nil { - return errs.ErrStrconvParseUint.Wrap(err) - } - ranges, err := getKeyRanges(args[1:]) - if err != nil { - return err - } - conf.Lock() - defer conf.Unlock() - conf.StoreIDWithRanges[id] = ranges - return nil +func (conf *evictLeaderSchedulerConfig) getBatch() int { + conf.RLock() + defer conf.RUnlock() + return conf.Batch } func (conf *evictLeaderSchedulerConfig) Clone() *evictLeaderSchedulerConfig { @@ -93,13 +82,12 @@ func (conf *evictLeaderSchedulerConfig) Clone() *evictLeaderSchedulerConfig { } return &evictLeaderSchedulerConfig{ StoreIDWithRanges: storeIDWithRanges, + Batch: conf.Batch, } } -func (conf *evictLeaderSchedulerConfig) Persist() error { +func (conf *evictLeaderSchedulerConfig) persistLocked() error { name := conf.getSchedulerName() - conf.RLock() - defer conf.RUnlock() data, err := EncodeConfig(conf) failpoint.Inject("persistFail", func() { err = errors.New("fail to persist") @@ -125,29 +113,29 @@ func (conf *evictLeaderSchedulerConfig) getRanges(id uint64) []string { return res } -func (conf *evictLeaderSchedulerConfig) removeStore(id uint64) (succ bool, last bool) { - conf.Lock() - defer conf.Unlock() +func (conf *evictLeaderSchedulerConfig) removeStoreLocked(id uint64) (bool, error) { _, exists := conf.StoreIDWithRanges[id] - succ, last = false, false if exists { delete(conf.StoreIDWithRanges, id) conf.cluster.ResumeLeaderTransfer(id) - succ = true - last = len(conf.StoreIDWithRanges) == 0 + return len(conf.StoreIDWithRanges) == 0, nil } - return succ, last + return false, errs.ErrScheduleConfigNotExist.FastGenByArgs() } -func (conf *evictLeaderSchedulerConfig) resetStore(id uint64, keyRange []core.KeyRange) { - conf.Lock() - defer conf.Unlock() +func (conf *evictLeaderSchedulerConfig) resetStoreLocked(id uint64, keyRange []core.KeyRange) { if err := conf.cluster.PauseLeaderTransfer(id); err != nil { log.Error("pause leader transfer failed", zap.Uint64("store-id", id), errs.ZapError(err)) } conf.StoreIDWithRanges[id] = keyRange } +func (conf *evictLeaderSchedulerConfig) resetStore(id uint64, keyRange 
[]core.KeyRange) {
+	conf.Lock()
+	defer conf.Unlock()
+	conf.resetStoreLocked(id, keyRange)
+}
+
 func (conf *evictLeaderSchedulerConfig) getKeyRangesByID(id uint64) []core.KeyRange {
 	conf.RLock()
 	defer conf.RUnlock()
@@ -157,6 +145,108 @@ func (conf *evictLeaderSchedulerConfig) getKeyRangesByID(id uint64) []core.KeyRa
 	return nil
 }
 
+func (conf *evictLeaderSchedulerConfig) encodeConfig() ([]byte, error) {
+	conf.RLock()
+	defer conf.RUnlock()
+	return EncodeConfig(conf)
+}
+
+func (conf *evictLeaderSchedulerConfig) reloadConfig(name string) error {
+	conf.Lock()
+	defer conf.Unlock()
+	cfgData, err := conf.storage.LoadSchedulerConfig(name)
+	if err != nil {
+		return err
+	}
+	if len(cfgData) == 0 {
+		return nil
+	}
+	newCfg := &evictLeaderSchedulerConfig{}
+	if err = DecodeConfig([]byte(cfgData), newCfg); err != nil {
+		return err
+	}
+	pauseAndResumeLeaderTransfer(conf.cluster, conf.StoreIDWithRanges, newCfg.StoreIDWithRanges)
+	conf.StoreIDWithRanges = newCfg.StoreIDWithRanges
+	conf.Batch = newCfg.Batch
+	return nil
+}
+
+func (conf *evictLeaderSchedulerConfig) pauseLeaderTransfer(cluster sche.SchedulerCluster) error {
+	conf.RLock()
+	defer conf.RUnlock()
+	var res error
+	for id := range conf.StoreIDWithRanges {
+		if err := cluster.PauseLeaderTransfer(id); err != nil {
+			res = err
+		}
+	}
+	return res
+}
+
+func (conf *evictLeaderSchedulerConfig) resumeLeaderTransfer(cluster sche.SchedulerCluster) {
+	conf.RLock()
+	defer conf.RUnlock()
+	for id := range conf.StoreIDWithRanges {
+		cluster.ResumeLeaderTransfer(id)
+	}
+}
+
+func (conf *evictLeaderSchedulerConfig) pauseLeaderTransferIfStoreNotExist(id uint64) (bool, error) {
+	conf.RLock()
+	defer conf.RUnlock()
+	if _, exist := conf.StoreIDWithRanges[id]; !exist {
+		if err := conf.cluster.PauseLeaderTransfer(id); err != nil {
+			return exist, err
+		}
+	}
+	return true, nil
+}
+
+func (conf *evictLeaderSchedulerConfig) update(id uint64, newRanges []core.KeyRange, batch int) error {
+	conf.Lock()
+	defer conf.Unlock()
+	if id != 0 {
+		conf.StoreIDWithRanges[id] = newRanges
+	}
+	conf.Batch = batch
+	err := conf.persistLocked()
+	if err != nil && id != 0 {
+		_, _ = conf.removeStoreLocked(id)
+	}
+	return err
+}
+
+func (conf *evictLeaderSchedulerConfig) delete(id uint64) (any, error) {
+	conf.Lock()
+	var resp any
+	keyRanges := conf.StoreIDWithRanges[id]
+	last, err := conf.removeStoreLocked(id)
+	if err != nil {
+		conf.Unlock()
+		return resp, err
+	}
+
+	err = conf.persistLocked()
+	if err != nil {
+		conf.resetStoreLocked(id, keyRanges)
+		conf.Unlock()
+		return resp, err
+	}
+	if !last {
+		conf.Unlock()
+		return resp, nil
+	}
+	conf.Unlock()
+	if err := conf.removeSchedulerCb(EvictLeaderName); err != nil {
+		if !errors.ErrorEqual(err, errs.ErrSchedulerNotFound.FastGenByArgs()) {
+			conf.resetStore(id, keyRanges)
+		}
+		return resp, err
+	}
+	resp = lastStoreDeleteInfo
+	return resp, nil
+}
+
 type evictLeaderScheduler struct {
 	*BaseScheduler
 	conf *evictLeaderSchedulerConfig
@@ -193,48 +283,19 @@ func (*evictLeaderScheduler) GetType() string {
 }
 
 func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) {
-	s.conf.RLock()
-	defer s.conf.RUnlock()
-	return EncodeConfig(s.conf)
+	return s.conf.encodeConfig()
 }
 
 func (s *evictLeaderScheduler) ReloadConfig() error {
-	s.conf.Lock()
-	defer s.conf.Unlock()
-	cfgData, err := s.conf.storage.LoadSchedulerConfig(s.GetName())
-	if err != nil {
-		return err
-	}
-	if len(cfgData) == 0 {
-		return nil
-	}
-	newCfg := &evictLeaderSchedulerConfig{}
-	if err = DecodeConfig([]byte(cfgData), newCfg); err !=
nil { - return err - } - pauseAndResumeLeaderTransfer(s.conf.cluster, s.conf.StoreIDWithRanges, newCfg.StoreIDWithRanges) - s.conf.StoreIDWithRanges = newCfg.StoreIDWithRanges - return nil + return s.conf.reloadConfig(s.GetName()) } func (s *evictLeaderScheduler) PrepareConfig(cluster sche.SchedulerCluster) error { - s.conf.RLock() - defer s.conf.RUnlock() - var res error - for id := range s.conf.StoreIDWithRanges { - if err := cluster.PauseLeaderTransfer(id); err != nil { - res = err - } - } - return res + return s.conf.pauseLeaderTransfer(cluster) } func (s *evictLeaderScheduler) CleanConfig(cluster sche.SchedulerCluster) { - s.conf.RLock() - defer s.conf.RUnlock() - for id := range s.conf.StoreIDWithRanges { - cluster.ResumeLeaderTransfer(id) - } + s.conf.resumeLeaderTransfer(cluster) } func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { @@ -247,7 +308,7 @@ func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { evictLeaderCounter.Inc() - return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize), nil + return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf), nil } func uniqueAppendOperator(dst []*operator.Operator, src ...*operator.Operator) []*operator.Operator { @@ -268,10 +329,12 @@ func uniqueAppendOperator(dst []*operator.Operator, src ...*operator.Operator) [ type evictLeaderStoresConf interface { getStores() []uint64 getKeyRangesByID(id uint64) []core.KeyRange + getBatch() int } -func scheduleEvictLeaderBatch(name, typ string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf, batchSize int) []*operator.Operator { +func scheduleEvictLeaderBatch(name, typ string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator { var ops []*operator.Operator + batchSize := conf.getBatch() for i := 0; i < batchSize; i++ { once := scheduleEvictLeaderOnce(name, typ, cluster, conf) // no more regions @@ -354,39 +417,50 @@ func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R if err := apiutil.ReadJSONRespondError(handler.rd, w, r.Body, &input); err != nil { return } - var args []string - var exists bool - var id uint64 - idFloat, ok := input["store_id"].(float64) - if ok { + var ( + exist bool + err error + id uint64 + newRanges []core.KeyRange + ) + idFloat, inputHasStoreID := input["store_id"].(float64) + if inputHasStoreID { id = (uint64)(idFloat) - handler.config.RLock() - if _, exists = handler.config.StoreIDWithRanges[id]; !exists { - if err := handler.config.cluster.PauseLeaderTransfer(id); err != nil { - handler.config.RUnlock() - handler.rd.JSON(w, http.StatusInternalServerError, err.Error()) - return - } + exist, err = handler.config.pauseLeaderTransferIfStoreNotExist(id) + if err != nil { + handler.rd.JSON(w, http.StatusInternalServerError, err.Error()) + return + } + } + + batch := handler.config.getBatch() + batchFloat, ok := input["batch"].(float64) + if ok { + if batchFloat < 1 || batchFloat > 10 { + handler.rd.JSON(w, http.StatusBadRequest, "batch is invalid, it should be in [1, 10]") + return } - handler.config.RUnlock() - args = append(args, strconv.FormatUint(id, 10)) + batch = (int)(batchFloat) } ranges, ok := (input["ranges"]).([]string) if ok { - args = append(args, ranges...) - } else if exists { - args = append(args, handler.config.getRanges(id)...) 
+ if !inputHasStoreID { + handler.rd.JSON(w, http.StatusInternalServerError, errs.ErrSchedulerConfig.FastGenByArgs("id")) + return + } + } else if exist { + ranges = handler.config.getRanges(id) } - err := handler.config.BuildWithArgs(args) + newRanges, err = getKeyRanges(ranges) if err != nil { - handler.rd.JSON(w, http.StatusBadRequest, err.Error()) + handler.rd.JSON(w, http.StatusInternalServerError, err.Error()) return } - err = handler.config.Persist() + + err = handler.config.update(id, newRanges, batch) if err != nil { - handler.config.removeStore(id) handler.rd.JSON(w, http.StatusInternalServerError, err.Error()) return } @@ -406,33 +480,17 @@ func (handler *evictLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.R return } - var resp any - keyRanges := handler.config.getKeyRangesByID(id) - succ, last := handler.config.removeStore(id) - if succ { - err = handler.config.Persist() - if err != nil { - handler.config.resetStore(id, keyRanges) + resp, err := handler.config.delete(id) + if err != nil { + if errors.ErrorEqual(err, errs.ErrSchedulerNotFound.FastGenByArgs()) || errors.ErrorEqual(err, errs.ErrScheduleConfigNotExist.FastGenByArgs()) { + handler.rd.JSON(w, http.StatusNotFound, err.Error()) + } else { handler.rd.JSON(w, http.StatusInternalServerError, err.Error()) - return - } - if last { - if err := handler.config.removeSchedulerCb(EvictLeaderName); err != nil { - if errors.ErrorEqual(err, errs.ErrSchedulerNotFound.FastGenByArgs()) { - handler.rd.JSON(w, http.StatusNotFound, err.Error()) - } else { - handler.config.resetStore(id, keyRanges) - handler.rd.JSON(w, http.StatusInternalServerError, err.Error()) - } - return - } - resp = lastStoreDeleteInfo } - handler.rd.JSON(w, http.StatusOK, resp) return } - handler.rd.JSON(w, http.StatusNotFound, errs.ErrScheduleConfigNotExist.FastGenByArgs().Error()) + handler.rd.JSON(w, http.StatusOK, resp) } func newEvictLeaderHandler(config *evictLeaderSchedulerConfig) http.Handler { diff --git a/pkg/schedule/schedulers/evict_leader_test.go b/pkg/schedule/schedulers/evict_leader_test.go index a91b1c3c937..63f7cde3b15 100644 --- a/pkg/schedule/schedulers/evict_leader_test.go +++ b/pkg/schedule/schedulers/evict_leader_test.go @@ -89,18 +89,43 @@ func TestConfigClone(t *testing.T) { emptyConf := &evictLeaderSchedulerConfig{StoreIDWithRanges: make(map[uint64][]core.KeyRange)} con2 := emptyConf.Clone() - re.Empty(emptyConf.getKeyRangesByID(1)) - re.NoError(con2.BuildWithArgs([]string{"1"})) - re.NotEmpty(con2.getKeyRangesByID(1)) - re.Empty(emptyConf.getKeyRangesByID(1)) + re.Empty(con2.getKeyRangesByID(1)) + con2.StoreIDWithRanges[1], _ = getKeyRanges([]string{"a", "b", "c", "d"}) con3 := con2.Clone() - con3.StoreIDWithRanges[1], _ = getKeyRanges([]string{"a", "b", "c", "d"}) - re.Empty(emptyConf.getKeyRangesByID(1)) - re.NotEqual(len(con3.getRanges(1)), len(con2.getRanges(1))) + re.Equal(len(con3.getRanges(1)), len(con2.getRanges(1))) + con3.StoreIDWithRanges[1][0].StartKey = []byte("aaa") con4 := con3.Clone() re.True(bytes.Equal(con4.StoreIDWithRanges[1][0].StartKey, con3.StoreIDWithRanges[1][0].StartKey)) - con4.StoreIDWithRanges[1][0].StartKey = []byte("aaa") - re.False(bytes.Equal(con4.StoreIDWithRanges[1][0].StartKey, con3.StoreIDWithRanges[1][0].StartKey)) + + con4.Batch = 10 + con5 := con4.Clone() + re.Equal(con5.getBatch(), con4.getBatch()) +} + +func TestBatchEvict(t *testing.T) { + re := require.New(t) + cancel, _, tc, oc := prepareSchedulersTest() + defer cancel() + + // Add stores 1, 2, 3 + tc.AddLeaderStore(1, 0) + 
tc.AddLeaderStore(2, 0) + tc.AddLeaderStore(3, 0) + // the random might be the same, so we add 1000 regions to make sure the batch is full + for i := 1; i <= 1000; i++ { + tc.AddLeaderRegion(uint64(i), 1, 2, 3) + } + tc.AddLeaderRegion(6, 2, 1, 3) + tc.AddLeaderRegion(7, 3, 1, 2) + + sl, err := CreateScheduler(EvictLeaderType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(EvictLeaderType, []string{"1"}), func(string) error { return nil }) + re.NoError(err) + re.True(sl.IsScheduleAllowed(tc)) + ops, _ := sl.Schedule(tc, false) + re.Len(ops, 3) + sl.(*evictLeaderScheduler).conf.Batch = 5 + ops, _ = sl.Schedule(tc, false) + re.Len(ops, 5) } diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index b4cc79e782a..c9f10fa610f 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -96,6 +96,10 @@ func (conf *evictSlowStoreSchedulerConfig) getKeyRangesByID(id uint64) []core.Ke return []core.KeyRange{core.NewKeyRange("", "")} } +func (*evictSlowStoreSchedulerConfig) getBatch() int { + return EvictLeaderBatchSize +} + func (conf *evictSlowStoreSchedulerConfig) evictStore() uint64 { if len(conf.getStores()) == 0 { return 0 @@ -263,7 +267,7 @@ func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster sche.SchedulerClust } func (s *evictSlowStoreScheduler) schedulerEvictLeader(cluster sche.SchedulerCluster) []*operator.Operator { - return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize) + return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf) } func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index da3dbc24e95..dc2266b5540 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -110,6 +110,10 @@ func (conf *evictSlowTrendSchedulerConfig) getKeyRangesByID(id uint64) []core.Ke return []core.KeyRange{core.NewKeyRange("", "")} } +func (*evictSlowTrendSchedulerConfig) getBatch() int { + return EvictLeaderBatchSize +} + func (conf *evictSlowTrendSchedulerConfig) hasEvictedStores() bool { conf.RLock() defer conf.RUnlock() @@ -370,7 +374,7 @@ func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster sche.SchedulerClus return nil } storeSlowTrendEvictedStatusGauge.WithLabelValues(store.GetAddress(), strconv.FormatUint(store.GetID(), 10)).Set(1) - return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize) + return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf) } func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { diff --git a/pkg/schedule/schedulers/init.go b/pkg/schedule/schedulers/init.go index 6bca686404d..777c8b3d625 100644 --- a/pkg/schedule/schedulers/init.go +++ b/pkg/schedule/schedulers/init.go @@ -137,6 +137,7 @@ func schedulersRegister() { return err } conf.StoreIDWithRanges[id] = ranges + conf.Batch = EvictLeaderBatchSize return nil } }) diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index 5a603515942..48040841c76 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -148,8 +148,8 @@ func TestRemoveRejectLeader(t *testing.T) { el, err := CreateScheduler(EvictLeaderType, oc, storage.NewStorageWithMemoryBackend(), 
ConfigSliceDecoder(EvictLeaderType, []string{"1"}), func(string) error { return nil }) re.NoError(err) tc.DeleteStore(tc.GetStore(1)) - succ, _ := el.(*evictLeaderScheduler).conf.removeStore(1) - re.True(succ) + _, err = el.(*evictLeaderScheduler).conf.removeStoreLocked(1) + re.NoError(err) } func TestShuffleHotRegionScheduleBalance(t *testing.T) { diff --git a/tools/pd-ctl/pdctl/command/scheduler.go b/tools/pd-ctl/pdctl/command/scheduler.go index c1db24cc176..4c85bb64037 100644 --- a/tools/pd-ctl/pdctl/command/scheduler.go +++ b/tools/pd-ctl/pdctl/command/scheduler.go @@ -592,6 +592,10 @@ func newConfigEvictLeaderCommand() *cobra.Command { Use: "delete-store ", Short: "delete a store from evict leader list", Run: func(cmd *cobra.Command, args []string) { deleteStoreFromSchedulerConfig(cmd, c.Name(), args) }, + }, &cobra.Command{ + Use: "set ", + Short: "set the config item", + Run: func(cmd *cobra.Command, args []string) { postSchedulerConfigCommandFunc(cmd, c.Name(), args) }, }) return c } diff --git a/tools/pd-ctl/tests/scheduler/scheduler_test.go b/tools/pd-ctl/tests/scheduler/scheduler_test.go index 3f58175b5fa..3a6e29f3586 100644 --- a/tools/pd-ctl/tests/scheduler/scheduler_test.go +++ b/tools/pd-ctl/tests/scheduler/scheduler_test.go @@ -140,7 +140,7 @@ func (suite *schedulerTestSuite) checkScheduler(cluster *pdTests.TestCluster) { testutil.Eventually(re, func() bool { configInfo := make(map[string]any) mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", schedulerName}, &configInfo) - return reflect.DeepEqual(expectedConfig, configInfo) + return reflect.DeepEqual(expectedConfig["store-id-ranges"], configInfo["store-id-ranges"]) }) } @@ -530,6 +530,27 @@ func (suite *schedulerTestSuite) checkSchedulerConfig(cluster *pdTests.TestClust return !strings.Contains(echo, "shuffle-hot-region-scheduler") }) + // test evict leader scheduler + echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "add", "evict-leader-scheduler", "1"}, nil) + re.Contains(echo, "Success!") + testutil.Eventually(re, func() bool { + echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "show"}, nil) + return strings.Contains(echo, "evict-leader-scheduler") + }) + echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "evict-leader-scheduler", "set", "batch", "5"}, nil) + re.Contains(echo, "Success!") + conf = make(map[string]any) + testutil.Eventually(re, func() bool { + mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "evict-leader-scheduler"}, &conf) + return conf["batch"] == 5. + }) + echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "remove", "evict-leader-scheduler-1"}, nil) + re.Contains(echo, "Success!") + testutil.Eventually(re, func() bool { + echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "show"}, nil) + return !strings.Contains(echo, "evict-leader-scheduler") + }) + // test balance leader config conf = make(map[string]any) conf1 := make(map[string]any) From c65577c7bbd7b810c637c293bf5e21bacaa7a95e Mon Sep 17 00:00:00 2001 From: JmPotato Date: Thu, 18 Jul 2024 16:52:30 +0800 Subject: [PATCH 09/20] client: make TSO client request duration include failed requests (#8410) ref tikv/pd#8281 Make TSO client request duration include failed requests. 
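
Previously a failed stream Recv returned before the duration was observed,
so the "tso" histogram only covered successful requests; failures are now
recorded under a separate "tso-failed" label value. A sample PromQL
comparison (assuming the client-side histogram keeps its current name,
pd_client_request_handle_requests_duration_seconds):

    histogram_quantile(0.99, sum(rate(pd_client_request_handle_requests_duration_seconds_bucket{type="tso"}[1m])) by (le))
    histogram_quantile(0.99, sum(rate(pd_client_request_handle_requests_duration_seconds_bucket{type="tso-failed"}[1m])) by (le))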
Signed-off-by: JmPotato Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- client/metrics.go | 8 ++++++-- client/tso_stream.go | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/client/metrics.go b/client/metrics.go index f3c47d7e787..a11362669b3 100644 --- a/client/metrics.go +++ b/client/metrics.go @@ -159,11 +159,13 @@ var ( cmdFailedDurationUpdateServiceGCSafePoint prometheus.Observer cmdFailedDurationLoadKeyspace prometheus.Observer cmdFailedDurationUpdateKeyspaceState prometheus.Observer - requestDurationTSO prometheus.Observer cmdFailedDurationGet prometheus.Observer cmdFailedDurationPut prometheus.Observer cmdFailedDurationUpdateGCSafePointV2 prometheus.Observer cmdFailedDurationUpdateServiceSafePointV2 prometheus.Observer + + requestDurationTSO prometheus.Observer + requestFailedDurationTSO prometheus.Observer ) func initCmdDurations() { @@ -207,11 +209,13 @@ func initCmdDurations() { cmdFailedDurationUpdateServiceGCSafePoint = cmdFailedDuration.WithLabelValues("update_service_gc_safe_point") cmdFailedDurationLoadKeyspace = cmdFailedDuration.WithLabelValues("load_keyspace") cmdFailedDurationUpdateKeyspaceState = cmdFailedDuration.WithLabelValues("update_keyspace_state") - requestDurationTSO = requestDuration.WithLabelValues("tso") cmdFailedDurationGet = cmdFailedDuration.WithLabelValues("get") cmdFailedDurationPut = cmdFailedDuration.WithLabelValues("put") cmdFailedDurationUpdateGCSafePointV2 = cmdFailedDuration.WithLabelValues("update_gc_safe_point_v2") cmdFailedDurationUpdateServiceSafePointV2 = cmdFailedDuration.WithLabelValues("update_service_safe_point_v2") + + requestDurationTSO = requestDuration.WithLabelValues("tso") + requestFailedDurationTSO = requestDuration.WithLabelValues("tso-failed") } func registerMetrics() { diff --git a/client/tso_stream.go b/client/tso_stream.go index dd5b9422aae..9c4d78dfe18 100644 --- a/client/tso_stream.go +++ b/client/tso_stream.go @@ -141,7 +141,9 @@ func (s *pdTSOStream) processRequests( } tsoBatchSendLatency.Observe(time.Since(batchStartTime).Seconds()) resp, err := s.stream.Recv() + duration := time.Since(start).Seconds() if err != nil { + requestFailedDurationTSO.Observe(duration) if err == io.EOF { err = errs.ErrClientTSOStreamClosed } else { @@ -149,7 +151,7 @@ func (s *pdTSOStream) processRequests( } return } - requestDurationTSO.Observe(time.Since(start).Seconds()) + requestDurationTSO.Observe(duration) tsoBatchSize.Observe(float64(count)) if resp.GetCount() != uint32(count) { @@ -197,7 +199,9 @@ func (s *tsoTSOStream) processRequests( } tsoBatchSendLatency.Observe(time.Since(batchStartTime).Seconds()) resp, err := s.stream.Recv() + duration := time.Since(start).Seconds() if err != nil { + requestFailedDurationTSO.Observe(duration) if err == io.EOF { err = errs.ErrClientTSOStreamClosed } else { @@ -205,7 +209,7 @@ func (s *tsoTSOStream) processRequests( } return } - requestDurationTSO.Observe(time.Since(start).Seconds()) + requestDurationTSO.Observe(duration) tsoBatchSize.Observe(float64(count)) if resp.GetCount() != uint32(count) { From 3bb69e66dffcf071528b4d0c47af6bd777c5f1dd Mon Sep 17 00:00:00 2001 From: Sparkle <1284531+baurine@users.noreply.github.com> Date: Thu, 18 Jul 2024 18:04:32 +0800 Subject: [PATCH 10/20] chore(dashboard): update TiDB Dashboard to v8.3.0-e6e78c7c [master] (#8420) ref tikv/pd#4257 Signed-off-by: baurine <2008.hbl@gmail.com> --- go.mod | 2 +- go.sum | 4 ++-- scripts/dashboard-version | 2 +- tests/integrations/go.mod | 2 +- 
tests/integrations/go.sum | 4 ++-- tools/go.mod | 2 +- tools/go.sum | 4 ++-- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 5b8074d285b..aaf0ed1a435 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( github.com/pingcap/kvproto v0.0.0-20240716095229-5f7ffec83ea7 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 - github.com/pingcap/tidb-dashboard v0.0.0-20240612100141-91f6c281e441 + github.com/pingcap/tidb-dashboard v0.0.0-20240718034516-e6e78c7c120b github.com/prometheus/client_golang v1.19.0 github.com/prometheus/common v0.51.1 github.com/sasha-s/go-deadlock v0.2.0 diff --git a/go.sum b/go.sum index baffeb0d6b7..480f99af33e 100644 --- a/go.sum +++ b/go.sum @@ -378,8 +378,8 @@ github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 h1:QV6jqlfOkh8hqvEAgwBZa+4bSgO0EeKC7s5c6Luam2I= github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21/go.mod h1:QYnjfA95ZaMefyl1NO8oPtKeb8pYUdnDVhQgf+qdpjM= -github.com/pingcap/tidb-dashboard v0.0.0-20240612100141-91f6c281e441 h1:01flLztcoWBeT5pe69Q8LAB2Hty0s9Rqc3RvHU4AQK8= -github.com/pingcap/tidb-dashboard v0.0.0-20240612100141-91f6c281e441/go.mod h1:ucZBRz52icb23T/5Z4CsuUHmarYiin7p2MeiVBe+o8c= +github.com/pingcap/tidb-dashboard v0.0.0-20240718034516-e6e78c7c120b h1:MKgJ9yCQxD5ewLERuoiiD9XVOHuqZ2WRZnB20yMiKyo= +github.com/pingcap/tidb-dashboard v0.0.0-20240718034516-e6e78c7c120b/go.mod h1:ucZBRz52icb23T/5Z4CsuUHmarYiin7p2MeiVBe+o8c= github.com/pingcap/tipb v0.0.0-20220718022156-3e2483c20a9e h1:FBaTXU8C3xgt/drM58VHxojHo/QoG1oPsgWTGvaSpO4= github.com/pingcap/tipb v0.0.0-20220718022156-3e2483c20a9e/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= diff --git a/scripts/dashboard-version b/scripts/dashboard-version index 08a22137df5..42ff906db4e 100644 --- a/scripts/dashboard-version +++ b/scripts/dashboard-version @@ -1,3 +1,3 @@ # This file is updated by running scripts/update-dashboard.sh # Don't edit it manullay -8.2.0-91f6c281 +8.3.0-e6e78c7c diff --git a/tests/integrations/go.mod b/tests/integrations/go.mod index a9f996417a4..9076d4ed256 100644 --- a/tests/integrations/go.mod +++ b/tests/integrations/go.mod @@ -125,7 +125,7 @@ require ( github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d // indirect github.com/pingcap/errcode v0.3.0 // indirect github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 // indirect - github.com/pingcap/tidb-dashboard v0.0.0-20240612100141-91f6c281e441 // indirect + github.com/pingcap/tidb-dashboard v0.0.0-20240718034516-e6e78c7c120b // indirect github.com/pingcap/tipb v0.0.0-20220718022156-3e2483c20a9e // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/tests/integrations/go.sum b/tests/integrations/go.sum index b46c01e77cc..be23ca15174 100644 --- a/tests/integrations/go.sum +++ b/tests/integrations/go.sum @@ -375,8 +375,8 @@ github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 
h1:QV6jqlfOkh8hqvEAgwBZa+4bSgO0EeKC7s5c6Luam2I= github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21/go.mod h1:QYnjfA95ZaMefyl1NO8oPtKeb8pYUdnDVhQgf+qdpjM= -github.com/pingcap/tidb-dashboard v0.0.0-20240612100141-91f6c281e441 h1:01flLztcoWBeT5pe69Q8LAB2Hty0s9Rqc3RvHU4AQK8= -github.com/pingcap/tidb-dashboard v0.0.0-20240612100141-91f6c281e441/go.mod h1:ucZBRz52icb23T/5Z4CsuUHmarYiin7p2MeiVBe+o8c= +github.com/pingcap/tidb-dashboard v0.0.0-20240718034516-e6e78c7c120b h1:MKgJ9yCQxD5ewLERuoiiD9XVOHuqZ2WRZnB20yMiKyo= +github.com/pingcap/tidb-dashboard v0.0.0-20240718034516-e6e78c7c120b/go.mod h1:ucZBRz52icb23T/5Z4CsuUHmarYiin7p2MeiVBe+o8c= github.com/pingcap/tipb v0.0.0-20220718022156-3e2483c20a9e h1:FBaTXU8C3xgt/drM58VHxojHo/QoG1oPsgWTGvaSpO4= github.com/pingcap/tipb v0.0.0-20220718022156-3e2483c20a9e/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= diff --git a/tools/go.mod b/tools/go.mod index af187b4999c..ed6813f0bbd 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -127,7 +127,7 @@ require ( github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d // indirect github.com/pingcap/errcode v0.3.0 // indirect github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 // indirect - github.com/pingcap/tidb-dashboard v0.0.0-20240612100141-91f6c281e441 // indirect + github.com/pingcap/tidb-dashboard v0.0.0-20240718034516-e6e78c7c120b // indirect github.com/pingcap/tipb v0.0.0-20220718022156-3e2483c20a9e // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect diff --git a/tools/go.sum b/tools/go.sum index f508ca92384..e9d47a8b02b 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -372,8 +372,8 @@ github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 h1:QV6jqlfOkh8hqvEAgwBZa+4bSgO0EeKC7s5c6Luam2I= github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21/go.mod h1:QYnjfA95ZaMefyl1NO8oPtKeb8pYUdnDVhQgf+qdpjM= -github.com/pingcap/tidb-dashboard v0.0.0-20240612100141-91f6c281e441 h1:01flLztcoWBeT5pe69Q8LAB2Hty0s9Rqc3RvHU4AQK8= -github.com/pingcap/tidb-dashboard v0.0.0-20240612100141-91f6c281e441/go.mod h1:ucZBRz52icb23T/5Z4CsuUHmarYiin7p2MeiVBe+o8c= +github.com/pingcap/tidb-dashboard v0.0.0-20240718034516-e6e78c7c120b h1:MKgJ9yCQxD5ewLERuoiiD9XVOHuqZ2WRZnB20yMiKyo= +github.com/pingcap/tidb-dashboard v0.0.0-20240718034516-e6e78c7c120b/go.mod h1:ucZBRz52icb23T/5Z4CsuUHmarYiin7p2MeiVBe+o8c= github.com/pingcap/tipb v0.0.0-20220718022156-3e2483c20a9e h1:FBaTXU8C3xgt/drM58VHxojHo/QoG1oPsgWTGvaSpO4= github.com/pingcap/tipb v0.0.0-20220718022156-3e2483c20a9e/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= From 1c86ddb8919b58cb976ddb9b87ef868abb07b0be Mon Sep 17 00:00:00 2001 From: Hu# Date: Fri, 19 Jul 2024 13:24:01 +0800 Subject: [PATCH 11/20] tools/ut: add -v to help diagnose (#8421) ref tikv/pd#7969 Signed-off-by: husharp --- tools/pd-ut/ut.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/pd-ut/ut.go b/tools/pd-ut/ut.go index de62151fcb3..dcf0c17c686 100644 --- a/tools/pd-ut/ut.go +++ b/tools/pd-ut/ut.go @@ -673,6 +673,8 @@ func failureCases(input []JUnitTestCase) 
int { func (*numa) testCommand(pkg string, fn string) *exec.Cmd { args := make([]string, 0, 10) + // let the test run in the verbose mode. + args = append(args, "-test.v") exe := "./" + testFileName(pkg) if coverProfile != "" { fileName := strings.ReplaceAll(pkg, "/", "_") + "." + fn From c514937500f0194315720e03a60f89ffd500d604 Mon Sep 17 00:00:00 2001 From: Hu# Date: Fri, 19 Jul 2024 15:06:01 +0800 Subject: [PATCH 12/20] resourcemanager: make TestResourceManagerServer more stable (#8418) close tikv/pd#8417 Signed-off-by: husharp Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- tests/integrations/mcs/resourcemanager/server_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integrations/mcs/resourcemanager/server_test.go b/tests/integrations/mcs/resourcemanager/server_test.go index eaeef99e9d6..5ab8745f7be 100644 --- a/tests/integrations/mcs/resourcemanager/server_test.go +++ b/tests/integrations/mcs/resourcemanager/server_test.go @@ -25,6 +25,7 @@ import ( rmpb "github.com/pingcap/kvproto/pkg/resource_manager" "github.com/stretchr/testify/require" "github.com/tikv/pd/client/grpcutil" + bs "github.com/tikv/pd/pkg/basicserver" "github.com/tikv/pd/pkg/utils/tempurl" "github.com/tikv/pd/pkg/versioninfo" "github.com/tikv/pd/tests" @@ -49,6 +50,7 @@ func TestResourceManagerServer(t *testing.T) { s, cleanup := tests.StartSingleResourceManagerTestServer(ctx, re, leader.GetAddr(), tempurl.Alloc()) addr := s.GetAddr() defer cleanup() + tests.WaitForPrimaryServing(re, map[string]bs.Server{addr: s}) // Test registered GRPC Service cc, err := grpcutil.GetClientConn(ctx, addr, nil) From a17ff35da8f4c5b813ab9b4dc73d472d66389d31 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Fri, 19 Jul 2024 18:04:31 +0800 Subject: [PATCH 13/20] *: upgrade go-graphviz (#8423) close tikv/pd#8422 Signed-off-by: Ryan Leung --- go.mod | 20 +++++++-------- go.sum | 51 +++++++++++++++------------------------ tests/integrations/go.mod | 16 ++++++------ tests/integrations/go.sum | 47 ++++++++++++++---------------------- tools/go.mod | 16 ++++++------ tools/go.sum | 47 ++++++++++++++---------------------- 6 files changed, 82 insertions(+), 115 deletions(-) diff --git a/go.mod b/go.mod index aaf0ed1a435..aee8cc7afad 100644 --- a/go.mod +++ b/go.mod @@ -58,7 +58,7 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 golang.org/x/time v0.5.0 - golang.org/x/tools v0.14.0 + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d google.golang.org/grpc v1.62.1 gotest.tools/gotestsum v1.7.0 ) @@ -109,7 +109,7 @@ require ( github.com/go-playground/validator/v10 v10.14.0 // indirect github.com/go-resty/resty/v2 v2.6.0 // indirect github.com/go-sql-driver/mysql v1.7.0 // indirect - github.com/goccy/go-graphviz v0.0.9 // indirect + github.com/goccy/go-graphviz v0.1.3 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect @@ -175,15 +175,15 @@ require ( go.uber.org/fx v1.12.0 // indirect go.uber.org/multierr v1.11.0 golang.org/x/arch v0.3.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/image v0.10.0 // indirect - golang.org/x/mod v0.13.0 // indirect - golang.org/x/net v0.23.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/image v0.18.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.25.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect - 
golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.20.0 // indirect + golang.org/x/text v0.16.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect diff --git a/go.sum b/go.sum index 480f99af33e..3bbddb05476 100644 --- a/go.sum +++ b/go.sum @@ -170,8 +170,8 @@ github.com/go-resty/resty/v2 v2.6.0/go.mod h1:PwvJS6hvaPkjtjNg9ph+VrSD92bi5Zq73w github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/goccy/go-graphviz v0.0.9 h1:s/FMMJ1Joj6La3S5ApO3Jk2cwM4LpXECC2muFx3IPQQ= -github.com/goccy/go-graphviz v0.0.9/go.mod h1:wXVsXxmyMQU6TN3zGRttjNn3h+iCAS7xQFC6TlNvLhk= +github.com/goccy/go-graphviz v0.1.3 h1:Pkt8y4FBnBNI9tfSobpoN5qy1qMNqRXPQYvLhaSUasY= +github.com/goccy/go-graphviz v0.1.3/go.mod h1:pMYpbAqJT10V8dzV1JN/g/wUlG/0imKPzn3ZsrchGCI= github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= @@ -261,7 +261,6 @@ github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= github.com/jarcoal/httpmock v1.0.8 h1:8kI16SoO6LQKgPE7PvQuV+YuD/inwHd7fOOe2zMbo4k= github.com/jarcoal/httpmock v1.0.8/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= @@ -556,18 +555,16 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 h1:QLureRX3moex6NVu/Lr4MGakp9FdA7sBHGBmvRW7NaM= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.10.0 h1:gXjUUtwtx5yOE0VKWq1CH4IJAClq4UGgUA3i+rpON9M= -golang.org/x/image v0.10.0/go.mod h1:jtrku+n79PfroUbvDdeUWMAI+heR786BofxrbiSF+J0= +golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ= +golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -580,9 +577,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -604,9 +600,8 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= @@ -619,9 +614,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -655,25 +649,21 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -700,9 +690,8 @@ golang.org/x/tools 
v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/tests/integrations/go.mod b/tests/integrations/go.mod index 9076d4ed256..f56b1cdc32a 100644 --- a/tests/integrations/go.mod +++ b/tests/integrations/go.mod @@ -83,7 +83,7 @@ require ( github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.14.0 // indirect github.com/go-resty/resty/v2 v2.6.0 // indirect - github.com/goccy/go-graphviz v0.0.9 // indirect + github.com/goccy/go-graphviz v0.1.3 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect @@ -165,16 +165,16 @@ require ( go.uber.org/fx v1.12.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.23.0 // indirect golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 // indirect - golang.org/x/image v0.10.0 // indirect - golang.org/x/net v0.23.0 // indirect + golang.org/x/image v0.18.0 // indirect + golang.org/x/net v0.25.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.14.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect diff --git a/tests/integrations/go.sum b/tests/integrations/go.sum index be23ca15174..e23f71d41ce 100644 --- a/tests/integrations/go.sum +++ b/tests/integrations/go.sum @@ -164,8 +164,8 @@ github.com/go-resty/resty/v2 v2.6.0/go.mod h1:PwvJS6hvaPkjtjNg9ph+VrSD92bi5Zq73w github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/goccy/go-graphviz v0.0.9 h1:s/FMMJ1Joj6La3S5ApO3Jk2cwM4LpXECC2muFx3IPQQ= -github.com/goccy/go-graphviz v0.0.9/go.mod h1:wXVsXxmyMQU6TN3zGRttjNn3h+iCAS7xQFC6TlNvLhk= +github.com/goccy/go-graphviz 
v0.1.3 h1:Pkt8y4FBnBNI9tfSobpoN5qy1qMNqRXPQYvLhaSUasY= +github.com/goccy/go-graphviz v0.1.3/go.mod h1:pMYpbAqJT10V8dzV1JN/g/wUlG/0imKPzn3ZsrchGCI= github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= @@ -253,7 +253,6 @@ github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= github.com/jarcoal/httpmock v1.0.8 h1:8kI16SoO6LQKgPE7PvQuV+YuD/inwHd7fOOe2zMbo4k= github.com/jarcoal/httpmock v1.0.8/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= @@ -547,18 +546,16 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 h1:QLureRX3moex6NVu/Lr4MGakp9FdA7sBHGBmvRW7NaM= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.10.0 h1:gXjUUtwtx5yOE0VKWq1CH4IJAClq4UGgUA3i+rpON9M= -golang.org/x/image v0.10.0/go.mod h1:jtrku+n79PfroUbvDdeUWMAI+heR786BofxrbiSF+J0= +golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ= +golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -572,9 +569,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -597,9 +593,8 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= @@ -612,9 +607,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -645,23 +639,19 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -686,9 +676,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/tools/go.mod b/tools/go.mod index ed6813f0bbd..caafef12b87 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -35,8 +35,8 @@ require ( go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 - golang.org/x/text v0.14.0 - golang.org/x/tools v0.14.0 + golang.org/x/text v0.16.0 + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d google.golang.org/grpc v1.62.1 ) 
@@ -86,7 +86,7 @@ require ( github.com/go-playground/validator/v10 v10.14.0 // indirect github.com/go-resty/resty/v2 v2.6.0 // indirect github.com/go-sql-driver/mysql v1.7.0 // indirect - github.com/goccy/go-graphviz v0.0.9 // indirect + github.com/goccy/go-graphviz v0.1.3 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect @@ -165,13 +165,13 @@ require ( go.uber.org/fx v1.12.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.23.0 // indirect golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 // indirect - golang.org/x/image v0.10.0 // indirect - golang.org/x/net v0.23.0 // indirect + golang.org/x/image v0.18.0 // indirect + golang.org/x/net v0.25.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.18.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.20.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect diff --git a/tools/go.sum b/tools/go.sum index e9d47a8b02b..d9bc29aebff 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -163,8 +163,8 @@ github.com/go-resty/resty/v2 v2.6.0/go.mod h1:PwvJS6hvaPkjtjNg9ph+VrSD92bi5Zq73w github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/goccy/go-graphviz v0.0.9 h1:s/FMMJ1Joj6La3S5ApO3Jk2cwM4LpXECC2muFx3IPQQ= -github.com/goccy/go-graphviz v0.0.9/go.mod h1:wXVsXxmyMQU6TN3zGRttjNn3h+iCAS7xQFC6TlNvLhk= +github.com/goccy/go-graphviz v0.1.3 h1:Pkt8y4FBnBNI9tfSobpoN5qy1qMNqRXPQYvLhaSUasY= +github.com/goccy/go-graphviz v0.1.3/go.mod h1:pMYpbAqJT10V8dzV1JN/g/wUlG/0imKPzn3ZsrchGCI= github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= @@ -254,7 +254,6 @@ github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= github.com/jarcoal/httpmock v1.0.8 h1:8kI16SoO6LQKgPE7PvQuV+YuD/inwHd7fOOe2zMbo4k= github.com/jarcoal/httpmock v1.0.8/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= @@ -549,19 +548,17 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 h1:QLureRX3moex6NVu/Lr4MGakp9FdA7sBHGBmvRW7NaM= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.10.0 h1:gXjUUtwtx5yOE0VKWq1CH4IJAClq4UGgUA3i+rpON9M= -golang.org/x/image v0.10.0/go.mod h1:jtrku+n79PfroUbvDdeUWMAI+heR786BofxrbiSF+J0= +golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ= +golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -574,9 +571,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -598,9 +594,8 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net 
v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= @@ -613,9 +608,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -646,23 +640,19 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text 
v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -687,9 +677,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 2d8e03f3380c7476abdbd13210c176be1e1c48d9 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Fri, 19 Jul 2024 18:19:30 +0800 Subject: [PATCH 14/20] *: fix redact log (#8415) close tikv/pd#8419 Signed-off-by: Ryan Leung Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/keyspace/keyspace.go | 5 +++-- pkg/schedule/labeler/rules.go | 36 ++++++++++++++++++++++++++++++++ server/cluster/cluster_worker.go | 4 ++-- 3 files changed, 41 insertions(+), 4 deletions(-) diff --git a/pkg/keyspace/keyspace.go b/pkg/keyspace/keyspace.go index 26fd4db10f0..bcee8e75dea 100644 --- a/pkg/keyspace/keyspace.go +++ b/pkg/keyspace/keyspace.go @@ -32,6 +32,7 @@ import ( "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" "github.com/tikv/pd/pkg/utils/etcdutil" + "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/syncutil" "go.uber.org/zap" ) @@ -333,7 +334,7 @@ func (manager *Manager) splitKeyspaceRegion(id uint32, waitRegionSplit bool) (er if waitRegionSplit { ranges := keyspaceRule.Data.([]*labeler.KeyRangeRule) if len(ranges) < 2 { - log.Warn("[keyspace] failed to split keyspace region with insufficient range", zap.Any("label-rule", keyspaceRule)) + log.Warn("[keyspace] failed to split keyspace region with insufficient range", logutil.ZapRedactString("label-rule", keyspaceRule.String())) return ErrRegionSplitFailed } rawLeftBound, rawRightBound := ranges[0].StartKey, ranges[0].EndKey @@ -382,7 +383,7 @@ func (manager *Manager) splitKeyspaceRegion(id uint32, waitRegionSplit bool) (er log.Info("[keyspace] added region label for keyspace", zap.Uint32("keyspace-id", id), - zap.Any("label-rule", keyspaceRule), + logutil.ZapRedactString("label-rule", keyspaceRule.String()), zap.Duration("takes", time.Since(start)), ) return diff --git a/pkg/schedule/labeler/rules.go b/pkg/schedule/labeler/rules.go index 
3462cb7c459..39a420032d8 100644 --- a/pkg/schedule/labeler/rules.go +++ b/pkg/schedule/labeler/rules.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "reflect" + "strings" "time" "github.com/pingcap/failpoint" @@ -38,6 +39,10 @@ type RegionLabel struct { expire *time.Time } +func (l *RegionLabel) String() string { + return fmt.Sprintf("key: %s, value: %s", l.Key, l.Value) +} + // LabelRule is the rule to assign labels to a region. // NOTE: This type is exported by HTTP API. Please pay more attention when modifying it. type LabelRule struct { @@ -49,6 +54,37 @@ type LabelRule struct { minExpire *time.Time } +func (rule *LabelRule) String() string { + var b strings.Builder + b.WriteString(fmt.Sprintf("id: %s, index: %d, type: %s", rule.ID, rule.Index, rule.RuleType)) + b.WriteString(", labels: ") + for i, l := range rule.Labels { + if i == 0 { + b.WriteString("[") + } + b.WriteString(l.String()) + if i == len(rule.Labels)-1 { + b.WriteString("]") + } else { + b.WriteString(", ") + } + } + b.WriteString(", data: ") + ranges := rule.Data.([]*KeyRangeRule) + for i, r := range ranges { + if i == 0 { + b.WriteString("[") + } + b.WriteString(fmt.Sprintf("startKey: {%s}, endKey: {%s}", r.StartKeyHex, r.EndKeyHex)) + if i == len(ranges)-1 { + b.WriteString("]") + } else { + b.WriteString(", ") + } + } + return b.String() +} + // NewLabelRuleFromJSON creates a label rule from the JSON data. func NewLabelRuleFromJSON(data []byte) (*LabelRule, error) { lr := &LabelRule{} diff --git a/server/cluster/cluster_worker.go b/server/cluster/cluster_worker.go index 93cf7bcbfad..2d9bb411995 100644 --- a/server/cluster/cluster_worker.go +++ b/server/cluster/cluster_worker.go @@ -238,7 +238,7 @@ func (*RaftCluster) HandleBatchReportSplit(request *pdpb.ReportBatchSplitRequest err := checkSplitRegions(regions) if err != nil { log.Warn("report batch split region is invalid", - zap.Stringer("region-meta", hrm), + logutil.ZapRedactStringer("region-meta", hrm), errs.ZapError(err)) return nil, err } @@ -247,7 +247,7 @@ func (*RaftCluster) HandleBatchReportSplit(request *pdpb.ReportBatchSplitRequest hrm = core.RegionsToHexMeta(regions[:last]) log.Info("region batch split, generate new regions", zap.Uint64("region-id", originRegion.GetId()), - zap.Stringer("origin", hrm), + logutil.ZapRedactStringer("origin", hrm), zap.Int("total", last)) return &pdpb.ReportBatchSplitResponse{}, nil } From 8de04120c14225506ebf150ed921f64fa6eb96c4 Mon Sep 17 00:00:00 2001 From: Hu# Date: Fri, 19 Jul 2024 18:27:01 +0800 Subject: [PATCH 15/20] tests: add frequency check time to make resign test more stable (#8411) close tikv/pd#8319 Signed-off-by: husharp Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/member/member.go | 8 +++++++- tests/integrations/client/client_test.go | 4 ++++ tests/integrations/mcs/scheduling/server_test.go | 4 ++++ tests/server/cluster/cluster_test.go | 14 +++++++++++--- tests/server/id/id_test.go | 5 +++++ tests/server/region_syncer/region_syncer_test.go | 6 +++++- 6 files changed, 36 insertions(+), 5 deletions(-) diff --git a/pkg/member/member.go b/pkg/member/member.go index bbf46d8f167..99fc5457b71 100644 --- a/pkg/member/member.go +++ b/pkg/member/member.go @@ -187,7 +187,13 @@ func (m *EmbeddedEtcdMember) CampaignLeader(ctx context.Context, leaseTimeout in failpoint.Return(m.leadership.Campaign(leaseTimeout, m.MemberValue())) }) - if m.leadership.GetCampaignTimesNum() > campaignLeaderFrequencyTimes { + checkTimes := campaignLeaderFrequencyTimes + 
failpoint.Inject("changeFrequencyTimes", func(val failpoint.Value) { + if v, ok := val.(int); ok { + checkTimes = v + } + }) + if m.leadership.GetCampaignTimesNum() > checkTimes { if err := m.ResignEtcdLeader(ctx, m.Name(), ""); err != nil { return err } diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go index e2cb2758b78..4138b775d7c 100644 --- a/tests/integrations/client/client_test.go +++ b/tests/integrations/client/client_test.go @@ -166,6 +166,10 @@ func TestClientLeaderChange(t *testing.T) { func TestLeaderTransferAndMoveCluster(t *testing.T) { re := require.New(t) + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/member/changeFrequencyTimes", "return(10)")) + defer func() { + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/member/changeFrequencyTimes")) + }() ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 3) diff --git a/tests/integrations/mcs/scheduling/server_test.go b/tests/integrations/mcs/scheduling/server_test.go index 3234c5cd1e6..e61955fb15f 100644 --- a/tests/integrations/mcs/scheduling/server_test.go +++ b/tests/integrations/mcs/scheduling/server_test.go @@ -652,6 +652,10 @@ func (suite *multipleServerTestSuite) TearDownSuite() { func (suite *multipleServerTestSuite) TestReElectLeader() { re := suite.Require() + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/member/changeFrequencyTimes", "return(10)")) + defer func() { + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/member/changeFrequencyTimes")) + }() tc, err := tests.NewTestSchedulingCluster(suite.ctx, 1, suite.backendEndpoints) re.NoError(err) defer tc.Destroy() diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go index dc972988b6b..5af750a3c2c 100644 --- a/tests/server/cluster/cluster_test.go +++ b/tests/server/cluster/cluster_test.go @@ -184,6 +184,10 @@ func TestDamagedRegion(t *testing.T) { func TestRegionStatistics(t *testing.T) { re := require.New(t) + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/member/changeFrequencyTimes", "return(10)")) + defer func() { + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/member/changeFrequencyTimes")) + }() ctx, cancel := context.WithCancel(context.Background()) defer cancel() tc, err := tests.NewTestCluster(ctx, 3) @@ -238,7 +242,7 @@ func TestRegionStatistics(t *testing.T) { re.Len(regions, 1) leaderServer.ResignLeader() - re.NotEqual(tc.WaitLeader(), leaderName) + re.NotEqual(leaderName, tc.WaitLeader()) leaderServer = tc.GetLeaderServer() leaderName = leaderServer.GetServer().Name() rc = leaderServer.GetRaftCluster() @@ -255,11 +259,11 @@ func TestRegionStatistics(t *testing.T) { re.False(r.LoadedFromStorage() && r.LoadedFromSync()) leaderServer.ResignLeader() - re.NotEqual(tc.WaitLeader(), leaderName) + re.NotEqual(leaderName, tc.WaitLeader()) leaderServer = tc.GetLeaderServer() leaderName = leaderServer.GetServer().Name() leaderServer.ResignLeader() - re.NotEqual(tc.WaitLeader(), leaderName) + re.NotEqual(leaderName, tc.WaitLeader()) rc = tc.GetLeaderServer().GetRaftCluster() r = rc.GetRegion(region.Id) re.NotNil(r) @@ -1643,6 +1647,10 @@ func TestMinResolvedTS(t *testing.T) { // See https://github.com/tikv/pd/issues/4941 func TestTransferLeaderBack(t *testing.T) { re := require.New(t) + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/member/changeFrequencyTimes", "return(10)")) + defer func() { + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/member/changeFrequencyTimes")) + }() ctx, 
cancel := context.WithCancel(context.Background()) defer cancel() tc, err := tests.NewTestCluster(ctx, 2) diff --git a/tests/server/id/id_test.go b/tests/server/id/id_test.go index 6a7c1a9a028..c7dee0d2924 100644 --- a/tests/server/id/id_test.go +++ b/tests/server/id/id_test.go @@ -19,6 +19,7 @@ import ( "sync" "testing" + "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/utils/syncutil" @@ -107,6 +108,10 @@ func TestCommand(t *testing.T) { func TestMonotonicID(t *testing.T) { re := require.New(t) + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/member/changeFrequencyTimes", "return(10)")) + defer func() { + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/member/changeFrequencyTimes")) + }() ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 2) diff --git a/tests/server/region_syncer/region_syncer_test.go b/tests/server/region_syncer/region_syncer_test.go index 2015f520a91..6a5c1ea361c 100644 --- a/tests/server/region_syncer/region_syncer_test.go +++ b/tests/server/region_syncer/region_syncer_test.go @@ -255,6 +255,10 @@ func TestPrepareChecker(t *testing.T) { // ref: https://github.com/tikv/pd/issues/6988 func TestPrepareCheckerWithTransferLeader(t *testing.T) { re := require.New(t) + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/member/changeFrequencyTimes", "return(10)")) + defer func() { + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/member/changeFrequencyTimes")) + }() ctx, cancel := context.WithCancel(context.Background()) defer cancel() re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker", `return(true)`)) @@ -298,7 +302,7 @@ func TestPrepareCheckerWithTransferLeader(t *testing.T) { leaderServer = cluster.GetLeaderServer() err = cluster.ResignLeader() re.NoError(err) - re.NotEqual(leaderServer.GetLeader().GetName(), cluster.WaitLeader()) + re.NotEqual(leaderServer.GetServer().Name(), cluster.WaitLeader()) rc = cluster.GetLeaderServer().GetServer().GetRaftCluster() re.True(rc.IsPrepared()) re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker")) From 7b014aa5cc5b29a71fde82c5b36d26e124cf4715 Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Mon, 22 Jul 2024 14:21:03 +0800 Subject: [PATCH 16/20] pd-ctl: disable EnablePrefixMatching and avoid misuse of `store remove` and `store delete` (#8414) close tikv/pd#8413 Signed-off-by: lhy1024 Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- tools/pd-ctl/pdctl/command/store_command.go | 6 +++++- tools/pd-ctl/pdctl/ctl.go | 2 +- tools/pd-ctl/tests/store/store_test.go | 7 ++++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/tools/pd-ctl/pdctl/command/store_command.go b/tools/pd-ctl/pdctl/command/store_command.go index bc024d5a2e6..188d36e25de 100644 --- a/tools/pd-ctl/pdctl/command/store_command.go +++ b/tools/pd-ctl/pdctl/command/store_command.go @@ -706,7 +706,11 @@ func showAllStoresLimitCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func removeTombStoneCommandFunc(cmd *cobra.Command, _ []string) { +func removeTombStoneCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 0 { + cmd.Usage() + return + } prefix := path.Join(storesPrefix, "remove-tombstone") _, err := doRequest(cmd, prefix, http.MethodDelete, http.Header{}) if err != nil { diff --git a/tools/pd-ctl/pdctl/ctl.go b/tools/pd-ctl/pdctl/ctl.go index fbacd65dc53..5f8c6485c42 
diff --git a/tools/pd-ctl/pdctl/ctl.go b/tools/pd-ctl/pdctl/ctl.go
index fbacd65dc53..5f8c6485c42 100644
--- a/tools/pd-ctl/pdctl/ctl.go
+++ b/tools/pd-ctl/pdctl/ctl.go
@@ -29,7 +29,7 @@ import (
 )
 
 func init() {
-	cobra.EnablePrefixMatching = true
+	cobra.EnablePrefixMatching = false
 	cobra.EnableTraverseRunHooks = true
 }
diff --git a/tools/pd-ctl/tests/store/store_test.go b/tools/pd-ctl/tests/store/store_test.go
index 0f522b8bd44..2e1e7ac9444 100644
--- a/tools/pd-ctl/tests/store/store_test.go
+++ b/tools/pd-ctl/tests/store/store_test.go
@@ -339,9 +339,14 @@ func TestStore(t *testing.T) {
 	// store delete command
 	storeInfo.Store.State = metapb.StoreState(metapb.StoreState_value[storeInfo.Store.StateName])
 	re.Equal(metapb.StoreState_Up, storeInfo.Store.State)
+	args = []string{"-u", pdAddr, "store", "remove", "1"} // `store remove` maps to remove-tombstone, not delete
+	output, err = tests.ExecuteCommand(cmd, args...)
+	re.NoError(err)
+	re.NotContains(string(output), "Success")
 	args = []string{"-u", pdAddr, "store", "delete", "1"}
-	_, err = tests.ExecuteCommand(cmd, args...)
+	output, err = tests.ExecuteCommand(cmd, args...)
 	re.NoError(err)
+	re.Contains(string(output), "Success")
 	args = []string{"-u", pdAddr, "store", "1"}
 	output, err = tests.ExecuteCommand(cmd, args...)
 	re.NoError(err)

From 0985e2e89f9f99e030192cfe774f63faeee2555d Mon Sep 17 00:00:00 2001
From: ShuNing
Date: Mon, 22 Jul 2024 14:30:03 +0800
Subject: [PATCH 17/20] resourcemanager: add more tests and comments for token assignment (#8399)

ref tikv/pd#4399

resourcemanager: add more tests and comments for token assignment

Signed-off-by: nolouch
Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com>
---
 .../resourcemanager/server/token_buckets.go |  5 ++
 .../server/token_buckets_test.go            | 64 +++++++++++++++++++
 2 files changed, 69 insertions(+)

diff --git a/pkg/mcs/resourcemanager/server/token_buckets.go b/pkg/mcs/resourcemanager/server/token_buckets.go
index f0b733a14ca..e0777b419eb 100644
--- a/pkg/mcs/resourcemanager/server/token_buckets.go
+++ b/pkg/mcs/resourcemanager/server/token_buckets.go
@@ -409,6 +409,11 @@ func (ts *TokenSlot) assignSlotTokens(requiredToken float64, targetPeriodMs uint
 	// |
 	// grant_rate 0 ------------------------------------------------------------------------------------
 	// loan  ***  k*period_token  (k+k-1)*period_token  ***  (k+k+1...+1)*period_token
+
+	// loanCoefficient bounds the total amount of RUs that can be loaned out: it acts as a buffer
+	// that slows down client consumption, with capacity `(1 + 2 + ... + loanCoefficient) * fillRate * targetPeriodTimeSec`.
+	// See the test case `TestGroupTokenBucketRequestLoop` for details.
+
 	p := make([]float64, loanCoefficient)
 	p[0] = float64(loanCoefficient) * float64(fillRate) * targetPeriodTimeSec
 	for i := 1; i < loanCoefficient; i++ {
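The capacity formula above is easy to sanity-check against `TestGroupTokenBucketRequestLoop` below. The diff does not show the value of `loanCoefficient`, but with the test's `fillRate` of 2000 and 5s target period the global bucket tokens converge to -30000, which is consistent with `loanCoefficient` = 2. A minimal sketch under that assumption:

    // bufferCapacity computes (1 + 2 + ... + loanCoefficient) * fillRate * targetPeriodTimeSec.
    func bufferCapacity(loanCoefficient int, fillRate, targetPeriodTimeSec float64) float64 {
        sum := 0.0
        for i := 1; i <= loanCoefficient; i++ {
            sum += float64(i)
        }
        return sum * fillRate * targetPeriodTimeSec
    }

    // bufferCapacity(2, 2000, 5) == (1+2)*2000*5 == 30000, matching the -30000
    // floor the global bucket reaches in the test once it is fully loaned out.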
diff --git a/pkg/mcs/resourcemanager/server/token_buckets_test.go b/pkg/mcs/resourcemanager/server/token_buckets_test.go
index b04a535fc74..8ac3ec4a3ba 100644
--- a/pkg/mcs/resourcemanager/server/token_buckets_test.go
+++ b/pkg/mcs/resourcemanager/server/token_buckets_test.go
@@ -15,6 +15,7 @@
 package server
 
 import (
+	"fmt"
 	"math"
 	"testing"
 	"time"
@@ -123,3 +124,66 @@
 	re.LessOrEqual(math.Abs(tb.Tokens-20000), 1e-7)
 	re.Equal(int64(time.Second)*10/int64(time.Millisecond), trickle)
 }
+
+func TestGroupTokenBucketRequestLoop(t *testing.T) {
+	re := require.New(t)
+	tbSetting := &rmpb.TokenBucket{
+		Tokens: 50000,
+		Settings: &rmpb.TokenLimitSettings{
+			FillRate:   2000,
+			BurstLimit: 200000,
+		},
+	}
+
+	gtb := NewGroupTokenBucket(tbSetting)
+	clientUniqueID := uint64(0)
+	initialTime := time.Now()
+
+	// Initialize the token bucket.
+	gtb.init(initialTime, clientUniqueID)
+	gtb.Tokens = 50000
+
+	const timeIncrement = 5 * time.Second
+	const targetPeriod = 5 * time.Second
+	const defaultTrickleMs = int64(targetPeriod) / int64(time.Millisecond)
+
+	// Define the test cases in a table.
+	testCases := []struct {
+		requestTokens                 float64
+		assignedTokens                float64
+		globalBucketTokensAfterAssign float64
+		expectedTrickleMs             int64
+	}{
+		/* requestTokens, assignedTokens, globalBucketTokensAfterAssign, TrickleMs */
+		{50000, 50000, 0, 0},
+		{50000, 30000, -20000, defaultTrickleMs},
+		{30000, 15000, -25000, defaultTrickleMs},
+		{15000, 12500, -27500, defaultTrickleMs},
+		{12500, 11250, -28750, defaultTrickleMs},
+		{11250, 10625, -29375, defaultTrickleMs},
+		// RU_PER_SEC is close to 2000: RU_PER_SEC = assignedTokens / (TrickleMs / 1000).
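+		// For example, the next row trickles 10312.5 tokens over 5000 ms, i.e. 10312.5 / (5000 / 1000) = 2062.5 RU/sec.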
+		{10625, 10312.5, -29687.5, defaultTrickleMs},
+		{10312.5, 10156.25, -29843.75, defaultTrickleMs},
+		{10156.25, 10078.125, -29921.875, defaultTrickleMs},
+		{10078.125, 10039.0625, -29960.9375, defaultTrickleMs},
+		{10039.0625, 10019.53125, -29980.46875, defaultTrickleMs},
+		{10019.53125, 10009.765625, -29990.234375, defaultTrickleMs},
+		{10009.765625, 10004.8828125, -29995.1171875, defaultTrickleMs},
+		{10004.8828125, 10002.44140625, -29997.55859375, defaultTrickleMs},
+		{10002.44140625, 10001.220703125, -29998.779296875, defaultTrickleMs},
+		{10001.220703125, 10000.6103515625, -29999.3896484375, defaultTrickleMs},
+		{10000.6103515625, 10000.30517578125, -29999.69482421875, defaultTrickleMs},
+		{10000.30517578125, 10000.152587890625, -29999.847412109375, defaultTrickleMs},
+		{10000.152587890625, 10000.0762939453125, -29999.9237060546875, defaultTrickleMs},
+		{10000.0762939453125, 10000.038146972656, -29999.961853027343, defaultTrickleMs},
+	}
+
+	currentTime := initialTime
+	for i, tc := range testCases {
+		tb, trickle := gtb.request(currentTime, tc.requestTokens, uint64(targetPeriod)/uint64(time.Millisecond), clientUniqueID)
+		re.Equal(tc.globalBucketTokensAfterAssign, gtb.GetTokenBucket().Tokens, fmt.Sprintf("Test case %d failed: expected bucket tokens %f, got %f", i, tc.globalBucketTokensAfterAssign, gtb.GetTokenBucket().Tokens))
+		re.LessOrEqual(math.Abs(tb.Tokens-tc.assignedTokens), 1e-7, fmt.Sprintf("Test case %d failed: expected tokens %f, got %f", i, tc.assignedTokens, tb.Tokens))
+		re.Equal(tc.expectedTrickleMs, trickle, fmt.Sprintf("Test case %d failed: expected trickle %d, got %d", i, tc.expectedTrickleMs, trickle))
+		currentTime = currentTime.Add(timeIncrement)
+	}
+}
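A note on the table above: once the bucket is deep into the loan (from the 15000-token request on), each assignment is the average of the previous assignment and one period's refill, assigned(n+1) = (assigned(n) + fillRate * targetPeriod) / 2, e.g. (10625 + 10000) / 2 = 10312.5. The fixed point is fillRate * targetPeriod = 10000 RUs per 5s period, i.e. the RU_PER_SEC of 2000 noted in the comment, while the global bucket tokens settle at the -30000 loan limit.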
From 7136d69a0abc3a5a74162287a0f5e0d8f3cc479e Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Mon, 22 Jul 2024 18:25:33 +0800
Subject: [PATCH 18/20] *: use context to manage runner (#8394)

ref tikv/pd#8386

Signed-off-by: Ryan Leung
Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com>
---
 pkg/mcs/scheduling/server/cluster.go |  6 +++---
 pkg/ratelimit/runner.go              | 26 ++++++++++++++++----------
 pkg/ratelimit/runner_test.go         |  7 ++++---
 server/cluster/cluster.go            |  6 +++---
 4 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go
index b18db7c0798..24a75012331 100644
--- a/pkg/mcs/scheduling/server/cluster.go
+++ b/pkg/mcs/scheduling/server/cluster.go
@@ -549,9 +549,9 @@ func (c *Cluster) StartBackgroundJobs() {
 	go c.runUpdateStoreStats()
 	go c.runCoordinator()
 	go c.runMetricsCollectionJob()
-	c.heartbeatRunner.Start()
-	c.miscRunner.Start()
-	c.logRunner.Start()
+	c.heartbeatRunner.Start(c.ctx)
+	c.miscRunner.Start(c.ctx)
+	c.logRunner.Start(c.ctx)
 	c.running.Store(true)
 }
 
diff --git a/pkg/ratelimit/runner.go b/pkg/ratelimit/runner.go
index 2d88e36106e..57a19e4e682 100644
--- a/pkg/ratelimit/runner.go
+++ b/pkg/ratelimit/runner.go
@@ -43,7 +43,7 @@ const (
 
 // Runner is the interface for running tasks.
 type Runner interface {
 	RunTask(id uint64, name string, f func(), opts ...TaskOption) error
-	Start()
+	Start(ctx context.Context)
 	Stop()
 }
@@ -66,12 +66,13 @@ type taskID struct {
 }
 
 type ConcurrentRunner struct {
+	ctx                context.Context
+	cancel             context.CancelFunc
 	name               string
 	limiter            *ConcurrencyLimiter
 	maxPendingDuration time.Duration
 	taskChan           chan *Task
 	pendingMu          sync.Mutex
-	stopChan           chan struct{}
 	wg                 sync.WaitGroup
 	pendingTaskCount   map[string]int
 	pendingTasks       []*Task
@@ -103,8 +104,8 @@ func WithRetained(retained bool) TaskOption {
 }
 
 // Start starts the runner.
-func (cr *ConcurrentRunner) Start() {
-	cr.stopChan = make(chan struct{})
+func (cr *ConcurrentRunner) Start(ctx context.Context) {
+	cr.ctx, cr.cancel = context.WithCancel(ctx)
 	cr.wg.Add(1)
 	ticker := time.NewTicker(5 * time.Second)
 	defer ticker.Stop()
@@ -118,11 +119,11 @@ func (cr *ConcurrentRunner) Start() {
 				if err != nil {
 					continue
 				}
-				go cr.run(task, token)
+				go cr.run(cr.ctx, task, token)
 			} else {
-				go cr.run(task, nil)
+				go cr.run(cr.ctx, task, nil)
 			}
-		case <-cr.stopChan:
+		case <-cr.ctx.Done():
 			cr.pendingMu.Lock()
 			cr.pendingTasks = make([]*Task, 0, initialCapacity)
 			cr.pendingMu.Unlock()
@@ -144,8 +145,13 @@ func (cr *ConcurrentRunner) Start() {
 	}()
 }
 
-func (cr *ConcurrentRunner) run(task *Task, token *TaskToken) {
+func (cr *ConcurrentRunner) run(ctx context.Context, task *Task, token *TaskToken) {
 	start := time.Now()
+	select {
+	case <-ctx.Done():
+		return
+	default:
+	}
 	task.f()
 	if token != nil {
 		cr.limiter.ReleaseToken(token)
@@ -173,7 +179,7 @@ func (cr *ConcurrentRunner) processPendingTasks() {
 
 // Stop stops the runner.
 func (cr *ConcurrentRunner) Stop() {
-	close(cr.stopChan)
+	cr.cancel()
 	cr.wg.Wait()
 }
 
@@ -238,7 +244,7 @@
 
 // Start starts the runner.
-func (*SyncRunner) Start() {}
+func (*SyncRunner) Start(context.Context) {}
 
 // Stop stops the runner.
 func (*SyncRunner) Stop() {}
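With this patch a runner's background loop is bounded by the context passed to Start as well as by an explicit Stop. A minimal usage sketch against the new interface (the constructor arguments mirror the tests below; the task body is illustrative):

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    runner := NewConcurrentRunner("example", NewConcurrencyLimiter(1), time.Minute)
    runner.Start(ctx) // the loop exits on Stop() or when ctx is canceled
    defer runner.Stop()

    _ = runner.RunTask(1, "example-task", func() {
        // task body; pending tasks are dropped once the runner shuts down
    })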
diff --git a/pkg/ratelimit/runner_test.go b/pkg/ratelimit/runner_test.go
index 0335a78bcbe..d4aa0825e83 100644
--- a/pkg/ratelimit/runner_test.go
+++ b/pkg/ratelimit/runner_test.go
@@ -15,6 +15,7 @@
 package ratelimit
 
 import (
+	"context"
 	"sync"
 	"testing"
 	"time"
@@ -25,7 +26,7 @@ import (
 func TestConcurrentRunner(t *testing.T) {
 	t.Run("RunTask", func(t *testing.T) {
 		runner := NewConcurrentRunner("test", NewConcurrencyLimiter(1), time.Second)
-		runner.Start()
+		runner.Start(context.TODO())
 		defer runner.Stop()
 
 		var wg sync.WaitGroup
@@ -47,7 +48,7 @@ func TestConcurrentRunner(t *testing.T) {
 
 	t.Run("MaxPendingDuration", func(t *testing.T) {
 		runner := NewConcurrentRunner("test", NewConcurrencyLimiter(1), 2*time.Millisecond)
-		runner.Start()
+		runner.Start(context.TODO())
 		defer runner.Stop()
 		var wg sync.WaitGroup
 		for i := 0; i < 10; i++ {
@@ -76,7 +77,7 @@ func TestConcurrentRunner(t *testing.T) {
 
 	t.Run("DuplicatedTask", func(t *testing.T) {
 		runner := NewConcurrentRunner("test", NewConcurrencyLimiter(1), time.Minute)
-		runner.Start()
+		runner.Start(context.TODO())
 		defer runner.Stop()
 		for i := 1; i < 11; i++ {
 			regionID := uint64(i)
diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go
index 93be9d1c076..ed1080f617a 100644
--- a/server/cluster/cluster.go
+++ b/server/cluster/cluster.go
@@ -364,9 +364,9 @@ func (c *RaftCluster) Start(s Server) error {
 	go c.startGCTuner()
 
 	c.running = true
-	c.heartbeatRunner.Start()
-	c.miscRunner.Start()
-	c.logRunner.Start()
+	c.heartbeatRunner.Start(c.ctx)
+	c.miscRunner.Start(c.ctx)
+	c.logRunner.Start(c.ctx)
 	return nil
 }

From 624b6f39f9617c958a120ec4ac5708c3b32c6dfb Mon Sep 17 00:00:00 2001
From: okJiang <819421878@qq.com>
Date: Tue, 23 Jul 2024 10:43:33 +0800
Subject: [PATCH 19/20] test: add Stdout to TestReloadLabel (#8427)

ref tikv/pd#8389

Signed-off-by: okJiang <819421878@qq.com>
Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com>
---
 tests/integrations/realcluster/reboot_pd_test.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/integrations/realcluster/reboot_pd_test.go b/tests/integrations/realcluster/reboot_pd_test.go
index 14c86f2dedb..9f2b286e9b1 100644
--- a/tests/integrations/realcluster/reboot_pd_test.go
+++ b/tests/integrations/realcluster/reboot_pd_test.go
@@ -16,6 +16,7 @@ package realcluster
 
 import (
 	"context"
+	"os"
 	"os/exec"
 	"testing"
 
@@ -26,6 +27,8 @@ import (
 func restartTiUP() {
 	log.Info("start to restart TiUP")
 	cmd := exec.Command("make", "deploy")
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
 	err := cmd.Run()
 	if err != nil {
 		panic(err)
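Streaming the child process's output to the test's own stdout/stderr makes `make deploy` failures visible in CI logs. If the output needed to be captured rather than streamed, a sketch (not part of the patch) could be:

    out, err := exec.Command("make", "deploy").CombinedOutput()
    if err != nil {
        panic(fmt.Sprintf("deploy failed: %v\n%s", err, out))
    }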
"github.com/tikv/pd/pkg/schedule/placement" @@ -56,8 +58,13 @@ func HandleStatsAsync(c Cluster, region *core.RegionInfo) { } // HandleOverlaps handles the overlap regions. -func HandleOverlaps(c Cluster, overlaps []*core.RegionInfo) { +func HandleOverlaps(ctx context.Context, c Cluster, overlaps []*core.RegionInfo) { for _, item := range overlaps { + select { + case <-ctx.Done(): + return + default: + } if c.GetRegionStats() != nil { c.GetRegionStats().ClearDefunctRegion(item.GetID()) } @@ -67,7 +74,7 @@ func HandleOverlaps(c Cluster, overlaps []*core.RegionInfo) { } // Collect collects the cluster information. -func Collect(c Cluster, region *core.RegionInfo, hasRegionStats bool) { +func Collect(ctx context.Context, c Cluster, region *core.RegionInfo, hasRegionStats bool) { if hasRegionStats { // get region again from root tree. make sure the observed region is the latest. bc := c.GetBasicCluster() @@ -78,6 +85,11 @@ func Collect(c Cluster, region *core.RegionInfo, hasRegionStats bool) { if region == nil { return } + select { + case <-ctx.Done(): + return + default: + } c.GetRegionStats().Observe(region, c.GetBasicCluster().GetRegionStores(region)) } } diff --git a/pkg/core/region.go b/pkg/core/region.go index eb8b89aecff..4f7af8cc333 100644 --- a/pkg/core/region.go +++ b/pkg/core/region.go @@ -16,6 +16,7 @@ package core import ( "bytes" + "context" "encoding/hex" "fmt" "math" @@ -750,7 +751,7 @@ func GenerateRegionGuideFunc(enableLog bool) RegionGuideFunc { logRunner.RunTask( regionID, "DebugLog", - func() { + func(context.Context) { d(msg, fields...) }, ) @@ -759,7 +760,7 @@ func GenerateRegionGuideFunc(enableLog bool) RegionGuideFunc { logRunner.RunTask( regionID, "InfoLog", - func() { + func(context.Context) { i(msg, fields...) }, ) diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go index 24a75012331..c86c739f724 100644 --- a/pkg/mcs/scheduling/server/cluster.go +++ b/pkg/mcs/scheduling/server/cluster.go @@ -627,10 +627,8 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c ctx.TaskRunner.RunTask( regionID, ratelimit.ObserveRegionStatsAsync, - func() { - if c.regionStats.RegionStatsNeedUpdate(region) { - cluster.Collect(c, region, hasRegionStats) - } + func(ctx context.Context) { + cluster.Collect(ctx, c, region, hasRegionStats) }, ) } @@ -639,7 +637,7 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c ctx.TaskRunner.RunTask( regionID, ratelimit.UpdateSubTree, - func() { + func(context.Context) { c.CheckAndPutSubTree(region) }, ratelimit.WithRetained(true), @@ -663,7 +661,7 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c ctx.TaskRunner.RunTask( regionID, ratelimit.UpdateSubTree, - func() { + func(context.Context) { c.CheckAndPutSubTree(region) }, ratelimit.WithRetained(retained), @@ -672,8 +670,8 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c ctx.TaskRunner.RunTask( regionID, ratelimit.HandleOverlaps, - func() { - cluster.HandleOverlaps(c, overlaps) + func(ctx context.Context) { + cluster.HandleOverlaps(ctx, c, overlaps) }, ) } @@ -682,8 +680,8 @@ func (c *Cluster) processRegionHeartbeat(ctx *core.MetaProcessContext, region *c ctx.TaskRunner.RunTask( regionID, ratelimit.CollectRegionStatsAsync, - func() { - cluster.Collect(c, region, hasRegionStats) + func(ctx context.Context) { + cluster.Collect(ctx, c, region, hasRegionStats) }, ) tracer.OnCollectRegionStatsFinished() diff --git a/pkg/ratelimit/runner.go 
diff --git a/pkg/ratelimit/runner.go b/pkg/ratelimit/runner.go
index 57a19e4e682..a230177ac73 100644
--- a/pkg/ratelimit/runner.go
+++ b/pkg/ratelimit/runner.go
@@ -42,7 +42,7 @@ const (
 
 // Runner is the interface for running tasks.
 type Runner interface {
-	RunTask(id uint64, name string, f func(), opts ...TaskOption) error
+	RunTask(id uint64, name string, f func(context.Context), opts ...TaskOption) error
 	Start(ctx context.Context)
 	Stop()
 }
@@ -51,7 +51,7 @@ type Runner interface {
 type Task struct {
 	id          uint64
 	submittedAt time.Time
-	f           func()
+	f           func(context.Context)
 	name        string
 	// retained indicates whether the task should be dropped if the task queue exceeds maxPendingDuration.
 	retained bool
@@ -152,7 +152,7 @@ func (cr *ConcurrentRunner) run(ctx context.Context, task *Task, token *TaskToken) {
 		return
 	default:
 	}
-	task.f()
+	task.f(ctx)
 	if token != nil {
 		cr.limiter.ReleaseToken(token)
 		cr.processPendingTasks()
@@ -184,7 +184,7 @@ func (cr *ConcurrentRunner) Stop() {
 }
 
 // RunTask runs the task asynchronously.
-func (cr *ConcurrentRunner) RunTask(id uint64, name string, f func(), opts ...TaskOption) error {
+func (cr *ConcurrentRunner) RunTask(id uint64, name string, f func(context.Context), opts ...TaskOption) error {
 	task := &Task{
 		id:   id,
 		name: name,
@@ -238,8 +238,8 @@ func NewSyncRunner() *SyncRunner {
 }
 
 // RunTask runs the task synchronously.
-func (*SyncRunner) RunTask(_ uint64, _ string, f func(), _ ...TaskOption) error {
-	f()
+func (*SyncRunner) RunTask(_ uint64, _ string, f func(context.Context), _ ...TaskOption) error {
+	f(context.Background())
 	return nil
 }
 
diff --git a/pkg/ratelimit/runner_test.go b/pkg/ratelimit/runner_test.go
index d4aa0825e83..a9090804a08 100644
--- a/pkg/ratelimit/runner_test.go
+++ b/pkg/ratelimit/runner_test.go
@@ -36,7 +36,7 @@ func TestConcurrentRunner(t *testing.T) {
 			err := runner.RunTask(
 				uint64(i),
 				"test1",
-				func() {
+				func(context.Context) {
 					defer wg.Done()
 					time.Sleep(100 * time.Millisecond)
 				},
@@ -56,7 +56,7 @@ func TestConcurrentRunner(t *testing.T) {
 			err := runner.RunTask(
 				uint64(i),
 				"test2",
-				func() {
+				func(context.Context) {
 					defer wg.Done()
 					time.Sleep(100 * time.Millisecond)
 				},
@@ -87,7 +87,7 @@ func TestConcurrentRunner(t *testing.T) {
 			err := runner.RunTask(
 				regionID,
 				"test3",
-				func() {
+				func(context.Context) {
 					time.Sleep(time.Second)
 				},
 			)
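Every task closure now receives the runner's context, so a long-running task can abort on shutdown instead of merely being skipped before it starts (the pre-run select in `run` only covers tasks that have not yet begun). A sketch of a cooperative task (illustrative body, real RunTask signature):

    _ = runner.RunTask(42, "example", func(ctx context.Context) {
        select {
        case <-ctx.Done():
            return // runner is stopping; abandon the work
        case <-time.After(100 * time.Millisecond):
            // simulated unit of work completed
        }
    })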
diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go
index ed1080f617a..d1f89ca2128 100644
--- a/server/cluster/cluster.go
+++ b/server/cluster/cluster.go
@@ -1061,10 +1061,8 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio
 		ctx.MiscRunner.RunTask(
 			regionID,
 			ratelimit.ObserveRegionStatsAsync,
-			func() {
-				if c.regionStats.RegionStatsNeedUpdate(region) {
-					cluster.Collect(c, region, hasRegionStats)
-				}
+			func(ctx context.Context) {
+				cluster.Collect(ctx, c, region, hasRegionStats)
 			},
 		)
 	}
@@ -1073,7 +1071,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio
 		ctx.TaskRunner.RunTask(
 			regionID,
 			ratelimit.UpdateSubTree,
-			func() {
+			func(context.Context) {
 				c.CheckAndPutSubTree(region)
 			},
 			ratelimit.WithRetained(true),
@@ -1101,7 +1099,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio
 		ctx.TaskRunner.RunTask(
 			regionID,
 			ratelimit.UpdateSubTree,
-			func() {
+			func(context.Context) {
 				c.CheckAndPutSubTree(region)
 			},
 			ratelimit.WithRetained(retained),
@@ -1112,8 +1110,8 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio
 		ctx.MiscRunner.RunTask(
 			regionID,
 			ratelimit.HandleOverlaps,
-			func() {
-				cluster.HandleOverlaps(c, overlaps)
+			func(ctx context.Context) {
+				cluster.HandleOverlaps(ctx, c, overlaps)
 			},
 		)
 	}
@@ -1125,11 +1123,11 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio
 		ctx.MiscRunner.RunTask(
 			regionID,
 			ratelimit.CollectRegionStatsAsync,
-			func() {
+			func(ctx context.Context) {
 				// TODO: Due to the accuracy requirements of the API "/regions/check/xxx",
 				// region stats needs to be collected in API mode.
 				// We need to think of a better way to reduce this part of the cost in the future.
-				cluster.Collect(c, region, hasRegionStats)
+				cluster.Collect(ctx, c, region, hasRegionStats)
 			},
 		)
 
@@ -1139,7 +1137,7 @@ func (c *RaftCluster) processRegionHeartbeat(ctx *core.MetaProcessContext, regio
 		ctx.MiscRunner.RunTask(
 			regionID,
 			ratelimit.SaveRegionToKV,
-			func() {
+			func(context.Context) {
 				// If there are concurrent heartbeats from the same region, the last write will win even if
 				// writes to storage in the critical area. So don't use mutex to protect it.
 				// Not successfully saved to storage is not fatal, it only leads to longer warm-up