diff --git a/pkg/schedule/operator/operator_controller.go b/pkg/schedule/operator/operator_controller.go index 3e9d2f3abcb..e3bead3ffca 100644 --- a/pkg/schedule/operator/operator_controller.go +++ b/pkg/schedule/operator/operator_controller.go @@ -799,6 +799,12 @@ func (oc *Controller) GetFastOpInfluence(cluster *core.BasicCluster, influence O } } +// CleanAllOpRecords removes all operators' records. +// It is used in tests only. +func (oc *Controller) CleanAllOpRecords() { + oc.records.ttl.Clear() +} + // AddOpInfluence add operator influence for cluster func AddOpInfluence(op *Operator, influence OpInfluence, cluster *core.BasicCluster) { region := cluster.GetRegion(op.RegionID()) diff --git a/pkg/statistics/hot_cache.go b/pkg/statistics/hot_cache.go index 1868e323b0f..799fb240d10 100644 --- a/pkg/statistics/hot_cache.go +++ b/pkg/statistics/hot_cache.go @@ -205,3 +205,10 @@ func (w *HotCache) GetThresholds(kind utils.RWType, storeID uint64) []float64 { } return nil } + +// CleanCache cleans the cache. +// This is used for test purpose. +func (w *HotCache) CleanCache() { + w.writeCache.removeAllItem() + w.readCache.removeAllItem() +} diff --git a/pkg/statistics/hot_peer_cache.go b/pkg/statistics/hot_peer_cache.go index 1ac07289a3c..0e35e0e23be 100644 --- a/pkg/statistics/hot_peer_cache.go +++ b/pkg/statistics/hot_peer_cache.go @@ -544,6 +544,18 @@ func (f *hotPeerCache) removeItem(item *HotPeerStat) { } } +// removeAllItem removes all items of the cache. +// It is used for test. +func (f *hotPeerCache) removeAllItem() { + for _, peers := range f.peersOfStore { + for _, peer := range peers.GetAll() { + item := peer.(*HotPeerStat) + item.actionType = utils.Remove + f.updateStat(item) + } + } +} + func (f *hotPeerCache) coldItem(newItem, oldItem *HotPeerStat) { newItem.HotDegree = oldItem.HotDegree - 1 newItem.AntiCount = oldItem.AntiCount - 1 diff --git a/tests/integrations/mcs/scheduling/api_test.go b/tests/integrations/mcs/scheduling/api_test.go index 0607b1dee9a..8f5d37ee1bb 100644 --- a/tests/integrations/mcs/scheduling/api_test.go +++ b/tests/integrations/mcs/scheduling/api_test.go @@ -1,7 +1,6 @@ package scheduling_test import ( - "context" "encoding/hex" "encoding/json" "fmt" @@ -10,7 +9,6 @@ import ( "time" "github.com/pingcap/failpoint" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" _ "github.com/tikv/pd/pkg/mcs/scheduling/server/apis/v1" @@ -34,49 +32,26 @@ var testDialClient = &http.Client{ type apiTestSuite struct { suite.Suite - ctx context.Context - cleanupFunc testutil.CleanupFunc - cluster *tests.TestCluster - server *tests.TestServer - backendEndpoints string - dialClient *http.Client + env *tests.SchedulingTestEnvironment } func TestAPI(t *testing.T) { - suite.Run(t, &apiTestSuite{}) + suite.Run(t, new(apiTestSuite)) } -func (suite *apiTestSuite) SetupTest() { - ctx, cancel := context.WithCancel(context.Background()) - suite.ctx = ctx - cluster, err := tests.NewTestAPICluster(suite.ctx, 1) - suite.cluster = cluster - suite.NoError(err) - suite.NoError(cluster.RunInitialServers()) - suite.NotEmpty(cluster.WaitLeader()) - suite.server = cluster.GetLeaderServer() - suite.NoError(suite.server.BootstrapCluster()) - suite.backendEndpoints = suite.server.GetAddr() - suite.dialClient = &http.Client{ - Transport: &http.Transport{ - DisableKeepAlives: true, - }, - } - suite.cleanupFunc = func() { - cancel() - } - tc, err := tests.NewTestSchedulingCluster(suite.ctx, 2, suite.backendEndpoints) - suite.NoError(err) - suite.cluster.SetSchedulingCluster(tc) - tc.WaitForPrimaryServing(suite.Require()) +func (suite *apiTestSuite) SetupSuite() { + suite.env = tests.NewSchedulingTestEnvironment(suite.T()) } -func (suite *apiTestSuite) TearDownTest() { - suite.cluster.Destroy() - suite.cleanupFunc() +func (suite *apiTestSuite) TearDownSuite() { + suite.env.Cleanup() } func (suite *apiTestSuite) TestGetCheckerByName() { + suite.env.RunTestInAPIMode(suite.checkGetCheckerByName) +} + +func (suite *apiTestSuite) checkGetCheckerByName(cluster *tests.TestCluster) { re := suite.Require() testCases := []struct { name string @@ -89,7 +64,7 @@ func (suite *apiTestSuite) TestGetCheckerByName() { {name: "joint-state"}, } - s := suite.cluster.GetSchedulingPrimaryServer() + s := cluster.GetSchedulingPrimaryServer() urlPrefix := fmt.Sprintf("%s/scheduling/api/v1/checkers", s.GetAddr()) co := s.GetCoordinator() @@ -119,18 +94,24 @@ func (suite *apiTestSuite) TestGetCheckerByName() { } func (suite *apiTestSuite) TestAPIForward() { + suite.env.RunTestInAPIMode(suite.checkAPIForward) +} + +func (suite *apiTestSuite) checkAPIForward(cluster *tests.TestCluster) { re := suite.Require() re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/utils/apiutil/serverapi/checkHeader", "return(true)")) defer func() { re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/utils/apiutil/serverapi/checkHeader")) }() - urlPrefix := fmt.Sprintf("%s/pd/api/v1", suite.backendEndpoints) + leader := cluster.GetLeaderServer().GetServer() + urlPrefix := fmt.Sprintf("%s/pd/api/v1", leader.GetAddr()) var slice []string var resp map[string]interface{} testutil.Eventually(re, func() bool { - return suite.cluster.GetLeaderServer().GetServer().GetRaftCluster().IsServiceIndependent(utils.SchedulingServiceName) + return leader.GetRaftCluster().IsServiceIndependent(utils.SchedulingServiceName) }) + // Test operators err := testutil.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "operators"), &slice, testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true")) @@ -159,13 +140,18 @@ func (suite *apiTestSuite) TestAPIForward() { re.NoError(err) suite.False(resp["paused"].(bool)) - input := make(map[string]interface{}) - input["delay"] = 10 - pauseArgs, err := json.Marshal(input) - suite.NoError(err) - err = testutil.CheckPostJSON(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "checker/merge"), pauseArgs, - testutil.StatusOK(re), testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true")) - suite.NoError(err) + // Test pause + postChecker := func(delay int) { + input := make(map[string]interface{}) + input["delay"] = delay + pauseArgs, err := json.Marshal(input) + suite.NoError(err) + err = testutil.CheckPostJSON(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "checker/merge"), pauseArgs, + testutil.StatusOK(re), testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true")) + suite.NoError(err) + } + postChecker(30) + postChecker(0) // Test scheduler: // Need to redirect: @@ -183,12 +169,17 @@ func (suite *apiTestSuite) TestAPIForward() { re.NoError(err) re.Contains(slice, "balance-leader-scheduler") - input["delay"] = 30 - pauseArgs, err = json.Marshal(input) - suite.NoError(err) - err = testutil.CheckPostJSON(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "schedulers/balance-leader-scheduler"), pauseArgs, - testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true")) - suite.NoError(err) + postScheduler := func(delay int) { + input := make(map[string]interface{}) + input["delay"] = delay + pauseArgs, err := json.Marshal(input) + suite.NoError(err) + err = testutil.CheckPostJSON(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "schedulers/balance-leader-scheduler"), pauseArgs, + testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true")) + suite.NoError(err) + } + postScheduler(30) + postScheduler(0) err = testutil.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "schedulers/diagnostic/balance-leader-scheduler"), &resp, testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true")) @@ -212,7 +203,7 @@ func (suite *apiTestSuite) TestAPIForward() { suite.NoError(err) } - err = testutil.CheckPostJSON(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "schedulers"), pauseArgs, + err = testutil.CheckPostJSON(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "schedulers"), nil, testutil.WithoutHeader(re, apiutil.ForwardToMicroServiceHeader)) re.NoError(err) @@ -220,6 +211,14 @@ func (suite *apiTestSuite) TestAPIForward() { testutil.WithoutHeader(re, apiutil.ForwardToMicroServiceHeader)) re.NoError(err) + input := make(map[string]interface{}) + input["name"] = "balance-leader-scheduler" + b, err := json.Marshal(input) + re.NoError(err) + err = testutil.CheckPostJSON(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "schedulers"), b, + testutil.WithoutHeader(re, apiutil.ForwardToMicroServiceHeader)) + re.NoError(err) + // Test hotspot var hotRegions statistics.StoreHotPeersInfos err = testutil.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "hotspot/regions/write"), &hotRegions, @@ -288,7 +287,7 @@ func (suite *apiTestSuite) TestAPIForward() { suite.NoError(err) // Test rules: only forward `GET` request var rules []*placement.Rule - tests.MustPutRegion(re, suite.cluster, 2, 1, []byte("a"), []byte("b"), core.SetApproximateSize(60)) + tests.MustPutRegion(re, cluster, 2, 1, []byte("a"), []byte("b"), core.SetApproximateSize(60)) rules = []*placement.Rule{ { GroupID: placement.DefaultGroupID, @@ -362,141 +361,141 @@ func (suite *apiTestSuite) TestAPIForward() { } func (suite *apiTestSuite) TestConfig() { - checkConfig := func(cluster *tests.TestCluster) { - re := suite.Require() - s := cluster.GetSchedulingPrimaryServer() - testutil.Eventually(re, func() bool { - return s.IsServing() - }, testutil.WithWaitFor(5*time.Second), testutil.WithTickInterval(50*time.Millisecond)) - addr := s.GetAddr() - urlPrefix := fmt.Sprintf("%s/scheduling/api/v1/config", addr) - - var cfg config.Config - testutil.ReadGetJSON(re, testDialClient, urlPrefix, &cfg) - suite.Equal(cfg.GetListenAddr(), s.GetConfig().GetListenAddr()) - suite.Equal(cfg.Schedule.LeaderScheduleLimit, s.GetConfig().Schedule.LeaderScheduleLimit) - suite.Equal(cfg.Schedule.EnableCrossTableMerge, s.GetConfig().Schedule.EnableCrossTableMerge) - suite.Equal(cfg.Replication.MaxReplicas, s.GetConfig().Replication.MaxReplicas) - suite.Equal(cfg.Replication.LocationLabels, s.GetConfig().Replication.LocationLabels) - suite.Equal(cfg.DataDir, s.GetConfig().DataDir) - testutil.Eventually(re, func() bool { - // wait for all schedulers to be loaded in scheduling server. - return len(cfg.Schedule.SchedulersPayload) == 5 - }) - suite.Contains(cfg.Schedule.SchedulersPayload, "balance-leader-scheduler") - suite.Contains(cfg.Schedule.SchedulersPayload, "balance-region-scheduler") - suite.Contains(cfg.Schedule.SchedulersPayload, "balance-hot-region-scheduler") - suite.Contains(cfg.Schedule.SchedulersPayload, "balance-witness-scheduler") - suite.Contains(cfg.Schedule.SchedulersPayload, "transfer-witness-leader-scheduler") - } - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInAPIMode(checkConfig) + suite.env.RunTestInAPIMode(suite.checkConfig) +} + +func (suite *apiTestSuite) checkConfig(cluster *tests.TestCluster) { + re := suite.Require() + s := cluster.GetSchedulingPrimaryServer() + testutil.Eventually(re, func() bool { + return s.IsServing() + }, testutil.WithWaitFor(5*time.Second), testutil.WithTickInterval(50*time.Millisecond)) + addr := s.GetAddr() + urlPrefix := fmt.Sprintf("%s/scheduling/api/v1/config", addr) + + var cfg config.Config + testutil.ReadGetJSON(re, testDialClient, urlPrefix, &cfg) + suite.Equal(cfg.GetListenAddr(), s.GetConfig().GetListenAddr()) + suite.Equal(cfg.Schedule.LeaderScheduleLimit, s.GetConfig().Schedule.LeaderScheduleLimit) + suite.Equal(cfg.Schedule.EnableCrossTableMerge, s.GetConfig().Schedule.EnableCrossTableMerge) + suite.Equal(cfg.Replication.MaxReplicas, s.GetConfig().Replication.MaxReplicas) + suite.Equal(cfg.Replication.LocationLabels, s.GetConfig().Replication.LocationLabels) + suite.Equal(cfg.DataDir, s.GetConfig().DataDir) + testutil.Eventually(re, func() bool { + // wait for all schedulers to be loaded in scheduling server. + return len(cfg.Schedule.SchedulersPayload) == 5 + }) + suite.Contains(cfg.Schedule.SchedulersPayload, "balance-leader-scheduler") + suite.Contains(cfg.Schedule.SchedulersPayload, "balance-region-scheduler") + suite.Contains(cfg.Schedule.SchedulersPayload, "balance-hot-region-scheduler") + suite.Contains(cfg.Schedule.SchedulersPayload, "balance-witness-scheduler") + suite.Contains(cfg.Schedule.SchedulersPayload, "transfer-witness-leader-scheduler") +} + +func (suite *apiTestSuite) TestConfigForward() { + suite.env.RunTestInAPIMode(suite.checkConfigForward) } -func TestConfigForward(t *testing.T) { - re := require.New(t) - checkConfigForward := func(cluster *tests.TestCluster) { - sche := cluster.GetSchedulingPrimaryServer() - opts := sche.GetPersistConfig() - var cfg map[string]interface{} - addr := cluster.GetLeaderServer().GetAddr() - urlPrefix := fmt.Sprintf("%s/pd/api/v1/config", addr) - - // Test config forward - // Expect to get same config in scheduling server and api server - testutil.Eventually(re, func() bool { - testutil.ReadGetJSON(re, testDialClient, urlPrefix, &cfg) - re.Equal(cfg["schedule"].(map[string]interface{})["leader-schedule-limit"], - float64(opts.GetLeaderScheduleLimit())) - re.Equal(cfg["replication"].(map[string]interface{})["max-replicas"], - float64(opts.GetReplicationConfig().MaxReplicas)) - schedulers := cfg["schedule"].(map[string]interface{})["schedulers-payload"].(map[string]interface{}) - return len(schedulers) == 5 - }) - - // Test to change config in api server - // Expect to get new config in scheduling server and api server - reqData, err := json.Marshal(map[string]interface{}{ - "max-replicas": 4, - }) - re.NoError(err) - err = testutil.CheckPostJSON(testDialClient, urlPrefix, reqData, testutil.StatusOK(re)) - re.NoError(err) - testutil.Eventually(re, func() bool { - testutil.ReadGetJSON(re, testDialClient, urlPrefix, &cfg) - return cfg["replication"].(map[string]interface{})["max-replicas"] == 4. && - opts.GetReplicationConfig().MaxReplicas == 4. - }) - - // Test to change config only in scheduling server - // Expect to get new config in scheduling server but not old config in api server - opts.GetScheduleConfig().LeaderScheduleLimit = 100 - re.Equal(100, int(opts.GetLeaderScheduleLimit())) +func (suite *apiTestSuite) checkConfigForward(cluster *tests.TestCluster) { + re := suite.Require() + sche := cluster.GetSchedulingPrimaryServer() + opts := sche.GetPersistConfig() + var cfg map[string]interface{} + addr := cluster.GetLeaderServer().GetAddr() + urlPrefix := fmt.Sprintf("%s/pd/api/v1/config", addr) + + // Test config forward + // Expect to get same config in scheduling server and api server + testutil.Eventually(re, func() bool { testutil.ReadGetJSON(re, testDialClient, urlPrefix, &cfg) - re.Equal(100., cfg["schedule"].(map[string]interface{})["leader-schedule-limit"]) - opts.GetReplicationConfig().MaxReplicas = 5 - re.Equal(5, int(opts.GetReplicationConfig().MaxReplicas)) + re.Equal(cfg["schedule"].(map[string]interface{})["leader-schedule-limit"], + float64(opts.GetLeaderScheduleLimit())) + re.Equal(cfg["replication"].(map[string]interface{})["max-replicas"], + float64(opts.GetReplicationConfig().MaxReplicas)) + schedulers := cfg["schedule"].(map[string]interface{})["schedulers-payload"].(map[string]interface{}) + return len(schedulers) == 5 + }) + + // Test to change config in api server + // Expect to get new config in scheduling server and api server + reqData, err := json.Marshal(map[string]interface{}{ + "max-replicas": 4, + }) + re.NoError(err) + err = testutil.CheckPostJSON(testDialClient, urlPrefix, reqData, testutil.StatusOK(re)) + re.NoError(err) + testutil.Eventually(re, func() bool { testutil.ReadGetJSON(re, testDialClient, urlPrefix, &cfg) - re.Equal(5., cfg["replication"].(map[string]interface{})["max-replicas"]) - } - env := tests.NewSchedulingTestEnvironment(t) - env.RunTestInAPIMode(checkConfigForward) + return cfg["replication"].(map[string]interface{})["max-replicas"] == 4. && + opts.GetReplicationConfig().MaxReplicas == 4. + }) + + // Test to change config only in scheduling server + // Expect to get new config in scheduling server but not old config in api server + opts.GetScheduleConfig().LeaderScheduleLimit = 100 + re.Equal(100, int(opts.GetLeaderScheduleLimit())) + testutil.ReadGetJSON(re, testDialClient, urlPrefix, &cfg) + re.Equal(100., cfg["schedule"].(map[string]interface{})["leader-schedule-limit"]) + opts.GetReplicationConfig().MaxReplicas = 5 + re.Equal(5, int(opts.GetReplicationConfig().MaxReplicas)) + testutil.ReadGetJSON(re, testDialClient, urlPrefix, &cfg) + re.Equal(5., cfg["replication"].(map[string]interface{})["max-replicas"]) } -func TestAdminRegionCache(t *testing.T) { - re := require.New(t) - checkAdminRegionCache := func(cluster *tests.TestCluster) { - r1 := core.NewTestRegionInfo(10, 1, []byte(""), []byte("b"), core.SetRegionConfVer(100), core.SetRegionVersion(100)) - tests.MustPutRegionInfo(re, cluster, r1) - r2 := core.NewTestRegionInfo(20, 1, []byte("b"), []byte("c"), core.SetRegionConfVer(100), core.SetRegionVersion(100)) - tests.MustPutRegionInfo(re, cluster, r2) - r3 := core.NewTestRegionInfo(30, 1, []byte("c"), []byte(""), core.SetRegionConfVer(100), core.SetRegionVersion(100)) - tests.MustPutRegionInfo(re, cluster, r3) - - schedulingServer := cluster.GetSchedulingPrimaryServer() - re.Equal(3, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) - - addr := schedulingServer.GetAddr() - urlPrefix := fmt.Sprintf("%s/scheduling/api/v1/admin/cache/regions", addr) - err := testutil.CheckDelete(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "30"), testutil.StatusOK(re)) - re.NoError(err) - re.Equal(2, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) - - err = testutil.CheckDelete(testDialClient, urlPrefix, testutil.StatusOK(re)) - re.NoError(err) - re.Equal(0, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) - } - env := tests.NewSchedulingTestEnvironment(t) - env.RunTestInAPIMode(checkAdminRegionCache) +func (suite *apiTestSuite) TestAdminRegionCache() { + suite.env.RunTestInAPIMode(suite.checkAdminRegionCache) } -func TestAdminRegionCacheForward(t *testing.T) { - re := require.New(t) - checkAdminRegionCache := func(cluster *tests.TestCluster) { - r1 := core.NewTestRegionInfo(10, 1, []byte(""), []byte("b"), core.SetRegionConfVer(100), core.SetRegionVersion(100)) - tests.MustPutRegionInfo(re, cluster, r1) - r2 := core.NewTestRegionInfo(20, 1, []byte("b"), []byte("c"), core.SetRegionConfVer(100), core.SetRegionVersion(100)) - tests.MustPutRegionInfo(re, cluster, r2) - r3 := core.NewTestRegionInfo(30, 1, []byte("c"), []byte(""), core.SetRegionConfVer(100), core.SetRegionVersion(100)) - tests.MustPutRegionInfo(re, cluster, r3) - - apiServer := cluster.GetLeaderServer().GetServer() - schedulingServer := cluster.GetSchedulingPrimaryServer() - re.Equal(3, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) - re.Equal(3, apiServer.GetRaftCluster().GetRegionCount([]byte{}, []byte{}).Count) - - addr := cluster.GetLeaderServer().GetAddr() - urlPrefix := fmt.Sprintf("%s/pd/api/v1/admin/cache/region", addr) - err := testutil.CheckDelete(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "30"), testutil.StatusOK(re)) - re.NoError(err) - re.Equal(2, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) - re.Equal(2, apiServer.GetRaftCluster().GetRegionCount([]byte{}, []byte{}).Count) - - err = testutil.CheckDelete(testDialClient, urlPrefix+"s", testutil.StatusOK(re)) - re.NoError(err) - re.Equal(0, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) - re.Equal(0, apiServer.GetRaftCluster().GetRegionCount([]byte{}, []byte{}).Count) - } - env := tests.NewSchedulingTestEnvironment(t) - env.RunTestInAPIMode(checkAdminRegionCache) +func (suite *apiTestSuite) checkAdminRegionCache(cluster *tests.TestCluster) { + re := suite.Require() + r1 := core.NewTestRegionInfo(10, 1, []byte(""), []byte("b"), core.SetRegionConfVer(100), core.SetRegionVersion(100)) + tests.MustPutRegionInfo(re, cluster, r1) + r2 := core.NewTestRegionInfo(20, 1, []byte("b"), []byte("c"), core.SetRegionConfVer(100), core.SetRegionVersion(100)) + tests.MustPutRegionInfo(re, cluster, r2) + r3 := core.NewTestRegionInfo(30, 1, []byte("c"), []byte(""), core.SetRegionConfVer(100), core.SetRegionVersion(100)) + tests.MustPutRegionInfo(re, cluster, r3) + + schedulingServer := cluster.GetSchedulingPrimaryServer() + re.Equal(3, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) + + addr := schedulingServer.GetAddr() + urlPrefix := fmt.Sprintf("%s/scheduling/api/v1/admin/cache/regions", addr) + err := testutil.CheckDelete(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "30"), testutil.StatusOK(re)) + re.NoError(err) + re.Equal(2, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) + + err = testutil.CheckDelete(testDialClient, urlPrefix, testutil.StatusOK(re)) + re.NoError(err) + re.Equal(0, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) +} + +func (suite *apiTestSuite) TestAdminRegionCacheForward() { + suite.env.RunTestInAPIMode(suite.checkAdminRegionCacheForward) +} + +func (suite *apiTestSuite) checkAdminRegionCacheForward(cluster *tests.TestCluster) { + re := suite.Require() + r1 := core.NewTestRegionInfo(10, 1, []byte(""), []byte("b"), core.SetRegionConfVer(100), core.SetRegionVersion(100)) + tests.MustPutRegionInfo(re, cluster, r1) + r2 := core.NewTestRegionInfo(20, 1, []byte("b"), []byte("c"), core.SetRegionConfVer(100), core.SetRegionVersion(100)) + tests.MustPutRegionInfo(re, cluster, r2) + r3 := core.NewTestRegionInfo(30, 1, []byte("c"), []byte(""), core.SetRegionConfVer(100), core.SetRegionVersion(100)) + tests.MustPutRegionInfo(re, cluster, r3) + + apiServer := cluster.GetLeaderServer().GetServer() + schedulingServer := cluster.GetSchedulingPrimaryServer() + re.Equal(3, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) + re.Equal(3, apiServer.GetRaftCluster().GetRegionCount([]byte{}, []byte{}).Count) + + addr := cluster.GetLeaderServer().GetAddr() + urlPrefix := fmt.Sprintf("%s/pd/api/v1/admin/cache/region", addr) + err := testutil.CheckDelete(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "30"), testutil.StatusOK(re)) + re.NoError(err) + re.Equal(2, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) + re.Equal(2, apiServer.GetRaftCluster().GetRegionCount([]byte{}, []byte{}).Count) + + err = testutil.CheckDelete(testDialClient, urlPrefix+"s", testutil.StatusOK(re)) + re.NoError(err) + re.Equal(0, schedulingServer.GetCluster().GetRegionCount([]byte{}, []byte{})) + re.Equal(0, apiServer.GetRaftCluster().GetRegionCount([]byte{}, []byte{}).Count) } diff --git a/tests/pdctl/config/config_test.go b/tests/pdctl/config/config_test.go index 3b3310185b1..c63160a32e5 100644 --- a/tests/pdctl/config/config_test.go +++ b/tests/pdctl/config/config_test.go @@ -17,8 +17,10 @@ package config_test import ( "context" "encoding/json" + "net/http" "os" "reflect" + "strconv" "strings" "testing" "time" @@ -38,6 +40,13 @@ import ( pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl" ) +// testDialClient used to dial http request. only used for test. +var testDialClient = &http.Client{ + Transport: &http.Transport{ + DisableKeepAlives: true, + }, +} + type testCase struct { name string value interface{} @@ -54,16 +63,43 @@ func (t *testCase) judge(re *require.Assertions, scheduleConfigs ...*sc.Schedule type configTestSuite struct { suite.Suite + env *tests.SchedulingTestEnvironment } func TestConfigTestSuite(t *testing.T) { suite.Run(t, new(configTestSuite)) } +func (suite *configTestSuite) SetupSuite() { + suite.env = tests.NewSchedulingTestEnvironment(suite.T()) +} + +func (suite *configTestSuite) TearDownSuite() { + suite.env.Cleanup() +} + +func (suite *configTestSuite) TearDownTest() { + cleanFunc := func(cluster *tests.TestCluster) { + def := placement.GroupBundle{ + ID: "pd", + Rules: []*placement.Rule{ + {GroupID: "pd", ID: "default", Role: "voter", Count: 3}, + }, + } + data, err := json.Marshal([]placement.GroupBundle{def}) + suite.NoError(err) + leader := cluster.GetLeaderServer() + suite.NotNil(leader) + urlPrefix := leader.GetAddr() + err = testutil.CheckPostJSON(testDialClient, urlPrefix+"/pd/api/v1/config/placement-rule", data, testutil.StatusOK(suite.Require())) + suite.NoError(err) + } + suite.env.RunFuncInTwoModes(cleanFunc) +} + func (suite *configTestSuite) TestConfig() { suite.NoError(failpoint.Enable("github.com/tikv/pd/pkg/dashboard/adapter/skipDashboardLoop", `return(true)`)) - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkConfig) + suite.env.RunTestInTwoModes(suite.checkConfig) suite.NoError(failpoint.Disable("github.com/tikv/pd/pkg/dashboard/adapter/skipDashboardLoop")) } @@ -79,7 +115,6 @@ func (suite *configTestSuite) checkConfig(cluster *tests.TestCluster) { } svr := leaderServer.GetServer() tests.MustPutStore(re, cluster, store) - defer cluster.Destroy() // config show args := []string{"-u", pdAddr, "config", "show"} @@ -111,6 +146,7 @@ func (suite *configTestSuite) checkConfig(cluster *tests.TestCluster) { re.NoError(err) re.False(svr.GetPDServerConfig().TraceRegionFlow) + origin := svr.GetPDServerConfig().FlowRoundByDigit args = []string{"-u", pdAddr, "config", "set", "flow-round-by-digit", "10"} _, err = pdctl.ExecuteCommand(cmd, args...) re.NoError(err) @@ -120,6 +156,17 @@ func (suite *configTestSuite) checkConfig(cluster *tests.TestCluster) { _, err = pdctl.ExecuteCommand(cmd, args...) re.Error(err) + args = []string{"-u", pdAddr, "config", "set", "flow-round-by-digit", strconv.Itoa(origin)} + _, err = pdctl.ExecuteCommand(cmd, args...) + re.NoError(err) + testutil.Eventually(re, func() bool { // wait for the config to be synced to the scheduling server + output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "show", "server") + re.NoError(err) + var conf config.PDServerConfig + re.NoError(json.Unmarshal(output, &conf)) + return conf.FlowRoundByDigit == origin + }) + // config show schedule args = []string{"-u", pdAddr, "config", "show", "schedule"} output, err = pdctl.ExecuteCommand(cmd, args...) @@ -295,8 +342,7 @@ func (suite *configTestSuite) checkConfig(cluster *tests.TestCluster) { } func (suite *configTestSuite) TestPlacementRules() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkPlacementRules) + suite.env.RunTestInTwoModes(suite.checkPlacementRules) } func (suite *configTestSuite) checkPlacementRules(cluster *tests.TestCluster) { @@ -311,7 +357,6 @@ func (suite *configTestSuite) checkPlacementRules(cluster *tests.TestCluster) { LastHeartbeat: time.Now().UnixNano(), } tests.MustPutStore(re, cluster, store) - defer cluster.Destroy() output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "enable") re.NoError(err) @@ -363,8 +408,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *tests.TestCluster) { } func (suite *configTestSuite) TestPlacementRuleGroups() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkPlacementRuleGroups) + suite.env.RunTestInTwoModes(suite.checkPlacementRuleGroups) } func (suite *configTestSuite) checkPlacementRuleGroups(cluster *tests.TestCluster) { @@ -379,8 +423,6 @@ func (suite *configTestSuite) checkPlacementRuleGroups(cluster *tests.TestCluste LastHeartbeat: time.Now().UnixNano(), } tests.MustPutStore(re, cluster, store) - defer cluster.Destroy() - output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "enable") re.NoError(err) re.Contains(string(output), "Success!") @@ -443,8 +485,7 @@ func (suite *configTestSuite) checkPlacementRuleGroups(cluster *tests.TestCluste } func (suite *configTestSuite) TestPlacementRuleBundle() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkPlacementRuleBundle) + suite.env.RunTestInTwoModes(suite.checkPlacementRuleBundle) } func (suite *configTestSuite) checkPlacementRuleBundle(cluster *tests.TestCluster) { @@ -459,7 +500,6 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *tests.TestCluste LastHeartbeat: time.Now().UnixNano(), } tests.MustPutStore(re, cluster, store) - defer cluster.Destroy() output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "enable") re.NoError(err) @@ -549,6 +589,25 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *tests.TestCluste suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{ {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) + + // set default rule only + bundles = []placement.GroupBundle{{ + ID: "pd", + Rules: []*placement.Rule{ + {GroupID: "pd", ID: "default", Role: "voter", Count: 3}, + }, + }} + b, err = json.Marshal(bundles) + re.NoError(err) + re.NoError(os.WriteFile(fname, b, 0600)) + _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "save", "--in="+fname) + re.NoError(err) + _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "delete", "--regexp", ".*f") + re.NoError(err) + + suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{ + {ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, + }) } func (suite *configTestSuite) checkLoadRuleBundle(pdAddr string, fname string, expectValues []placement.GroupBundle) { @@ -627,7 +686,6 @@ func TestReplicationMode(t *testing.T) { leaderServer := cluster.GetLeaderServer() re.NoError(leaderServer.BootstrapCluster()) tests.MustPutStore(re, cluster, store) - defer cluster.Destroy() conf := config.ReplicationModeConfig{ ReplicationMode: "majority", @@ -667,8 +725,7 @@ func TestReplicationMode(t *testing.T) { } func (suite *configTestSuite) TestUpdateDefaultReplicaConfig() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkUpdateDefaultReplicaConfig) + suite.env.RunTestInTwoModes(suite.checkUpdateDefaultReplicaConfig) } func (suite *configTestSuite) checkUpdateDefaultReplicaConfig(cluster *tests.TestCluster) { @@ -682,8 +739,6 @@ func (suite *configTestSuite) checkUpdateDefaultReplicaConfig(cluster *tests.Tes State: metapb.StoreState_Up, } tests.MustPutStore(re, cluster, store) - defer cluster.Destroy() - checkMaxReplicas := func(expect uint64) { args := []string{"-u", pdAddr, "config", "show", "replication"} testutil.Eventually(re, func() bool { // wait for the config to be synced to the scheduling server @@ -819,8 +874,7 @@ func (suite *configTestSuite) checkUpdateDefaultReplicaConfig(cluster *tests.Tes } func (suite *configTestSuite) TestPDServerConfig() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkPDServerConfig) + suite.env.RunTestInTwoModes(suite.checkPDServerConfig) } func (suite *configTestSuite) checkPDServerConfig(cluster *tests.TestCluster) { diff --git a/tests/pdctl/hot/hot_test.go b/tests/pdctl/hot/hot_test.go index 8cab8ea9ab2..03c26f40441 100644 --- a/tests/pdctl/hot/hot_test.go +++ b/tests/pdctl/hot/hot_test.go @@ -42,30 +42,40 @@ import ( type hotTestSuite struct { suite.Suite + env *tests.SchedulingTestEnvironment } func TestHotTestSuite(t *testing.T) { suite.Run(t, new(hotTestSuite)) } -func (suite *hotTestSuite) TestHot() { - var start time.Time - start = start.Add(time.Hour) - opts := []tests.ConfigOption{ +func (suite *hotTestSuite) SetupSuite() { + suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, serverName string) { - conf.Schedule.MaxStoreDownTime.Duration = time.Since(start) + conf.Schedule.MaxStoreDownTime.Duration = time.Hour + conf.Schedule.HotRegionCacheHitsThreshold = 0 }, + ) +} + +func (suite *hotTestSuite) TearDownSuite() { + suite.env.Cleanup() +} + +func (suite *hotTestSuite) TearDownTest() { + cleanFunc := func(cluster *tests.TestCluster) { + leader := cluster.GetLeaderServer() + hotStat := leader.GetRaftCluster().GetHotStat() + if sche := cluster.GetSchedulingPrimaryServer(); sche != nil { + hotStat = sche.GetCluster().GetHotStat() + } + hotStat.HotCache.CleanCache() } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkHot) + suite.env.RunFuncInTwoModes(cleanFunc) +} - opts = append(opts, func(conf *config.Config, serverName string) { - conf.Schedule.HotRegionCacheHitsThreshold = 0 - }) - env = tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkHotWithoutHotPeer) - env = tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkHotWithStoreID) +func (suite *hotTestSuite) TestHot() { + suite.env.RunTestInTwoModes(suite.checkHot) } func (suite *hotTestSuite) checkHot(cluster *tests.TestCluster) { @@ -229,6 +239,10 @@ func (suite *hotTestSuite) checkHot(cluster *tests.TestCluster) { testCommand(reportIntervals, "read") } +func (suite *hotTestSuite) TestHotWithStoreID() { + suite.env.RunTestInTwoModes(suite.checkHotWithStoreID) +} + func (suite *hotTestSuite) checkHotWithStoreID(cluster *tests.TestCluster) { re := suite.Require() statistics.Denoising = false @@ -292,6 +306,10 @@ func (suite *hotTestSuite) checkHotWithStoreID(cluster *tests.TestCluster) { re.Equal(float64(200000000), hotRegion.AsLeader[1].TotalBytesRate) } +func (suite *hotTestSuite) TestHotWithoutHotPeer() { + suite.env.RunTestInTwoModes(suite.checkHotWithoutHotPeer) +} + func (suite *hotTestSuite) checkHotWithoutHotPeer(cluster *tests.TestCluster) { re := suite.Require() statistics.Denoising = false @@ -363,10 +381,10 @@ func (suite *hotTestSuite) checkHotWithoutHotPeer(cluster *tests.TestCluster) { hotRegion := statistics.StoreHotPeersInfos{} re.NoError(err) re.NoError(json.Unmarshal(output, &hotRegion)) - re.Equal(hotRegion.AsPeer[1].Count, 0) + re.Equal(0, hotRegion.AsPeer[1].Count) re.Equal(0.0, hotRegion.AsPeer[1].TotalBytesRate) re.Equal(load, hotRegion.AsPeer[1].StoreByteRate) - re.Equal(hotRegion.AsLeader[1].Count, 0) + re.Equal(0, hotRegion.AsLeader[1].Count) re.Equal(0.0, hotRegion.AsLeader[1].TotalBytesRate) re.Equal(0.0, hotRegion.AsLeader[1].StoreByteRate) // write leader sum } diff --git a/tests/pdctl/operator/operator_test.go b/tests/pdctl/operator/operator_test.go index 8bb034993fa..aa2fe5d1304 100644 --- a/tests/pdctl/operator/operator_test.go +++ b/tests/pdctl/operator/operator_test.go @@ -34,27 +34,30 @@ import ( type operatorTestSuite struct { suite.Suite + env *tests.SchedulingTestEnvironment } func TestOperatorTestSuite(t *testing.T) { suite.Run(t, new(operatorTestSuite)) } -func (suite *operatorTestSuite) TestOperator() { - var start time.Time - start = start.Add(time.Hour) - opts := []tests.ConfigOption{ - // TODO: enable placementrules +func (suite *operatorTestSuite) SetupSuite() { + suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, serverName string) { + // TODO: enable placement rules conf.Replication.MaxReplicas = 2 conf.Replication.EnablePlacementRules = false + conf.Schedule.MaxStoreDownTime.Duration = time.Hour }, - func(conf *config.Config, serverName string) { - conf.Schedule.MaxStoreDownTime.Duration = time.Since(start) - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkOperator) + ) +} + +func (suite *operatorTestSuite) TearDownSuite() { + suite.env.Cleanup() +} + +func (suite *operatorTestSuite) TestOperator() { + suite.env.RunTestInTwoModes(suite.checkOperator) } func (suite *operatorTestSuite) checkOperator(cluster *tests.TestCluster) { diff --git a/tests/pdctl/scheduler/scheduler_test.go b/tests/pdctl/scheduler/scheduler_test.go index d5bea895683..d8d54a79d13 100644 --- a/tests/pdctl/scheduler/scheduler_test.go +++ b/tests/pdctl/scheduler/scheduler_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/spf13/cobra" "github.com/stretchr/testify/require" @@ -39,15 +40,61 @@ import ( type schedulerTestSuite struct { suite.Suite + env *tests.SchedulingTestEnvironment + defaultSchedulers []string } func TestSchedulerTestSuite(t *testing.T) { suite.Run(t, new(schedulerTestSuite)) } +func (suite *schedulerTestSuite) SetupSuite() { + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/skipStoreConfigSync", `return(true)`)) + suite.env = tests.NewSchedulingTestEnvironment(suite.T()) + suite.defaultSchedulers = []string{ + "balance-leader-scheduler", + "balance-region-scheduler", + "balance-hot-region-scheduler", + "balance-witness-scheduler", + "transfer-witness-leader-scheduler", + } +} + +func (suite *schedulerTestSuite) TearDownSuite() { + suite.env.Cleanup() + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/skipStoreConfigSync")) +} + +func (suite *schedulerTestSuite) TearDownTest() { + cleanFunc := func(cluster *tests.TestCluster) { + re := suite.Require() + pdAddr := cluster.GetConfig().GetClientURL() + cmd := pdctlCmd.GetRootCmd() + + var currentSchedulers []string + mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "show"}, ¤tSchedulers) + for _, scheduler := range suite.defaultSchedulers { + if slice.NoneOf(currentSchedulers, func(i int) bool { + return currentSchedulers[i] == scheduler + }) { + echo := mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "add", scheduler}, nil) + re.Contains(echo, "Success!") + } + } + for _, scheduler := range currentSchedulers { + if slice.NoneOf(suite.defaultSchedulers, func(i int) bool { + return suite.defaultSchedulers[i] == scheduler + }) { + echo := mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "remove", scheduler}, nil) + re.Contains(echo, "Success!") + } + } + } + suite.env.RunFuncInTwoModes(cleanFunc) +} + func (suite *schedulerTestSuite) TestScheduler() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkScheduler) + suite.env.RunTestInTwoModes(suite.checkScheduler) } func (suite *schedulerTestSuite) checkScheduler(cluster *tests.TestCluster) { @@ -436,6 +483,7 @@ func (suite *schedulerTestSuite) checkScheduler(cluster *tests.TestCluster) { for _, store := range stores { version := versioninfo.HotScheduleWithQuery store.Version = versioninfo.MinSupportedVersion(version).String() + store.LastHeartbeat = time.Now().UnixNano() tests.MustPutStore(re, cluster, store) } re.Equal("5.2.0", leaderServer.GetClusterVersion().String()) @@ -564,8 +612,7 @@ func (suite *schedulerTestSuite) checkScheduler(cluster *tests.TestCluster) { } func (suite *schedulerTestSuite) TestSchedulerDiagnostic() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkSchedulerDiagnostic) + suite.env.RunTestInTwoModes(suite.checkSchedulerDiagnostic) } func (suite *schedulerTestSuite) checkSchedulerDiagnostic(cluster *tests.TestCluster) { diff --git a/tests/server/api/checker_test.go b/tests/server/api/checker_test.go index 0f359553b73..8037fcc3989 100644 --- a/tests/server/api/checker_test.go +++ b/tests/server/api/checker_test.go @@ -27,14 +27,23 @@ import ( type checkerTestSuite struct { suite.Suite + env *tests.SchedulingTestEnvironment } func TestCheckerTestSuite(t *testing.T) { suite.Run(t, new(checkerTestSuite)) } + +func (suite *checkerTestSuite) SetupSuite() { + suite.env = tests.NewSchedulingTestEnvironment(suite.T()) +} + +func (suite *checkerTestSuite) TearDownSuite() { + suite.env.Cleanup() +} + func (suite *checkerTestSuite) TestAPI() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkAPI) + suite.env.RunTestInTwoModes(suite.checkAPI) } func (suite *checkerTestSuite) checkAPI(cluster *tests.TestCluster) { diff --git a/tests/server/api/operator_test.go b/tests/server/api/operator_test.go index c27ebbe7ee8..41a687b1181 100644 --- a/tests/server/api/operator_test.go +++ b/tests/server/api/operator_test.go @@ -45,20 +45,26 @@ var ( type operatorTestSuite struct { suite.Suite + env *tests.SchedulingTestEnvironment } func TestOperatorTestSuite(t *testing.T) { suite.Run(t, new(operatorTestSuite)) } -func (suite *operatorTestSuite) TestAddRemovePeer() { - opts := []tests.ConfigOption{ +func (suite *operatorTestSuite) SetupSuite() { + suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, serverName string) { conf.Replication.MaxReplicas = 1 - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkAddRemovePeer) + }) +} + +func (suite *operatorTestSuite) TearDownSuite() { + suite.env.Cleanup() +} + +func (suite *operatorTestSuite) TestAddRemovePeer() { + suite.env.RunTestInTwoModes(suite.checkAddRemovePeer) } func (suite *operatorTestSuite) checkAddRemovePeer(cluster *tests.TestCluster) { @@ -168,17 +174,36 @@ func (suite *operatorTestSuite) checkAddRemovePeer(cluster *tests.TestCluster) { } func (suite *operatorTestSuite) TestMergeRegionOperator() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.Replication.MaxReplicas = 1 - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkMergeRegionOperator) + suite.env.RunTestInTwoModes(suite.checkMergeRegionOperator) } func (suite *operatorTestSuite) checkMergeRegionOperator(cluster *tests.TestCluster) { re := suite.Require() + stores := []*metapb.Store{ + { + Id: 1, + State: metapb.StoreState_Up, + NodeState: metapb.NodeState_Serving, + LastHeartbeat: time.Now().UnixNano(), + }, + { + Id: 2, + State: metapb.StoreState_Up, + NodeState: metapb.NodeState_Serving, + LastHeartbeat: time.Now().UnixNano(), + }, + { + Id: 3, + State: metapb.StoreState_Up, + NodeState: metapb.NodeState_Serving, + LastHeartbeat: time.Now().UnixNano(), + }, + } + + for _, store := range stores { + tests.MustPutStore(re, cluster, store) + } + suite.pauseRuleChecker(cluster) r1 := core.NewTestRegionInfo(10, 1, []byte(""), []byte("b"), core.SetWrittenBytes(1000), core.SetReadBytes(1000), core.SetRegionConfVer(1), core.SetRegionVersion(1)) tests.MustPutRegionInfo(re, cluster, r1) @@ -204,13 +229,13 @@ func (suite *operatorTestSuite) checkMergeRegionOperator(cluster *tests.TestClus } func (suite *operatorTestSuite) TestTransferRegionWithPlacementRule() { - opts := []tests.ConfigOption{ + // use a new environment to avoid affecting other tests + env := tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, serverName string) { conf.Replication.MaxReplicas = 3 - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) + }) env.RunTestInTwoModes(suite.checkTransferRegionWithPlacementRule) + env.Cleanup() } func (suite *operatorTestSuite) checkTransferRegionWithPlacementRule(cluster *tests.TestCluster) { diff --git a/tests/server/api/region_test.go b/tests/server/api/region_test.go index dcd31d6462d..450995a6e5e 100644 --- a/tests/server/api/region_test.go +++ b/tests/server/api/region_test.go @@ -19,6 +19,7 @@ import ( "encoding/json" "fmt" "net/http" + "strconv" "testing" "github.com/pingcap/failpoint" @@ -27,21 +28,67 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule/placement" tu "github.com/tikv/pd/pkg/utils/testutil" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/tests" ) type regionTestSuite struct { suite.Suite + env *tests.SchedulingTestEnvironment } func TestRegionTestSuite(t *testing.T) { suite.Run(t, new(regionTestSuite)) } +func (suite *regionTestSuite) SetupSuite() { + suite.env = tests.NewSchedulingTestEnvironment(suite.T()) +} + +func (suite *regionTestSuite) TearDownSuite() { + suite.env.Cleanup() +} + +func (suite *regionTestSuite) TearDownTest() { + cleanFunc := func(cluster *tests.TestCluster) { + // clean region cache + leader := cluster.GetLeaderServer() + re := suite.Require() + pdAddr := cluster.GetConfig().GetClientURL() + for _, region := range leader.GetRegions() { + url := fmt.Sprintf("%s/pd/api/v1/admin/cache/region/%d", pdAddr, region.GetID()) + err := tu.CheckDelete(testDialClient, url, tu.StatusOK(re)) + suite.NoError(err) + } + suite.Empty(leader.GetRegions()) + // clean rules + def := placement.GroupBundle{ + ID: "pd", + Rules: []*placement.Rule{ + {GroupID: "pd", ID: "default", Role: "voter", Count: 3}, + }, + } + data, err := json.Marshal([]placement.GroupBundle{def}) + suite.NoError(err) + urlPrefix := cluster.GetLeaderServer().GetAddr() + err = tu.CheckPostJSON(testDialClient, urlPrefix+"/pd/api/v1/config/placement-rule", data, tu.StatusOK(suite.Require())) + suite.NoError(err) + // clean stores + // TODO: cannot sync to scheduling server? + for _, store := range leader.GetStores() { + suite.NoError(cluster.GetLeaderServer().GetRaftCluster().RemoveStore(store.GetId(), true)) + suite.NoError(cluster.GetLeaderServer().GetRaftCluster().BuryStore(store.GetId(), true)) + } + suite.NoError(cluster.GetLeaderServer().GetRaftCluster().RemoveTombStoneRecords()) + suite.Empty(leader.GetStores()) + } + suite.env.RunFuncInTwoModes(cleanFunc) +} + func (suite *regionTestSuite) TestSplitRegions() { + // use a new environment to avoid affecting other tests env := tests.NewSchedulingTestEnvironment(suite.T()) env.RunTestInTwoModes(suite.checkSplitRegions) + env.Cleanup() } func (suite *regionTestSuite) checkSplitRegions(cluster *tests.TestCluster) { @@ -81,12 +128,7 @@ func (suite *regionTestSuite) checkSplitRegions(cluster *tests.TestCluster) { } func (suite *regionTestSuite) TestAccelerateRegionsScheduleInRange() { - env := tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, serverName string) { - // FIXME: enable placement rules - conf.Replication.EnablePlacementRules = false - conf.Replication.MaxReplicas = 1 - }) - env.RunTestInTwoModes(suite.checkAccelerateRegionsScheduleInRange) + suite.env.RunTestInTwoModes(suite.checkAccelerateRegionsScheduleInRange) } func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRange(cluster *tests.TestCluster) { @@ -101,13 +143,13 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRange(cluster *tes } tests.MustPutStore(re, cluster, s1) } - r1 := core.NewTestRegionInfo(557, 1, []byte("a1"), []byte("a2")) - r2 := core.NewTestRegionInfo(558, 2, []byte("a2"), []byte("a3")) - r3 := core.NewTestRegionInfo(559, 3, []byte("a3"), []byte("a4")) - tests.MustPutRegionInfo(re, cluster, r1) - tests.MustPutRegionInfo(re, cluster, r2) - tests.MustPutRegionInfo(re, cluster, r3) - suite.checkRegionCount(cluster, 3) + regionCount := uint64(3) + for i := uint64(1); i <= regionCount; i++ { + r1 := core.NewTestRegionInfo(550+i, 1, []byte("a"+strconv.FormatUint(i, 10)), []byte("a"+strconv.FormatUint(i+1, 10))) + r1.GetMeta().Peers = append(r1.GetMeta().Peers, &metapb.Peer{Id: 100 + i, StoreId: (i + 1) % regionCount}, &metapb.Peer{Id: 200 + i, StoreId: (i + 2) % regionCount}) + tests.MustPutRegionInfo(re, cluster, r1) + } + suite.checkRegionCount(cluster, regionCount) body := fmt.Sprintf(`{"start_key":"%s", "end_key": "%s"}`, hex.EncodeToString([]byte("a1")), hex.EncodeToString([]byte("a3"))) err := tu.CheckPostJSON(testDialClient, fmt.Sprintf("%s/regions/accelerate-schedule", urlPrefix), []byte(body), @@ -121,19 +163,14 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRange(cluster *tes } func (suite *regionTestSuite) TestAccelerateRegionsScheduleInRanges() { - env := tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, serverName string) { - // FIXME: enable placement rules - conf.Replication.EnablePlacementRules = false - conf.Replication.MaxReplicas = 1 - }) - env.RunTestInTwoModes(suite.checkAccelerateRegionsScheduleInRanges) + suite.env.RunTestInTwoModes(suite.checkAccelerateRegionsScheduleInRanges) } func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRanges(cluster *tests.TestCluster) { leader := cluster.GetLeaderServer() urlPrefix := leader.GetAddr() + "/pd/api/v1" re := suite.Require() - for i := 1; i <= 5; i++ { + for i := 1; i <= 6; i++ { s1 := &metapb.Store{ Id: uint64(i), State: metapb.StoreState_Up, @@ -141,17 +178,13 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRanges(cluster *te } tests.MustPutStore(re, cluster, s1) } - r1 := core.NewTestRegionInfo(557, 1, []byte("a1"), []byte("a2")) - r2 := core.NewTestRegionInfo(558, 2, []byte("a2"), []byte("a3")) - r3 := core.NewTestRegionInfo(559, 3, []byte("a3"), []byte("a4")) - r4 := core.NewTestRegionInfo(560, 4, []byte("a4"), []byte("a5")) - r5 := core.NewTestRegionInfo(561, 5, []byte("a5"), []byte("a6")) - tests.MustPutRegionInfo(re, cluster, r1) - tests.MustPutRegionInfo(re, cluster, r2) - tests.MustPutRegionInfo(re, cluster, r3) - tests.MustPutRegionInfo(re, cluster, r4) - tests.MustPutRegionInfo(re, cluster, r5) - suite.checkRegionCount(cluster, 5) + regionCount := uint64(6) + for i := uint64(1); i <= regionCount; i++ { + r1 := core.NewTestRegionInfo(550+i, 1, []byte("a"+strconv.FormatUint(i, 10)), []byte("a"+strconv.FormatUint(i+1, 10))) + r1.GetMeta().Peers = append(r1.GetMeta().Peers, &metapb.Peer{Id: 100 + i, StoreId: (i + 1) % regionCount}, &metapb.Peer{Id: 200 + i, StoreId: (i + 2) % regionCount}) + tests.MustPutRegionInfo(re, cluster, r1) + } + suite.checkRegionCount(cluster, regionCount) body := fmt.Sprintf(`[{"start_key":"%s", "end_key": "%s"}, {"start_key":"%s", "end_key": "%s"}]`, hex.EncodeToString([]byte("a1")), hex.EncodeToString([]byte("a3")), hex.EncodeToString([]byte("a4")), hex.EncodeToString([]byte("a6"))) @@ -166,8 +199,10 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRanges(cluster *te } func (suite *regionTestSuite) TestScatterRegions() { + // use a new environment to avoid affecting other tests env := tests.NewSchedulingTestEnvironment(suite.T()) env.RunTestInTwoModes(suite.checkScatterRegions) + env.Cleanup() } func (suite *regionTestSuite) checkScatterRegions(cluster *tests.TestCluster) { @@ -182,11 +217,11 @@ func (suite *regionTestSuite) checkScatterRegions(cluster *tests.TestCluster) { } tests.MustPutStore(re, cluster, s1) } - r1 := core.NewTestRegionInfo(601, 13, []byte("b1"), []byte("b2")) + r1 := core.NewTestRegionInfo(701, 13, []byte("b1"), []byte("b2")) r1.GetMeta().Peers = append(r1.GetMeta().Peers, &metapb.Peer{Id: 5, StoreId: 14}, &metapb.Peer{Id: 6, StoreId: 15}) - r2 := core.NewTestRegionInfo(602, 13, []byte("b2"), []byte("b3")) + r2 := core.NewTestRegionInfo(702, 13, []byte("b2"), []byte("b3")) r2.GetMeta().Peers = append(r2.GetMeta().Peers, &metapb.Peer{Id: 7, StoreId: 14}, &metapb.Peer{Id: 8, StoreId: 15}) - r3 := core.NewTestRegionInfo(603, 13, []byte("b4"), []byte("b4")) + r3 := core.NewTestRegionInfo(703, 13, []byte("b4"), []byte("b4")) r3.GetMeta().Peers = append(r3.GetMeta().Peers, &metapb.Peer{Id: 9, StoreId: 14}, &metapb.Peer{Id: 10, StoreId: 15}) tests.MustPutRegionInfo(re, cluster, r1) tests.MustPutRegionInfo(re, cluster, r2) @@ -201,26 +236,24 @@ func (suite *regionTestSuite) checkScatterRegions(cluster *tests.TestCluster) { oc = sche.GetCoordinator().GetOperatorController() } - op1 := oc.GetOperator(601) - op2 := oc.GetOperator(602) - op3 := oc.GetOperator(603) + op1 := oc.GetOperator(701) + op2 := oc.GetOperator(702) + op3 := oc.GetOperator(703) // At least one operator used to scatter region suite.True(op1 != nil || op2 != nil || op3 != nil) - body = `{"regions_id": [601, 602, 603]}` + body = `{"regions_id": [701, 702, 703]}` err = tu.CheckPostJSON(testDialClient, fmt.Sprintf("%s/regions/scatter", urlPrefix), []byte(body), tu.StatusOK(re)) suite.NoError(err) } func (suite *regionTestSuite) TestCheckRegionsReplicated() { - env := tests.NewSchedulingTestEnvironment(suite.T(), - func(conf *config.Config, serverName string) { - conf.Replication.EnablePlacementRules = true - }) - env.RunTestInPDMode(suite.checkRegionsReplicated) + // Fixme: after delete+set rule, the key range will be empty, so the test will fail in api mode. + suite.env.RunTestInPDMode(suite.checkRegionsReplicated) } func (suite *regionTestSuite) checkRegionsReplicated(cluster *tests.TestCluster) { + suite.pauseRuleChecker(cluster) leader := cluster.GetLeaderServer() urlPrefix := leader.GetAddr() + "/pd/api/v1" re := suite.Require() @@ -327,14 +360,28 @@ func (suite *regionTestSuite) checkRegionsReplicated(cluster *tests.TestCluster) suite.Equal("REPLICATED", status) } -func (suite *regionTestSuite) checkRegionCount(cluster *tests.TestCluster, count int) { +func (suite *regionTestSuite) checkRegionCount(cluster *tests.TestCluster, count uint64) { leader := cluster.GetLeaderServer() tu.Eventually(suite.Require(), func() bool { - return leader.GetRaftCluster().GetRegionCount([]byte{}, []byte{}).Count == count + return leader.GetRaftCluster().GetRegionCount([]byte{}, []byte{}).Count == int(count) }) if sche := cluster.GetSchedulingPrimaryServer(); sche != nil { tu.Eventually(suite.Require(), func() bool { - return sche.GetCluster().GetRegionCount([]byte{}, []byte{}) == count + return sche.GetCluster().GetRegionCount([]byte{}, []byte{}) == int(count) }) } } + +// pauseRuleChecker will pause rule checker to avoid unexpected operator. +func (suite *regionTestSuite) pauseRuleChecker(cluster *tests.TestCluster) { + re := suite.Require() + checkerName := "rule" + addr := cluster.GetLeaderServer().GetAddr() + resp := make(map[string]interface{}) + url := fmt.Sprintf("%s/pd/api/v1/checker/%s", addr, checkerName) + err := tu.CheckPostJSON(testDialClient, url, []byte(`{"delay":1000}`), tu.StatusOK(re)) + re.NoError(err) + err = tu.ReadGetJSON(re, testDialClient, url, &resp) + re.NoError(err) + re.True(resp["paused"].(bool)) +} diff --git a/tests/server/api/rule_test.go b/tests/server/api/rule_test.go index ac52362df4e..0a0c3f2fb2e 100644 --- a/tests/server/api/rule_test.go +++ b/tests/server/api/rule_test.go @@ -35,21 +35,43 @@ import ( type ruleTestSuite struct { suite.Suite + env *tests.SchedulingTestEnvironment } func TestRuleTestSuite(t *testing.T) { suite.Run(t, new(ruleTestSuite)) } -func (suite *ruleTestSuite) TestSet() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.PDServerCfg.KeyType = "raw" - conf.Replication.EnablePlacementRules = true - }, +func (suite *ruleTestSuite) SetupSuite() { + suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, serverName string) { + conf.PDServerCfg.KeyType = "raw" + conf.Replication.EnablePlacementRules = true + }) +} + +func (suite *ruleTestSuite) TearDownSuite() { + suite.env.Cleanup() +} + +func (suite *ruleTestSuite) TearDownTest() { + cleanFunc := func(cluster *tests.TestCluster) { + def := placement.GroupBundle{ + ID: "pd", + Rules: []*placement.Rule{ + {GroupID: "pd", ID: "default", Role: "voter", Count: 3}, + }, + } + data, err := json.Marshal([]placement.GroupBundle{def}) + suite.NoError(err) + urlPrefix := cluster.GetLeaderServer().GetAddr() + err = tu.CheckPostJSON(testDialClient, urlPrefix+"/pd/api/v1/config/placement-rule", data, tu.StatusOK(suite.Require())) + suite.NoError(err) } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkSet) + suite.env.RunFuncInTwoModes(cleanFunc) +} + +func (suite *ruleTestSuite) TestSet() { + suite.env.RunTestInTwoModes(suite.checkSet) } func (suite *ruleTestSuite) checkSet(cluster *tests.TestCluster) { @@ -165,14 +187,7 @@ func (suite *ruleTestSuite) checkSet(cluster *tests.TestCluster) { } func (suite *ruleTestSuite) TestGet() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.PDServerCfg.KeyType = "raw" - conf.Replication.EnablePlacementRules = true - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkGet) + suite.env.RunTestInTwoModes(suite.checkGet) } func (suite *ruleTestSuite) checkGet(cluster *tests.TestCluster) { @@ -223,14 +238,7 @@ func (suite *ruleTestSuite) checkGet(cluster *tests.TestCluster) { } func (suite *ruleTestSuite) TestGetAll() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.PDServerCfg.KeyType = "raw" - conf.Replication.EnablePlacementRules = true - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkGetAll) + suite.env.RunTestInTwoModes(suite.checkGetAll) } func (suite *ruleTestSuite) checkGetAll(cluster *tests.TestCluster) { @@ -252,14 +260,7 @@ func (suite *ruleTestSuite) checkGetAll(cluster *tests.TestCluster) { } func (suite *ruleTestSuite) TestSetAll() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.PDServerCfg.KeyType = "raw" - conf.Replication.EnablePlacementRules = true - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkSetAll) + suite.env.RunTestInTwoModes(suite.checkSetAll) } func (suite *ruleTestSuite) checkSetAll(cluster *tests.TestCluster) { @@ -375,14 +376,7 @@ func (suite *ruleTestSuite) checkSetAll(cluster *tests.TestCluster) { } func (suite *ruleTestSuite) TestGetAllByGroup() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.PDServerCfg.KeyType = "raw" - conf.Replication.EnablePlacementRules = true - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkGetAllByGroup) + suite.env.RunTestInTwoModes(suite.checkGetAllByGroup) } func (suite *ruleTestSuite) checkGetAllByGroup(cluster *tests.TestCluster) { @@ -439,14 +433,7 @@ func (suite *ruleTestSuite) checkGetAllByGroup(cluster *tests.TestCluster) { } func (suite *ruleTestSuite) TestGetAllByRegion() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.PDServerCfg.KeyType = "raw" - conf.Replication.EnablePlacementRules = true - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkGetAllByRegion) + suite.env.RunTestInTwoModes(suite.checkGetAllByRegion) } func (suite *ruleTestSuite) checkGetAllByRegion(cluster *tests.TestCluster) { @@ -511,14 +498,8 @@ func (suite *ruleTestSuite) checkGetAllByRegion(cluster *tests.TestCluster) { } func (suite *ruleTestSuite) TestGetAllByKey() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.PDServerCfg.KeyType = "raw" - conf.Replication.EnablePlacementRules = true - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkGetAllByKey) + // Fixme: after delete+set rule, the key range will be empty, so the test will fail in api mode. + suite.env.RunTestInPDMode(suite.checkGetAllByKey) } func (suite *ruleTestSuite) checkGetAllByKey(cluster *tests.TestCluster) { @@ -577,14 +558,7 @@ func (suite *ruleTestSuite) checkGetAllByKey(cluster *tests.TestCluster) { } func (suite *ruleTestSuite) TestDelete() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.PDServerCfg.KeyType = "raw" - conf.Replication.EnablePlacementRules = true - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkDelete) + suite.env.RunTestInTwoModes(suite.checkDelete) } func (suite *ruleTestSuite) checkDelete(cluster *tests.TestCluster) { @@ -649,14 +623,7 @@ func (suite *ruleTestSuite) checkDelete(cluster *tests.TestCluster) { } func (suite *ruleTestSuite) TestBatch() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.PDServerCfg.KeyType = "raw" - conf.Replication.EnablePlacementRules = true - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkBatch) + suite.env.RunTestInTwoModes(suite.checkBatch) } func (suite *ruleTestSuite) checkBatch(cluster *tests.TestCluster) { @@ -785,14 +752,7 @@ func (suite *ruleTestSuite) checkBatch(cluster *tests.TestCluster) { } func (suite *ruleTestSuite) TestBundle() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.PDServerCfg.KeyType = "raw" - conf.Replication.EnablePlacementRules = true - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkBundle) + suite.env.RunTestInTwoModes(suite.checkBundle) } func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { @@ -936,14 +896,7 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { } func (suite *ruleTestSuite) TestBundleBadRequest() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.PDServerCfg.KeyType = "raw" - conf.Replication.EnablePlacementRules = true - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkBundleBadRequest) + suite.env.RunTestInTwoModes(suite.checkBundleBadRequest) } func (suite *ruleTestSuite) checkBundleBadRequest(cluster *tests.TestCluster) { @@ -997,21 +950,26 @@ func (suite *ruleTestSuite) compareRule(r1 *placement.Rule, r2 *placement.Rule) type regionRuleTestSuite struct { suite.Suite + env *tests.SchedulingTestEnvironment } func TestRegionRuleTestSuite(t *testing.T) { suite.Run(t, new(regionRuleTestSuite)) } +func (suite *regionRuleTestSuite) SetupSuite() { + suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, serverName string) { + conf.Replication.EnablePlacementRules = true + conf.Replication.MaxReplicas = 1 + }) +} + +func (suite *regionRuleTestSuite) TearDownSuite() { + suite.env.Cleanup() +} + func (suite *regionRuleTestSuite) TestRegionPlacementRule() { - opts := []tests.ConfigOption{ - func(conf *config.Config, serverName string) { - conf.Replication.EnablePlacementRules = true - conf.Replication.MaxReplicas = 1 - }, - } - env := tests.NewSchedulingTestEnvironment(suite.T(), opts...) - env.RunTestInTwoModes(suite.checkRegionPlacementRule) + suite.env.RunTestInTwoModes(suite.checkRegionPlacementRule) } func (suite *regionRuleTestSuite) checkRegionPlacementRule(cluster *tests.TestCluster) { diff --git a/tests/server/api/scheduler_test.go b/tests/server/api/scheduler_test.go index 69ee37d49e8..77dd22a006e 100644 --- a/tests/server/api/scheduler_test.go +++ b/tests/server/api/scheduler_test.go @@ -20,6 +20,7 @@ import ( "io" "net/http" "reflect" + "strings" "testing" "time" @@ -39,15 +40,25 @@ const apiPrefix = "/pd" type scheduleTestSuite struct { suite.Suite + env *tests.SchedulingTestEnvironment } func TestScheduleTestSuite(t *testing.T) { suite.Run(t, new(scheduleTestSuite)) } +func (suite *scheduleTestSuite) SetupSuite() { + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/skipStoreConfigSync", `return(true)`)) + suite.env = tests.NewSchedulingTestEnvironment(suite.T()) +} + +func (suite *scheduleTestSuite) TearDownSuite() { + suite.env.Cleanup() + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/skipStoreConfigSync")) +} + func (suite *scheduleTestSuite) TestOriginAPI() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkOriginAPI) + suite.env.RunTestInTwoModes(suite.checkOriginAPI) } func (suite *scheduleTestSuite) checkOriginAPI(cluster *tests.TestCluster) { @@ -115,8 +126,7 @@ func (suite *scheduleTestSuite) checkOriginAPI(cluster *tests.TestCluster) { } func (suite *scheduleTestSuite) TestAPI() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkAPI) + suite.env.RunTestInTwoModes(suite.checkAPI) } func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { @@ -566,10 +576,7 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { } func (suite *scheduleTestSuite) TestDisable() { - suite.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/skipStoreConfigSync", `return(true)`)) - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkDisable) - suite.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/skipStoreConfigSync")) + suite.env.RunTestInTwoModes(suite.checkDisable) } func (suite *scheduleTestSuite) checkDisable(cluster *tests.TestCluster) { @@ -678,8 +685,7 @@ func (suite *scheduleTestSuite) testPauseOrResume(urlPrefix string, name, create } func (suite *scheduleTestSuite) TestEmptySchedulers() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkEmptySchedulers) + suite.env.RunTestInTwoModes(suite.checkEmptySchedulers) } func (suite *scheduleTestSuite) checkEmptySchedulers(cluster *tests.TestCluster) { @@ -695,20 +701,30 @@ func (suite *scheduleTestSuite) checkEmptySchedulers(cluster *tests.TestCluster) } tests.MustPutStore(suite.Require(), cluster, store) } - - // test disabled and paused schedulers - suite.checkEmptySchedulersResp(urlPrefix + "?status=disabled") - suite.checkEmptySchedulersResp(urlPrefix + "?status=paused") - - // test enabled schedulers - schedulers := make([]string, 0) - suite.NoError(tu.ReadGetJSON(re, testDialClient, urlPrefix, &schedulers)) - for _, scheduler := range schedulers { - suite.deleteScheduler(urlPrefix, scheduler) + for _, query := range []string{"", "?status=paused", "?status=disabled"} { + schedulers := make([]string, 0) + suite.NoError(tu.ReadGetJSON(re, testDialClient, urlPrefix+query, &schedulers)) + for _, scheduler := range schedulers { + if strings.Contains(query, "disable") { + input := make(map[string]interface{}) + input["name"] = scheduler + body, err := json.Marshal(input) + suite.NoError(err) + suite.addScheduler(urlPrefix, body) + } else { + suite.deleteScheduler(urlPrefix, scheduler) + } + } + tu.Eventually(re, func() bool { + resp, err := apiutil.GetJSON(testDialClient, urlPrefix+query, nil) + suite.NoError(err) + defer resp.Body.Close() + suite.Equal(http.StatusOK, resp.StatusCode) + b, err := io.ReadAll(resp.Body) + suite.NoError(err) + return strings.Contains(string(b), "[]") && !strings.Contains(string(b), "null") + }) } - suite.NoError(tu.ReadGetJSON(re, testDialClient, urlPrefix, &schedulers)) - suite.Len(schedulers, 0) - suite.checkEmptySchedulersResp(urlPrefix) } func (suite *scheduleTestSuite) assertSchedulerExists(re *require.Assertions, urlPrefix string, scheduler string) { @@ -738,14 +754,3 @@ func (suite *scheduleTestSuite) isSchedulerPaused(urlPrefix, name string) bool { } return false } - -func (suite *scheduleTestSuite) checkEmptySchedulersResp(url string) { - resp, err := apiutil.GetJSON(testDialClient, url, nil) - suite.NoError(err) - defer resp.Body.Close() - suite.Equal(http.StatusOK, resp.StatusCode) - b, err := io.ReadAll(resp.Body) - suite.NoError(err) - suite.Contains(string(b), "[]") - suite.NotContains(string(b), "null") -} diff --git a/tests/server/config/config_test.go b/tests/server/config/config_test.go index eb7acb80b96..faa03c15329 100644 --- a/tests/server/config/config_test.go +++ b/tests/server/config/config_test.go @@ -86,15 +86,22 @@ func TestRateLimitConfigReload(t *testing.T) { type configTestSuite struct { suite.Suite + env *tests.SchedulingTestEnvironment } func TestConfigTestSuite(t *testing.T) { suite.Run(t, new(configTestSuite)) } +func (suite *configTestSuite) SetupSuite() { + suite.env = tests.NewSchedulingTestEnvironment(suite.T()) +} + +func (suite *configTestSuite) TearDownSuite() { + suite.env.Cleanup() +} func (suite *configTestSuite) TestConfigAll() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkConfigAll) + suite.env.RunTestInTwoModes(suite.checkConfigAll) } func (suite *configTestSuite) checkConfigAll(cluster *tests.TestCluster) { @@ -212,8 +219,7 @@ func (suite *configTestSuite) checkConfigAll(cluster *tests.TestCluster) { } func (suite *configTestSuite) TestConfigSchedule() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkConfigSchedule) + suite.env.RunTestInTwoModes(suite.checkConfigSchedule) } func (suite *configTestSuite) checkConfigSchedule(cluster *tests.TestCluster) { @@ -231,14 +237,15 @@ func (suite *configTestSuite) checkConfigSchedule(cluster *tests.TestCluster) { err = tu.CheckPostJSON(testDialClient, addr, postData, tu.StatusOK(re)) suite.NoError(err) - scheduleConfig1 := &sc.ScheduleConfig{} - suite.NoError(tu.ReadGetJSON(re, testDialClient, addr, scheduleConfig1)) - suite.Equal(*scheduleConfig1, *scheduleConfig) + tu.Eventually(re, func() bool { + scheduleConfig1 := &sc.ScheduleConfig{} + suite.NoError(tu.ReadGetJSON(re, testDialClient, addr, scheduleConfig1)) + return reflect.DeepEqual(*scheduleConfig1, *scheduleConfig) + }) } func (suite *configTestSuite) TestConfigReplication() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkConfigReplication) + suite.env.RunTestInTwoModes(suite.checkConfigReplication) } func (suite *configTestSuite) checkConfigReplication(cluster *tests.TestCluster) { @@ -281,8 +288,7 @@ func (suite *configTestSuite) checkConfigReplication(cluster *tests.TestCluster) } func (suite *configTestSuite) TestConfigLabelProperty() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkConfigLabelProperty) + suite.env.RunTestInTwoModes(suite.checkConfigLabelProperty) } func (suite *configTestSuite) checkConfigLabelProperty(cluster *tests.TestCluster) { @@ -334,8 +340,7 @@ func (suite *configTestSuite) checkConfigLabelProperty(cluster *tests.TestCluste } func (suite *configTestSuite) TestConfigDefault() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkConfigDefault) + suite.env.RunTestInTwoModes(suite.checkConfigDefault) } func (suite *configTestSuite) checkConfigDefault(cluster *tests.TestCluster) { @@ -379,8 +384,7 @@ func (suite *configTestSuite) checkConfigDefault(cluster *tests.TestCluster) { } func (suite *configTestSuite) TestConfigPDServer() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkConfigPDServer) + suite.env.RunTestInTwoModes(suite.checkConfigPDServer) } func (suite *configTestSuite) checkConfigPDServer(cluster *tests.TestCluster) { @@ -507,8 +511,7 @@ func createTTLUrl(url string, ttl int) string { } func (suite *configTestSuite) TestConfigTTL() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkConfigTTL) + suite.env.RunTestInTwoModes(suite.checkConfigTTL) } func (suite *configTestSuite) checkConfigTTL(cluster *tests.TestCluster) { @@ -569,8 +572,7 @@ func (suite *configTestSuite) checkConfigTTL(cluster *tests.TestCluster) { } func (suite *configTestSuite) TestTTLConflict() { - env := tests.NewSchedulingTestEnvironment(suite.T()) - env.RunTestInTwoModes(suite.checkTTLConflict) + suite.env.RunTestInTwoModes(suite.checkTTLConflict) } func (suite *configTestSuite) checkTTLConflict(cluster *tests.TestCluster) { diff --git a/tests/testutil.go b/tests/testutil.go index 1bdd7ae10dd..0956cf1a4bd 100644 --- a/tests/testutil.go +++ b/tests/testutil.go @@ -18,6 +18,8 @@ import ( "context" "fmt" "os" + "runtime" + "strings" "sync" "testing" "time" @@ -173,12 +175,19 @@ func MustPutStore(re *require.Assertions, cluster *TestCluster, store *metapb.St }) re.NoError(err) + ts := store.GetLastHeartbeat() + if ts == 0 { + ts = time.Now().UnixNano() + } storeInfo := grpcServer.GetRaftCluster().GetStore(store.GetId()) - newStore := storeInfo.Clone(core.SetStoreStats(&pdpb.StoreStats{ - Capacity: uint64(10 * units.GiB), - UsedSize: uint64(9 * units.GiB), - Available: uint64(1 * units.GiB), - })) + newStore := storeInfo.Clone( + core.SetStoreStats(&pdpb.StoreStats{ + Capacity: uint64(10 * units.GiB), + UsedSize: uint64(9 * units.GiB), + Available: uint64(1 * units.GiB), + }), + core.SetLastHeartbeatTS(time.Unix(ts/1e9, ts%1e9)), + ) grpcServer.GetRaftCluster().GetBasicCluster().PutStore(newStore) if cluster.GetSchedulingPrimaryServer() != nil { cluster.GetSchedulingPrimaryServer().GetCluster().PutStore(newStore) @@ -239,18 +248,19 @@ const ( // SchedulingTestEnvironment is used for test purpose. type SchedulingTestEnvironment struct { - t *testing.T - ctx context.Context - cancel context.CancelFunc - cluster *TestCluster - opts []ConfigOption + t *testing.T + opts []ConfigOption + clusters map[mode]*TestCluster + cancels []context.CancelFunc } // NewSchedulingTestEnvironment is to create a new SchedulingTestEnvironment. func NewSchedulingTestEnvironment(t *testing.T, opts ...ConfigOption) *SchedulingTestEnvironment { return &SchedulingTestEnvironment{ - t: t, - opts: opts, + t: t, + opts: opts, + clusters: make(map[mode]*TestCluster), + cancels: make([]context.CancelFunc, 0), } } @@ -262,62 +272,95 @@ func (s *SchedulingTestEnvironment) RunTestInTwoModes(test func(*TestCluster)) { // RunTestInPDMode is to run test in pd mode. func (s *SchedulingTestEnvironment) RunTestInPDMode(test func(*TestCluster)) { - s.t.Log("start to run test in pd mode") - s.startCluster(pdMode) - test(s.cluster) - s.cleanup() - s.t.Log("finish to run test in pd mode") + s.t.Logf("start test %s in pd mode", s.getTestName()) + if _, ok := s.clusters[pdMode]; !ok { + s.startCluster(pdMode) + } + test(s.clusters[pdMode]) +} + +func (s *SchedulingTestEnvironment) getTestName() string { + pc, _, _, _ := runtime.Caller(2) + caller := runtime.FuncForPC(pc) + if caller == nil || strings.Contains(caller.Name(), "RunTestInTwoModes") { + pc, _, _, _ = runtime.Caller(3) + caller = runtime.FuncForPC(pc) + } + if caller != nil { + elements := strings.Split(caller.Name(), ".") + return elements[len(elements)-1] + } + return "" } // RunTestInAPIMode is to run test in api mode. func (s *SchedulingTestEnvironment) RunTestInAPIMode(test func(*TestCluster)) { - s.t.Log("start to run test in api mode") re := require.New(s.t) re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/mcs/scheduling/server/fastUpdateMember", `return(true)`)) - s.startCluster(apiMode) - test(s.cluster) - s.cleanup() - re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/mcs/scheduling/server/fastUpdateMember")) - re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs")) - s.t.Log("finish to run test in api mode") + defer func() { + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/mcs/scheduling/server/fastUpdateMember")) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs")) + }() + s.t.Logf("start test %s in api mode", s.getTestName()) + if _, ok := s.clusters[apiMode]; !ok { + s.startCluster(apiMode) + } + test(s.clusters[apiMode]) } -func (s *SchedulingTestEnvironment) cleanup() { - s.cluster.Destroy() - s.cancel() +// RunFuncInTwoModes is to run func in two modes. +func (s *SchedulingTestEnvironment) RunFuncInTwoModes(f func(*TestCluster)) { + if c, ok := s.clusters[pdMode]; ok { + f(c) + } + if c, ok := s.clusters[apiMode]; ok { + f(c) + } +} + +// Cleanup is to cleanup the environment. +func (s *SchedulingTestEnvironment) Cleanup() { + for _, cluster := range s.clusters { + cluster.Destroy() + } + for _, cancel := range s.cancels { + cancel() + } } func (s *SchedulingTestEnvironment) startCluster(m mode) { - var err error re := require.New(s.t) - s.ctx, s.cancel = context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + s.cancels = append(s.cancels, cancel) switch m { case pdMode: - s.cluster, err = NewTestCluster(s.ctx, 1, s.opts...) + cluster, err := NewTestCluster(ctx, 1, s.opts...) re.NoError(err) - err = s.cluster.RunInitialServers() + err = cluster.RunInitialServers() re.NoError(err) - re.NotEmpty(s.cluster.WaitLeader()) - leaderServer := s.cluster.GetServer(s.cluster.GetLeader()) + re.NotEmpty(cluster.WaitLeader()) + leaderServer := cluster.GetServer(cluster.GetLeader()) re.NoError(leaderServer.BootstrapCluster()) + s.clusters[pdMode] = cluster case apiMode: - s.cluster, err = NewTestAPICluster(s.ctx, 1, s.opts...) + cluster, err := NewTestAPICluster(ctx, 1, s.opts...) re.NoError(err) - err = s.cluster.RunInitialServers() + err = cluster.RunInitialServers() re.NoError(err) - re.NotEmpty(s.cluster.WaitLeader()) - leaderServer := s.cluster.GetServer(s.cluster.GetLeader()) + re.NotEmpty(cluster.WaitLeader()) + leaderServer := cluster.GetServer(cluster.GetLeader()) re.NoError(leaderServer.BootstrapCluster()) leaderServer.GetRaftCluster().SetPrepared() // start scheduling cluster - tc, err := NewTestSchedulingCluster(s.ctx, 1, leaderServer.GetAddr()) + tc, err := NewTestSchedulingCluster(ctx, 1, leaderServer.GetAddr()) re.NoError(err) tc.WaitForPrimaryServing(re) - s.cluster.SetSchedulingCluster(tc) + cluster.SetSchedulingCluster(tc) time.Sleep(200 * time.Millisecond) // wait for scheduling cluster to update member testutil.Eventually(re, func() bool { - return s.cluster.GetLeaderServer().GetServer().GetRaftCluster().IsServiceIndependent(utils.SchedulingServiceName) + return cluster.GetLeaderServer().GetServer().GetRaftCluster().IsServiceIndependent(utils.SchedulingServiceName) }) + s.clusters[apiMode] = cluster } }