diff --git a/tests/pdctl/helper.go b/tests/pdctl/helper.go index c5aaf948aa23..775f0b40f15e 100644 --- a/tests/pdctl/helper.go +++ b/tests/pdctl/helper.go @@ -21,7 +21,6 @@ import ( "sort" "github.com/gogo/protobuf/proto" - "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/spf13/cobra" @@ -105,20 +104,6 @@ func MustPutStore(re *require.Assertions, svr *server.Server, store *metapb.Stor re.NoError(err) } -// MustPutStoreWithCheck is a temporary function for test purpose. -func MustPutStoreWithCheck(c *check.C, svr *server.Server, store *metapb.Store) { - store.Address = fmt.Sprintf("tikv%d", store.GetId()) - if len(store.Version) == 0 { - store.Version = versioninfo.MinSupportedVersion(versioninfo.Version2_0).String() - } - grpcServer := &server.GrpcServer{Server: svr} - _, err := grpcServer.PutStore(context.Background(), &pdpb.PutStoreRequest{ - Header: &pdpb.RequestHeader{ClusterId: svr.ClusterID()}, - Store: store, - }) - c.Assert(err, check.IsNil) -} - // MustPutRegion is used for test purpose. func MustPutRegion(re *require.Assertions, cluster *tests.TestCluster, regionID, storeID uint64, start, end []byte, opts ...core.RegionCreateOption) *core.RegionInfo { leader := &metapb.Peer{ @@ -138,25 +123,6 @@ func MustPutRegion(re *require.Assertions, cluster *tests.TestCluster, regionID, return r } -// MustPutRegionWithCheck is a temporary function for test purpose. -func MustPutRegionWithCheck(c *check.C, cluster *tests.TestCluster, regionID, storeID uint64, start, end []byte, opts ...core.RegionCreateOption) *core.RegionInfo { - leader := &metapb.Peer{ - Id: regionID, - StoreId: storeID, - } - metaRegion := &metapb.Region{ - Id: regionID, - StartKey: start, - EndKey: end, - Peers: []*metapb.Peer{leader}, - RegionEpoch: &metapb.RegionEpoch{ConfVer: 1, Version: 1}, - } - r := core.NewRegionInfo(metaRegion, leader, opts...) - err := cluster.HandleRegionHeartbeat(r) - c.Assert(err, check.IsNil) - return r -} - func checkerWithNilAssert(re *require.Assertions) *assertutil.Checker { checker := assertutil.NewChecker(func() { re.FailNow("should be nil") diff --git a/tests/server/api/api_test.go b/tests/server/api/api_test.go index e462adae2ba2..e99b3919619a 100644 --- a/tests/server/api/api_test.go +++ b/tests/server/api/api_test.go @@ -27,11 +27,12 @@ import ( "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/log" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/apiutil/serverapi" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/pkg/typeutil" @@ -51,55 +52,47 @@ var dialClient = &http.Client{ }, } -func Test(t *testing.T) { - TestingT(t) -} - func TestMain(m *testing.M) { goleak.VerifyTestMain(m, testutil.LeakOptions...) 
} -var _ = Suite(&serverTestSuite{}) - -type serverTestSuite struct{} - -func (s *serverTestSuite) TestReconnect(c *C) { +func TestReconnect(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { conf.TickInterval = typeutil.Duration{Duration: 50 * time.Millisecond} conf.ElectionInterval = typeutil.Duration{Duration: 250 * time.Millisecond} }) - c.Assert(err, IsNil) + re.NoError(err) defer cluster.Destroy() - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(cluster.RunInitialServers()) // Make connections to followers. // Make sure they proxy requests to the leader. leader := cluster.WaitLeader() for name, s := range cluster.GetServers() { if name != leader { - res, e := http.Get(s.GetConfig().AdvertiseClientUrls + "/pd/api/v1/version") - c.Assert(e, IsNil) + res, err := http.Get(s.GetConfig().AdvertiseClientUrls + "/pd/api/v1/version") + re.NoError(err) res.Body.Close() - c.Assert(res.StatusCode, Equals, http.StatusOK) + re.Equal(http.StatusOK, res.StatusCode) } } // Close the leader and wait for a new one. err = cluster.GetServer(leader).Stop() - c.Assert(err, IsNil) + re.NoError(err) newLeader := cluster.WaitLeader() - c.Assert(newLeader, Not(HasLen), 0) + re.NotEmpty(newLeader) // Make sure they proxy requests to the new leader. for name, s := range cluster.GetServers() { if name != leader { - testutil.WaitUntil(c, func() bool { - res, e := http.Get(s.GetConfig().AdvertiseClientUrls + "/pd/api/v1/version") - c.Assert(e, IsNil) + testutil.Eventually(re, func() bool { + res, err := http.Get(s.GetConfig().AdvertiseClientUrls + "/pd/api/v1/version") + re.NoError(err) defer res.Body.Close() return res.StatusCode == http.StatusOK }) @@ -107,15 +100,14 @@ func (s *serverTestSuite) TestReconnect(c *C) { } // Close the new leader and then we have only one node. - err = cluster.GetServer(newLeader).Stop() - c.Assert(err, IsNil) + re.NoError(cluster.GetServer(newLeader).Stop()) // Request will fail with no leader. 
for name, s := range cluster.GetServers() { if name != leader && name != newLeader { - testutil.WaitUntil(c, func() bool { + testutil.Eventually(re, func() bool { res, err := http.Get(s.GetConfig().AdvertiseClientUrls + "/pd/api/v1/version") - c.Assert(err, IsNil) + re.NoError(err) defer res.Body.Close() return res.StatusCode == http.StatusServiceUnavailable }) @@ -123,77 +115,80 @@ func (s *serverTestSuite) TestReconnect(c *C) { } } -var _ = Suite(&testMiddlewareSuite{}) - -type testMiddlewareSuite struct { +type middlewareTestSuite struct { + suite.Suite cleanup func() cluster *tests.TestCluster } -func (s *testMiddlewareSuite) SetUpSuite(c *C) { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/api/enableFailpointAPI", "return(true)"), IsNil) +func TestMiddlewareTestSuite(t *testing.T) { + suite.Run(t, new(middlewareTestSuite)) +} + +func (suite *middlewareTestSuite) SetupSuite() { + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/api/enableFailpointAPI", "return(true)")) ctx, cancel := context.WithCancel(context.Background()) - s.cleanup = cancel + suite.cleanup = cancel cluster, err := tests.NewTestCluster(ctx, 3) - c.Assert(err, IsNil) - c.Assert(cluster.RunInitialServers(), IsNil) - c.Assert(cluster.WaitLeader(), Not(HasLen), 0) - s.cluster = cluster + suite.NoError(err) + suite.NoError(cluster.RunInitialServers()) + suite.NotEmpty(cluster.WaitLeader()) + suite.cluster = cluster } -func (s *testMiddlewareSuite) TearDownSuite(c *C) { - c.Assert(failpoint.Disable("github.com/tikv/pd/server/api/enableFailpointAPI"), IsNil) - s.cleanup() - s.cluster.Destroy() +func (suite *middlewareTestSuite) TearDownSuite() { + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/api/enableFailpointAPI")) + suite.cleanup() + suite.cluster.Destroy() } -func (s *testMiddlewareSuite) TestRequestInfoMiddleware(c *C) { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/api/addRequestInfoMiddleware", "return(true)"), IsNil) - leader := s.cluster.GetServer(s.cluster.GetLeader()) +func (suite *middlewareTestSuite) TestRequestInfoMiddleware() { + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/api/addRequestInfoMiddleware", "return(true)")) + leader := suite.cluster.GetServer(suite.cluster.GetLeader()) input := map[string]interface{}{ "enable-audit": "true", } data, err := json.Marshal(input) - c.Assert(err, IsNil) + suite.NoError(err) req, _ := http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/service-middleware/config", bytes.NewBuffer(data)) resp, err := dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) resp.Body.Close() - c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled(), Equals, true) + suite.True(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled()) labels := make(map[string]interface{}) labels["testkey"] = "testvalue" data, _ = json.Marshal(labels) resp, err = dialClient.Post(leader.GetAddr()+"/pd/api/v1/debug/pprof/profile?force=true", "application/json", bytes.NewBuffer(data)) - c.Assert(err, IsNil) + suite.NoError(err) _, err = io.ReadAll(resp.Body) resp.Body.Close() - c.Assert(err, IsNil) - c.Assert(resp.StatusCode, Equals, http.StatusOK) + suite.NoError(err) + suite.Equal(http.StatusOK, resp.StatusCode) - c.Assert(resp.Header.Get("service-label"), Equals, "Profile") - c.Assert(resp.Header.Get("url-param"), Equals, "{\"force\":[\"true\"]}") - c.Assert(resp.Header.Get("body-param"), Equals, "{\"testkey\":\"testvalue\"}") - c.Assert(resp.Header.Get("method"), Equals, 
"HTTP/1.1/POST:/pd/api/v1/debug/pprof/profile") - c.Assert(resp.Header.Get("component"), Equals, "anonymous") - c.Assert(resp.Header.Get("ip"), Equals, "127.0.0.1") + suite.Equal("Profile", resp.Header.Get("service-label")) + suite.Equal("{\"force\":[\"true\"]}", resp.Header.Get("url-param")) + suite.Equal("{\"testkey\":\"testvalue\"}", resp.Header.Get("body-param")) + suite.Equal("HTTP/1.1/POST:/pd/api/v1/debug/pprof/profile", resp.Header.Get("method")) + suite.Equal("anonymous", resp.Header.Get("component")) + suite.Equal("127.0.0.1", resp.Header.Get("ip")) input = map[string]interface{}{ "enable-audit": "false", } data, err = json.Marshal(input) - c.Assert(err, IsNil) + suite.NoError(err) req, _ = http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/service-middleware/config", bytes.NewBuffer(data)) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) resp.Body.Close() - c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled(), Equals, false) + suite.False(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled()) - header := mustRequestSuccess(c, leader.GetServer()) - c.Assert(header.Get("service-label"), Equals, "") + header := mustRequestSuccess(suite.Require(), leader.GetServer()) + suite.Equal("", header.Get("service-label")) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/api/addRequestInfoMiddleware"), IsNil) + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/api/addRequestInfoMiddleware")) } func BenchmarkDoRequestWithServiceMiddleware(b *testing.B) { @@ -248,96 +243,96 @@ func doTestRequest(srv *tests.TestServer) { resp.Body.Close() } -func (s *testMiddlewareSuite) TestAuditPrometheusBackend(c *C) { - leader := s.cluster.GetServer(s.cluster.GetLeader()) +func (suite *middlewareTestSuite) TestAuditPrometheusBackend() { + leader := suite.cluster.GetServer(suite.cluster.GetLeader()) input := map[string]interface{}{ "enable-audit": "true", } data, err := json.Marshal(input) - c.Assert(err, IsNil) + suite.NoError(err) req, _ := http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/service-middleware/config", bytes.NewBuffer(data)) resp, err := dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) resp.Body.Close() - c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled(), Equals, true) + suite.True(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled()) timeUnix := time.Now().Unix() - 20 req, _ = http.NewRequest("GET", fmt.Sprintf("%s/pd/api/v1/trend?from=%d", leader.GetAddr(), timeUnix), nil) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) _, err = io.ReadAll(resp.Body) resp.Body.Close() - c.Assert(err, IsNil) + suite.NoError(err) req, _ = http.NewRequest("GET", leader.GetAddr()+"/metrics", nil) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) defer resp.Body.Close() content, _ := io.ReadAll(resp.Body) output := string(content) - c.Assert(strings.Contains(output, "pd_service_audit_handling_seconds_count{component=\"anonymous\",method=\"HTTP\",service=\"GetTrend\"} 1"), Equals, true) + suite.Contains(output, "pd_service_audit_handling_seconds_count{component=\"anonymous\",method=\"HTTP\",service=\"GetTrend\"} 1") // resign to test persist config oldLeaderName := leader.GetServer().Name() leader.GetServer().GetMember().ResignEtcdLeader(leader.GetServer().Context(), oldLeaderName, "") - mustWaitLeader(c, s.cluster.GetServers()) - leader = s.cluster.GetServer(s.cluster.GetLeader()) + 
suite.mustWaitLeader() + leader = suite.cluster.GetServer(suite.cluster.GetLeader()) timeUnix = time.Now().Unix() - 20 req, _ = http.NewRequest("GET", fmt.Sprintf("%s/pd/api/v1/trend?from=%d", leader.GetAddr(), timeUnix), nil) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) _, err = io.ReadAll(resp.Body) resp.Body.Close() - c.Assert(err, IsNil) + suite.NoError(err) req, _ = http.NewRequest("GET", leader.GetAddr()+"/metrics", nil) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) defer resp.Body.Close() content, _ = io.ReadAll(resp.Body) output = string(content) - c.Assert(strings.Contains(output, "pd_service_audit_handling_seconds_count{component=\"anonymous\",method=\"HTTP\",service=\"GetTrend\"} 2"), Equals, true) + suite.Contains(output, "pd_service_audit_handling_seconds_count{component=\"anonymous\",method=\"HTTP\",service=\"GetTrend\"} 2") input = map[string]interface{}{ "enable-audit": "false", } data, err = json.Marshal(input) - c.Assert(err, IsNil) + suite.NoError(err) req, _ = http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/service-middleware/config", bytes.NewBuffer(data)) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) resp.Body.Close() - c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled(), Equals, false) + suite.False(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled()) } -func (s *testMiddlewareSuite) TestAuditLocalLogBackend(c *C) { +func (suite *middlewareTestSuite) TestAuditLocalLogBackend() { tempStdoutFile, _ := os.CreateTemp("/tmp", "pd_tests") cfg := &log.Config{} cfg.File.Filename = tempStdoutFile.Name() cfg.Level = "info" lg, p, _ := log.InitLogger(cfg) log.ReplaceGlobals(lg, p) - leader := s.cluster.GetServer(s.cluster.GetLeader()) + leader := suite.cluster.GetServer(suite.cluster.GetLeader()) input := map[string]interface{}{ "enable-audit": "true", } data, err := json.Marshal(input) - c.Assert(err, IsNil) + suite.NoError(err) req, _ := http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/service-middleware/config", bytes.NewBuffer(data)) resp, err := dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) resp.Body.Close() - c.Assert(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled(), Equals, true) + suite.True(leader.GetServer().GetServiceMiddlewarePersistOptions().IsAuditEnabled()) req, _ = http.NewRequest("POST", leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\"")) resp, err = dialClient.Do(req) - c.Assert(err, IsNil) + suite.NoError(err) _, err = io.ReadAll(resp.Body) resp.Body.Close() b, _ := os.ReadFile(tempStdoutFile.Name()) - c.Assert(strings.Contains(string(b), "Audit Log"), Equals, true) - c.Assert(err, IsNil) - c.Assert(resp.StatusCode, Equals, http.StatusOK) + suite.Contains(string(b), "Audit Log") + suite.NoError(err) + suite.Equal(http.StatusOK, resp.StatusCode) os.Remove(tempStdoutFile.Name()) } @@ -386,50 +381,54 @@ func BenchmarkDoRequestWithoutLocalLogAudit(b *testing.B) { cluster.Destroy() } -var _ = Suite(&testRedirectorSuite{}) - -type testRedirectorSuite struct { +type redirectorTestSuite struct { + suite.Suite cleanup func() cluster *tests.TestCluster } -func (s *testRedirectorSuite) SetUpSuite(c *C) { +func TestRedirectorTestSuite(t *testing.T) { + suite.Run(t, new(redirectorTestSuite)) +} + +func (suite *redirectorTestSuite) SetupSuite() { ctx, cancel := context.WithCancel(context.Background()) - s.cleanup = cancel + suite.cleanup = cancel cluster, err := 
tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { conf.TickInterval = typeutil.Duration{Duration: 50 * time.Millisecond} conf.ElectionInterval = typeutil.Duration{Duration: 250 * time.Millisecond} }) - c.Assert(err, IsNil) - c.Assert(cluster.RunInitialServers(), IsNil) - c.Assert(cluster.WaitLeader(), Not(HasLen), 0) - s.cluster = cluster + suite.NoError(err) + suite.NoError(cluster.RunInitialServers()) + suite.NotEmpty(cluster.WaitLeader(), 0) + suite.cluster = cluster } -func (s *testRedirectorSuite) TearDownSuite(c *C) { - s.cleanup() - s.cluster.Destroy() +func (suite *redirectorTestSuite) TearDownSuite() { + suite.cleanup() + suite.cluster.Destroy() } -func (s *testRedirectorSuite) TestRedirect(c *C) { - leader := s.cluster.GetServer(s.cluster.GetLeader()) - c.Assert(leader, NotNil) - header := mustRequestSuccess(c, leader.GetServer()) +func (suite *redirectorTestSuite) TestRedirect() { + re := suite.Require() + leader := suite.cluster.GetServer(suite.cluster.GetLeader()) + suite.NotNil(leader) + header := mustRequestSuccess(re, leader.GetServer()) header.Del("Date") - for _, svr := range s.cluster.GetServers() { + for _, svr := range suite.cluster.GetServers() { if svr != leader { - h := mustRequestSuccess(c, svr.GetServer()) + h := mustRequestSuccess(re, svr.GetServer()) h.Del("Date") - c.Assert(header, DeepEquals, h) + suite.Equal(h, header) } } } -func (s *testRedirectorSuite) TestAllowFollowerHandle(c *C) { +func (suite *redirectorTestSuite) TestAllowFollowerHandle() { // Find a follower. var follower *server.Server - leader := s.cluster.GetServer(s.cluster.GetLeader()) - for _, svr := range s.cluster.GetServers() { + leader := suite.cluster.GetServer(suite.cluster.GetLeader()) + for _, svr := range suite.cluster.GetServers() { if svr != leader { follower = svr.GetServer() break @@ -438,22 +437,22 @@ func (s *testRedirectorSuite) TestAllowFollowerHandle(c *C) { addr := follower.GetAddr() + "/pd/api/v1/version" request, err := http.NewRequest(http.MethodGet, addr, nil) - c.Assert(err, IsNil) + suite.NoError(err) request.Header.Add(serverapi.AllowFollowerHandle, "true") resp, err := dialClient.Do(request) - c.Assert(err, IsNil) - c.Assert(resp.Header.Get(serverapi.RedirectorHeader), Equals, "") + suite.NoError(err) + suite.Equal("", resp.Header.Get(serverapi.RedirectorHeader)) defer resp.Body.Close() - c.Assert(resp.StatusCode, Equals, http.StatusOK) + suite.Equal(http.StatusOK, resp.StatusCode) _, err = io.ReadAll(resp.Body) - c.Assert(err, IsNil) + suite.NoError(err) } -func (s *testRedirectorSuite) TestNotLeader(c *C) { +func (suite *redirectorTestSuite) TestNotLeader() { // Find a follower. var follower *server.Server - leader := s.cluster.GetServer(s.cluster.GetLeader()) - for _, svr := range s.cluster.GetServers() { + leader := suite.cluster.GetServer(suite.cluster.GetLeader()) + for _, svr := range suite.cluster.GetServers() { if svr != leader { follower = svr.GetServer() break @@ -463,55 +462,52 @@ func (s *testRedirectorSuite) TestNotLeader(c *C) { addr := follower.GetAddr() + "/pd/api/v1/version" // Request to follower without redirectorHeader is OK. 
request, err := http.NewRequest(http.MethodGet, addr, nil) - c.Assert(err, IsNil) + suite.NoError(err) resp, err := dialClient.Do(request) - c.Assert(err, IsNil) + suite.NoError(err) defer resp.Body.Close() - c.Assert(resp.StatusCode, Equals, http.StatusOK) + suite.Equal(http.StatusOK, resp.StatusCode) _, err = io.ReadAll(resp.Body) - c.Assert(err, IsNil) + suite.NoError(err) // Request to follower with redirectorHeader will fail. request.RequestURI = "" request.Header.Set(serverapi.RedirectorHeader, "pd") resp1, err := dialClient.Do(request) - c.Assert(err, IsNil) + suite.NoError(err) defer resp1.Body.Close() - c.Assert(resp1.StatusCode, Not(Equals), http.StatusOK) + suite.NotEqual(http.StatusOK, resp1.StatusCode) _, err = io.ReadAll(resp1.Body) - c.Assert(err, IsNil) + suite.NoError(err) } -func mustRequestSuccess(c *C, s *server.Server) http.Header { +func mustRequestSuccess(re *require.Assertions, s *server.Server) http.Header { resp, err := dialClient.Get(s.GetAddr() + "/pd/api/v1/version") - c.Assert(err, IsNil) + re.NoError(err) defer resp.Body.Close() _, err = io.ReadAll(resp.Body) - c.Assert(err, IsNil) - c.Assert(resp.StatusCode, Equals, http.StatusOK) + re.NoError(err) + re.Equal(http.StatusOK, resp.StatusCode) return resp.Header } -var _ = Suite(&testProgressSuite{}) - -type testProgressSuite struct{} - -func (s *testProgressSuite) TestRemovingProgress(c *C) { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`), IsNil) +func TestRemovingProgress(t *testing.T) { + re := require.New(t) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.Replication.MaxReplicas = 1 }) - c.Assert(err, IsNil) + re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leader := cluster.GetServer(cluster.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leader.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leader.GetAddr()) clusterID := leader.GetClusterID() req := &pdpb.BootstrapRequest{ Header: testutil.NewRequestHeader(clusterID), @@ -519,7 +515,7 @@ func (s *testProgressSuite) TestRemovingProgress(c *C) { Region: &metapb.Region{Id: 2, Peers: []*metapb.Peer{{Id: 3, StoreId: 1, Role: metapb.PeerRole_Voter}}}, } _, err = grpcPDClient.Bootstrap(context.Background(), req) - c.Assert(err, IsNil) + re.NoError(err) stores := []*metapb.Store{ { Id: 1, @@ -542,92 +538,95 @@ func (s *testProgressSuite) TestRemovingProgress(c *C) { } for _, store := range stores { - pdctl.MustPutStoreWithCheck(c, leader.GetServer(), store) + pdctl.MustPutStore(re, leader.GetServer(), store) } - pdctl.MustPutRegionWithCheck(c, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(60)) - pdctl.MustPutRegionWithCheck(c, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(30)) - pdctl.MustPutRegionWithCheck(c, cluster, 1002, 1, []byte("e"), []byte("f"), core.SetApproximateSize(50)) - pdctl.MustPutRegionWithCheck(c, cluster, 1003, 2, []byte("g"), []byte("h"), core.SetApproximateSize(40)) + pdctl.MustPutRegion(re, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(60)) + pdctl.MustPutRegion(re, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(30)) + pdctl.MustPutRegion(re, cluster, 
1002, 1, []byte("e"), []byte("f"), core.SetApproximateSize(50)) + pdctl.MustPutRegion(re, cluster, 1003, 2, []byte("g"), []byte("h"), core.SetApproximateSize(40)) // no store removing - output := sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusNotFound) - c.Assert(strings.Contains((string(output)), "no progress found for the action"), IsTrue) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?id=2", http.MethodGet, http.StatusNotFound) - c.Assert(strings.Contains((string(output)), "no progress found for the given store ID"), IsTrue) + output := sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusNotFound) + re.Contains((string(output)), "no progress found for the action") + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=2", http.MethodGet, http.StatusNotFound) + re.Contains((string(output)), "no progress found for the given store ID") // remove store 1 and store 2 - _ = sendRequest(c, leader.GetAddr()+"/pd/api/v1/store/1", http.MethodDelete, http.StatusOK) - _ = sendRequest(c, leader.GetAddr()+"/pd/api/v1/store/2", http.MethodDelete, http.StatusOK) + _ = sendRequest(re, leader.GetAddr()+"/pd/api/v1/store/1", http.MethodDelete, http.StatusOK) + _ = sendRequest(re, leader.GetAddr()+"/pd/api/v1/store/2", http.MethodDelete, http.StatusOK) // size is not changed. - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) var p api.Progress - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "removing") - c.Assert(p.Progress, Equals, 0.0) - c.Assert(p.CurrentSpeed, Equals, 0.0) - c.Assert(p.LeftSeconds, Equals, math.MaxFloat64) + re.NoError(json.Unmarshal(output, &p)) + re.Equal("removing", p.Action) + re.Equal(0.0, p.Progress) + re.Equal(0.0, p.CurrentSpeed) + re.Equal(math.MaxFloat64, p.LeftSeconds) // update size - pdctl.MustPutRegionWithCheck(c, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(20)) - pdctl.MustPutRegionWithCheck(c, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(10)) + pdctl.MustPutRegion(re, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(20)) + pdctl.MustPutRegion(re, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(10)) // is not prepared time.Sleep(2 * time.Second) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "removing") - c.Assert(p.Progress, Equals, 0.0) - c.Assert(p.CurrentSpeed, Equals, 0.0) - c.Assert(p.LeftSeconds, Equals, math.MaxFloat64) + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + re.Equal("removing", p.Action) + re.Equal(0.0, p.Progress) + re.Equal(0.0, p.CurrentSpeed) + re.Equal(math.MaxFloat64, p.LeftSeconds) leader.GetRaftCluster().SetPrepared() time.Sleep(2 * time.Second) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "removing") + output = sendRequest(re, 
leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + + re.Equal("removing", p.Action) // store 1: (60-20)/(60+50) ~= 0.36 // store 2: (30-10)/(30+40) ~= 0.28 // average progress ~= (0.36+0.28)/2 = 0.32 - c.Assert(fmt.Sprintf("%.2f", p.Progress), Equals, "0.32") + re.Equal("0.32", fmt.Sprintf("%.2f", p.Progress)) // store 1: 40/10s = 4 // store 2: 20/10s = 2 // average speed = (2+4)/2 = 33 - c.Assert(p.CurrentSpeed, Equals, 3.0) + re.Equal(3.0, p.CurrentSpeed) // store 1: (20+50)/4 = 17.5s // store 2: (10+40)/2 = 25s // average time = (17.5+25)/2 = 21.25s - c.Assert(p.LeftSeconds, Equals, 21.25) + re.Equal(21.25, p.LeftSeconds) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?id=2", http.MethodGet, http.StatusOK) - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "removing") + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=2", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + + re.Equal("removing", p.Action) // store 2: (30-10)/(30+40) ~= 0.285 - c.Assert(fmt.Sprintf("%.2f", p.Progress), Equals, "0.29") + re.Equal("0.29", fmt.Sprintf("%.2f", p.Progress)) // store 2: 20/10s = 2 - c.Assert(p.CurrentSpeed, Equals, 2.0) + re.Equal(2.0, p.CurrentSpeed) // store 2: (10+40)/2 = 25s - c.Assert(p.LeftSeconds, Equals, 25.0) + re.Equal(25.0, p.LeftSeconds) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs"), IsNil) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs")) } -func (s *testProgressSuite) TestPreparingProgress(c *C) { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`), IsNil) +func TestPreparingProgress(t *testing.T) { + re := require.New(t) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.Replication.MaxReplicas = 1 }) - c.Assert(err, IsNil) + re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leader := cluster.GetServer(cluster.GetLeader()) - grpcPDClient := testutil.MustNewGrpcClient(c, leader.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leader.GetAddr()) clusterID := leader.GetClusterID() req := &pdpb.BootstrapRequest{ Header: testutil.NewRequestHeader(clusterID), @@ -635,7 +634,7 @@ func (s *testProgressSuite) TestPreparingProgress(c *C) { Region: &metapb.Region{Id: 2, Peers: []*metapb.Peer{{Id: 3, StoreId: 1, Role: metapb.PeerRole_Voter}}}, } _, err = grpcPDClient.Bootstrap(context.Background(), req) - c.Assert(err, IsNil) + re.NoError(err) stores := []*metapb.Store{ { Id: 1, @@ -675,80 +674,83 @@ func (s *testProgressSuite) TestPreparingProgress(c *C) { } for _, store := range stores { - pdctl.MustPutStoreWithCheck(c, leader.GetServer(), store) + pdctl.MustPutStore(re, leader.GetServer(), store) } for i := 0; i < 100; i++ { - pdctl.MustPutRegionWithCheck(c, cluster, uint64(i+1), uint64(i)%3+1, []byte(fmt.Sprintf("p%d", i)), []byte(fmt.Sprintf("%d", i+1)), core.SetApproximateSize(10)) + pdctl.MustPutRegion(re, cluster, uint64(i+1), uint64(i)%3+1, []byte(fmt.Sprintf("p%d", i)), []byte(fmt.Sprintf("%d", i+1)), core.SetApproximateSize(10)) } // no 
store preparing - output := sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusNotFound) - c.Assert(strings.Contains((string(output)), "no progress found for the action"), IsTrue) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusNotFound) - c.Assert(strings.Contains((string(output)), "no progress found for the given store ID"), IsTrue) + output := sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusNotFound) + re.Contains((string(output)), "no progress found for the action") + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusNotFound) + re.Contains((string(output)), "no progress found for the given store ID") // is not prepared time.Sleep(2 * time.Second) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusNotFound) - c.Assert(strings.Contains((string(output)), "no progress found for the action"), IsTrue) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusNotFound) - c.Assert(strings.Contains((string(output)), "no progress found for the given store ID"), IsTrue) + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusNotFound) + re.Contains((string(output)), "no progress found for the action") + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusNotFound) + re.Contains((string(output)), "no progress found for the given store ID") // size is not changed. leader.GetRaftCluster().SetPrepared() time.Sleep(2 * time.Second) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) var p api.Progress - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "preparing") - c.Assert(p.Progress, Equals, 0.0) - c.Assert(p.CurrentSpeed, Equals, 0.0) - c.Assert(p.LeftSeconds, Equals, math.MaxFloat64) + re.NoError(json.Unmarshal(output, &p)) + + re.Equal("preparing", p.Action) + re.Equal(0.0, p.Progress) + re.Equal(0.0, p.CurrentSpeed) + re.Equal(math.MaxFloat64, p.LeftSeconds) // update size - pdctl.MustPutRegionWithCheck(c, cluster, 1000, 4, []byte(fmt.Sprintf("%d", 1000)), []byte(fmt.Sprintf("%d", 1001)), core.SetApproximateSize(10)) - pdctl.MustPutRegionWithCheck(c, cluster, 1001, 5, []byte(fmt.Sprintf("%d", 1001)), []byte(fmt.Sprintf("%d", 1002)), core.SetApproximateSize(40)) + pdctl.MustPutRegion(re, cluster, 1000, 4, []byte(fmt.Sprintf("%d", 1000)), []byte(fmt.Sprintf("%d", 1001)), core.SetApproximateSize(10)) + pdctl.MustPutRegion(re, cluster, 1001, 5, []byte(fmt.Sprintf("%d", 1001)), []byte(fmt.Sprintf("%d", 1002)), core.SetApproximateSize(40)) time.Sleep(2 * time.Second) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "preparing") + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + + re.Equal("preparing", p.Action) // store 4: 10/(210*0.9) ~= 0.05 // store 5: 40/(210*0.9) ~= 0.21 // average 
progress ~= (0.05+0.21)/2 = 0.13 - c.Assert(fmt.Sprintf("%.2f", p.Progress), Equals, "0.13") + re.Equal("0.13", fmt.Sprintf("%.2f", p.Progress)) // store 4: 10/10s = 1 // store 5: 40/10s = 4 // average speed = (1+4)/2 = 2.5 - c.Assert(p.CurrentSpeed, Equals, 2.5) + re.Equal(2.5, p.CurrentSpeed) // store 4: 179/1 ~= 179 // store 5: 149/4 ~= 37.25 // average time ~= (179+37.25)/2 = 108.125 - c.Assert(p.LeftSeconds, Equals, 108.125) + re.Equal(108.125, p.LeftSeconds) + + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) - output = sendRequest(c, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusOK) - c.Assert(json.Unmarshal(output, &p), IsNil) - c.Assert(p.Action, Equals, "preparing") - c.Assert(fmt.Sprintf("%.2f", p.Progress), Equals, "0.05") - c.Assert(p.CurrentSpeed, Equals, 1.0) - c.Assert(p.LeftSeconds, Equals, 179.0) + re.Equal("preparing", p.Action) + re.Equal("0.05", fmt.Sprintf("%.2f", p.Progress)) + re.Equal(1.0, p.CurrentSpeed) + re.Equal(179.0, p.LeftSeconds) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs"), IsNil) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs")) } -func sendRequest(c *C, url string, method string, statusCode int) []byte { +func sendRequest(re *require.Assertions, url string, method string, statusCode int) []byte { req, _ := http.NewRequest(method, url, nil) resp, err := dialClient.Do(req) - c.Assert(err, IsNil) - c.Assert(resp.StatusCode, Equals, statusCode) + re.NoError(err) + re.Equal(statusCode, resp.StatusCode) output, err := io.ReadAll(resp.Body) - c.Assert(err, IsNil) + re.NoError(err) resp.Body.Close() return output } -func mustWaitLeader(c *C, svrs map[string]*tests.TestServer) *server.Server { +func (suite *middlewareTestSuite) mustWaitLeader() *server.Server { var leader *server.Server - testutil.WaitUntil(c, func() bool { - for _, s := range svrs { + testutil.Eventually(suite.Require(), func() bool { + for _, s := range suite.cluster.GetServers() { if !s.GetServer().IsClosed() && s.GetServer().GetMember().IsLeader() { leader = s.GetServer() return true diff --git a/tests/server/storage/hot_region_storage_test.go b/tests/server/storage/hot_region_storage_test.go index 662f128dd1bf..4dcf211aabc6 100644 --- a/tests/server/storage/hot_region_storage_test.go +++ b/tests/server/storage/hot_region_storage_test.go @@ -19,9 +19,9 @@ import ( "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/statistics" @@ -30,15 +30,8 @@ import ( "github.com/tikv/pd/tests/pdctl" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&hotRegionHistorySuite{}) - -type hotRegionHistorySuite struct{} - -func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) { +func TestHotRegionStorage(t *testing.T) { + re := require.New(t) statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -49,9 +42,9 @@ func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) { cfg.Schedule.HotRegionsReservedDays = 1 }, ) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() stores := []*metapb.Store{ { @@ -67,16 +60,17 @@ func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) { } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) + for _, store := range stores { - pdctl.MustPutStoreWithCheck(c, leaderServer.GetServer(), store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) } defer cluster.Destroy() startTime := time.Now().UnixNano() / int64(time.Millisecond) - pdctl.MustPutRegionWithCheck(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) - pdctl.MustPutRegionWithCheck(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) - pdctl.MustPutRegionWithCheck(c, cluster, 3, 1, []byte("e"), []byte("f")) - pdctl.MustPutRegionWithCheck(c, cluster, 4, 2, []byte("g"), []byte("h")) + pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 3, 1, []byte("e"), []byte("f")) + pdctl.MustPutRegion(re, cluster, 4, 2, []byte("g"), []byte("h")) storeStats := []*pdpb.StoreStats{ { StoreId: 1, @@ -108,39 +102,40 @@ func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) { hotRegionStorage := leaderServer.GetServer().GetHistoryHotRegionStorage() iter := hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime, endTime) next, err := iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(1)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NotNil(next) + re.NoError(err) + re.Equal(uint64(1), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(2)) - c.Assert(next.StoreID, Equals, uint64(2)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NotNil(next) + re.NoError(err) + re.Equal(uint64(2), next.RegionID) + re.Equal(uint64(2), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(next, IsNil) - c.Assert(err, IsNil) + re.Nil(next) + 
re.NoError(err) iter = hotRegionStorage.NewIterator([]string{storage.ReadType.String()}, startTime, endTime) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(3)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.ReadType.String()) + re.NotNil(next) + re.NoError(err) + re.Equal(uint64(3), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.ReadType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(4)) - c.Assert(next.StoreID, Equals, uint64(2)) - c.Assert(next.HotRegionType, Equals, storage.ReadType.String()) + re.NotNil(next) + re.NoError(err) + re.Equal(uint64(4), next.RegionID) + re.Equal(uint64(2), next.StoreID) + re.Equal(storage.ReadType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(next, IsNil) - c.Assert(err, IsNil) + re.Nil(next) + re.NoError(err) } -func (s *hotRegionHistorySuite) TestHotRegionStorageReservedDayConfigChange(c *C) { +func TestHotRegionStorageReservedDayConfigChange(t *testing.T) { + re := require.New(t) statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) interval := 100 * time.Millisecond @@ -152,9 +147,9 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageReservedDayConfigChange(c *C cfg.Schedule.HotRegionsReservedDays = 1 }, ) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() stores := []*metapb.Store{ { @@ -170,46 +165,49 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageReservedDayConfigChange(c *C } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) + for _, store := range stores { - pdctl.MustPutStoreWithCheck(c, leaderServer.GetServer(), store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) } defer cluster.Destroy() startTime := time.Now().UnixNano() / int64(time.Millisecond) - pdctl.MustPutRegionWithCheck(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) // wait hot scheduler starts time.Sleep(5000 * time.Millisecond) endTime := time.Now().UnixNano() / int64(time.Millisecond) hotRegionStorage := leaderServer.GetServer().GetHistoryHotRegionStorage() iter := hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime, endTime) next, err := iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(1)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NotNil(next) + re.NoError(err) + re.Equal(uint64(1), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(err, IsNil) - c.Assert(next, IsNil) + re.NoError(err) + re.Nil(next) + schedule := leaderServer.GetConfig().Schedule // set reserved day to zero,close hot region storage schedule.HotRegionsReservedDays = 0 leaderServer.GetServer().SetScheduleConfig(schedule) time.Sleep(3 * interval) - pdctl.MustPutRegionWithCheck(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), 
core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) time.Sleep(10 * interval) endTime = time.Now().UnixNano() / int64(time.Millisecond) hotRegionStorage = leaderServer.GetServer().GetHistoryHotRegionStorage() iter = hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime, endTime) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(1)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NotNil(next) + re.NoError(err) + re.Equal(uint64(1), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(err, IsNil) - c.Assert(next, IsNil) + re.NoError(err) + re.Nil(next) + // set reserved day to one,open hot region storage schedule.HotRegionsReservedDays = 1 leaderServer.GetServer().SetScheduleConfig(schedule) @@ -218,20 +216,21 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageReservedDayConfigChange(c *C hotRegionStorage = leaderServer.GetServer().GetHistoryHotRegionStorage() iter = hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime, endTime) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(1)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NotNil(next) + re.NoError(err) + re.Equal(uint64(1), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(2)) - c.Assert(next.StoreID, Equals, uint64(2)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NotNil(next) + re.NoError(err) + re.Equal(uint64(2), next.RegionID) + re.Equal(uint64(2), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) } -func (s *hotRegionHistorySuite) TestHotRegionStorageWriteIntervalConfigChange(c *C) { +func TestHotRegionStorageWriteIntervalConfigChange(t *testing.T) { + re := require.New(t) statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) interval := 100 * time.Millisecond @@ -243,9 +242,9 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageWriteIntervalConfigChange(c cfg.Schedule.HotRegionsReservedDays = 1 }, ) - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() stores := []*metapb.Store{ { @@ -261,45 +260,47 @@ func (s *hotRegionHistorySuite) TestHotRegionStorageWriteIntervalConfigChange(c } leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) + for _, store := range stores { - pdctl.MustPutStoreWithCheck(c, leaderServer.GetServer(), store) + pdctl.MustPutStore(re, leaderServer.GetServer(), store) } defer cluster.Destroy() startTime := time.Now().UnixNano() / int64(time.Millisecond) - pdctl.MustPutRegionWithCheck(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), 
core.SetReportInterval(statistics.WriteReportInterval)) // wait hot scheduler starts time.Sleep(5000 * time.Millisecond) endTime := time.Now().UnixNano() / int64(time.Millisecond) hotRegionStorage := leaderServer.GetServer().GetHistoryHotRegionStorage() iter := hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime, endTime) next, err := iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(1)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NotNil(next) + re.NoError(err) + re.Equal(uint64(1), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(err, IsNil) - c.Assert(next, IsNil) + re.NoError(err) + re.Nil(next) + schedule := leaderServer.GetConfig().Schedule // set the time to 20 times the interval schedule.HotRegionsWriteInterval.Duration = 20 * interval leaderServer.GetServer().SetScheduleConfig(schedule) time.Sleep(3 * interval) - pdctl.MustPutRegionWithCheck(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval)) time.Sleep(10 * interval) endTime = time.Now().UnixNano() / int64(time.Millisecond) // it cant get new hot region because wait time smaller than hot region write interval hotRegionStorage = leaderServer.GetServer().GetHistoryHotRegionStorage() iter = hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime, endTime) next, err = iter.Next() - c.Assert(next, NotNil) - c.Assert(err, IsNil) - c.Assert(next.RegionID, Equals, uint64(1)) - c.Assert(next.StoreID, Equals, uint64(1)) - c.Assert(next.HotRegionType, Equals, storage.WriteType.String()) + re.NotNil(next) + re.NoError(err) + re.Equal(uint64(1), next.RegionID) + re.Equal(uint64(1), next.StoreID) + re.Equal(storage.WriteType.String(), next.HotRegionType) next, err = iter.Next() - c.Assert(err, IsNil) - c.Assert(next, IsNil) + re.NoError(err) + re.Nil(next) }
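
For reviewers unfamiliar with the target style, below is a minimal, self-contained sketch of the check-to-testify pattern this diff applies: standalone tests build assertions with `require.New(t)`, grouped tests embed `suite.Suite` and are wired in through a single `TestXxxTestSuite` entry point, `SetUpSuite(c)`/`TearDownSuite(c)` become `SetupSuite()`/`TearDownSuite()`, and helpers that used to take `*check.C` (e.g. `mustRequestSuccess`, `sendRequest`) now take `*require.Assertions`, with `testutil.WaitUntil(c, ...)` replaced by `testutil.Eventually(re, ...)`. The package and type names here are illustrative only and are not part of the change.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

// Standalone tests take *testing.T and build assertions with require.New.
func TestStandalone(t *testing.T) {
	re := require.New(t)
	re.NoError(nil)    // replaces c.Assert(err, IsNil)
	re.Equal(200, 200) // replaces c.Assert(got, Equals, want)
}

// Grouped tests embed suite.Suite instead of registering a check.Suite
// with `var _ = Suite(&...{})`.
type exampleTestSuite struct {
	suite.Suite
}

// The suite is wired into `go test` through a single entry point.
func TestExampleTestSuite(t *testing.T) {
	suite.Run(t, new(exampleTestSuite))
}

// SetupSuite/TearDownSuite replace SetUpSuite(c)/TearDownSuite(c).
func (suite *exampleTestSuite) SetupSuite()    {}
func (suite *exampleTestSuite) TearDownSuite() {}

// Assertions hang off the suite directly; suite.Require() yields a
// *require.Assertions for helpers that take one.
func (suite *exampleTestSuite) TestSomething() {
	suite.NoError(nil)
	re := suite.Require()
	re.Equal(1, 1)
}
```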