diff --git a/pkg/mcs/scheduling/server/apis/v1/api.go b/pkg/mcs/scheduling/server/apis/v1/api.go
index b34e79b9ea8..2b2a454d9b5 100644
--- a/pkg/mcs/scheduling/server/apis/v1/api.go
+++ b/pkg/mcs/scheduling/server/apis/v1/api.go
@@ -18,7 +18,6 @@ import (
 	"net/http"
 	"strconv"
 	"sync"
-	"time"
 
 	"github.com/gin-contrib/cors"
 	"github.com/gin-contrib/gzip"
@@ -129,12 +128,17 @@ func (s *Service) RegisterAdminRouter() {
 func (s *Service) RegisterSchedulersRouter() {
	router := s.root.Group("schedulers")
	router.GET("", getSchedulers)
+	router.GET("/diagnostic/:name", getDiagnosticResult)
+	// TODO: in the future, we should split pauseOrResumeScheduler into two different APIs.
+	// The API middleware will then need to do one-to-two forwarding.
+	router.POST("/:name", pauseOrResumeScheduler)
 }
 
 // RegisterCheckersRouter registers the router of the checkers handler.
 func (s *Service) RegisterCheckersRouter() {
	router := s.root.Group("checkers")
	router.GET("/:name", getCheckerByName)
+	router.POST("/:name", pauseOrResumeChecker)
 }
 
 // RegisterOperatorsRouter registers the router of the operators handler.
@@ -304,24 +308,54 @@ func createOperator(c *gin.Context) {
 // @Failure 500 {string} string "PD server failed to proceed the request."
 // @Router /checkers/{name} [get]
 func getCheckerByName(c *gin.Context) {
-	svr := c.MustGet(multiservicesapi.ServiceContextKey).(*scheserver.Server)
+	handler := c.MustGet(handlerKey).(*handler.Handler)
 	name := c.Param("name")
-	co := svr.GetCoordinator()
-	isPaused, err := co.IsCheckerPaused(name)
+	output, err := handler.GetCheckerStatus(name)
 	if err != nil {
 		c.String(http.StatusInternalServerError, err.Error())
 		return
 	}
-	output := map[string]bool{
-		"paused": isPaused,
-	}
 	c.IndentedJSON(http.StatusOK, output)
 }
 
-type schedulerPausedPeriod struct {
-	Name     string    `json:"name"`
-	PausedAt time.Time `json:"paused_at"`
-	ResumeAt time.Time `json:"resume_at"`
+// FIXME: details of input json body params
+// @Tags checker
+// @Summary Pause or resume a checker.
+// @Accept json
+// @Param name path string true "The name of the checker."
+// @Param body body object true "json params"
+// @Produce json
+// @Success 200 {string} string "Pause or resume the checker successfully."
+// @Failure 400 {string} string "Bad format request."
+// @Failure 500 {string} string "PD server failed to proceed the request."
+// @Router /checkers/{name} [post]
+func pauseOrResumeChecker(c *gin.Context) {
+	handler := c.MustGet(handlerKey).(*handler.Handler)
+	var input map[string]int
+	if err := c.BindJSON(&input); err != nil {
+		c.String(http.StatusBadRequest, err.Error())
+		return
+	}
+
+	name := c.Param("name")
+	t, ok := input["delay"]
+	if !ok {
+		c.String(http.StatusBadRequest, "missing pause time")
+		return
+	}
+	if t < 0 {
+		c.String(http.StatusBadRequest, "delay cannot be negative")
+		return
+	}
+	if err := handler.PauseOrResumeChecker(name, int64(t)); err != nil {
+		c.String(http.StatusInternalServerError, err.Error())
+		return
+	}
+	if t == 0 {
+		c.String(http.StatusOK, "Resume the checker successfully.")
+	} else {
+		c.String(http.StatusOK, "Pause the checker successfully.")
+	}
 }
 
 // @Tags schedulers
@@ -331,70 +365,63 @@ type schedulerPausedPeriod struct {
 // @Failure 500 {string} string "PD server failed to proceed the request."
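+// The "status" query parameter filters the result: "paused" or "disabled"; any other
+// value lists all schedulers. Adding a "timestamp" query parameter returns paused
+// schedulers together with their pause and resume times.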
 // @Router /schedulers [get]
 func getSchedulers(c *gin.Context) {
-	svr := c.MustGet(multiservicesapi.ServiceContextKey).(*scheserver.Server)
-	co := svr.GetCoordinator()
-	sc := co.GetSchedulersController()
-	schedulers := sc.GetSchedulerNames()
-
+	handler := c.MustGet(handlerKey).(*handler.Handler)
 	status := c.Query("status")
 	_, needTS := c.GetQuery("timestamp")
-	switch status {
-	case "paused":
-		var pausedSchedulers []string
-		pausedPeriods := []schedulerPausedPeriod{}
-		for _, scheduler := range schedulers {
-			paused, err := sc.IsSchedulerPaused(scheduler)
-			if err != nil {
-				c.String(http.StatusInternalServerError, err.Error())
-				return
-			}
-
-			if paused {
-				if needTS {
-					s := schedulerPausedPeriod{
-						Name:     scheduler,
-						PausedAt: time.Time{},
-						ResumeAt: time.Time{},
-					}
-					pausedAt, err := sc.GetPausedSchedulerDelayAt(scheduler)
-					if err != nil {
-						c.String(http.StatusInternalServerError, err.Error())
-						return
-					}
-					s.PausedAt = time.Unix(pausedAt, 0)
-					resumeAt, err := sc.GetPausedSchedulerDelayUntil(scheduler)
-					if err != nil {
-						c.String(http.StatusInternalServerError, err.Error())
-						return
-					}
-					s.ResumeAt = time.Unix(resumeAt, 0)
-					pausedPeriods = append(pausedPeriods, s)
-				} else {
-					pausedSchedulers = append(pausedSchedulers, scheduler)
-				}
-			}
-		}
-		if needTS {
-			c.IndentedJSON(http.StatusOK, pausedPeriods)
-		} else {
-			c.IndentedJSON(http.StatusOK, pausedSchedulers)
-		}
+	output, err := handler.GetSchedulerByStatus(status, needTS)
+	if err != nil {
+		c.String(http.StatusInternalServerError, err.Error())
+		return
+	}
+	c.IndentedJSON(http.StatusOK, output)
+}
+
+// @Tags schedulers
+// @Summary Get the diagnostic result of the specified scheduler.
+// @Produce json
+// @Success 200 {object} schedulers.DiagnosticResult
+// @Failure 500 {string} string "PD server failed to proceed the request."
+// @Router /schedulers/diagnostic/{name} [get]
+func getDiagnosticResult(c *gin.Context) {
+	handler := c.MustGet(handlerKey).(*handler.Handler)
+	name := c.Param("name")
+	result, err := handler.GetDiagnosticResult(name)
+	if err != nil {
+		c.String(http.StatusInternalServerError, err.Error())
+		return
+	}
+	c.IndentedJSON(http.StatusOK, result)
+}
+
+// FIXME: details of input json body params
+// @Tags scheduler
+// @Summary Pause or resume a scheduler.
+// @Accept json
+// @Param name path string true "The name of the scheduler."
+// @Param body body object true "json params"
+// @Produce json
+// @Success 200 {string} string "Pause or resume the scheduler successfully."
+// @Failure 400 {string} string "Bad format request."
+// @Failure 500 {string} string "PD server failed to proceed the request."
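+// A request body of {"delay": 30} pauses the scheduler for 30 seconds, while
+// {"delay": 0} resumes a paused scheduler.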
+// @Router /schedulers/{name} [post] +func pauseOrResumeScheduler(c *gin.Context) { + handler := c.MustGet(handlerKey).(*handler.Handler) + + var input map[string]int64 + if err := c.BindJSON(&input); err != nil { + c.String(http.StatusBadRequest, err.Error()) + return + } + + name := c.Param("name") + t, ok := input["delay"] + if !ok { + c.String(http.StatusBadRequest, "missing pause time") + return + } + if err := handler.PauseOrResumeScheduler(name, t); err != nil { + c.String(http.StatusInternalServerError, err.Error()) return - case "disabled": - var disabledSchedulers []string - for _, scheduler := range schedulers { - disabled, err := sc.IsSchedulerDisabled(scheduler) - if err != nil { - c.String(http.StatusInternalServerError, err.Error()) - return - } - - if disabled { - disabledSchedulers = append(disabledSchedulers, scheduler) - } - } - c.IndentedJSON(http.StatusOK, disabledSchedulers) - default: - c.IndentedJSON(http.StatusOK, schedulers) } + c.String(http.StatusOK, "Pause or resume the scheduler successfully.") } diff --git a/pkg/schedule/handler/handler.go b/pkg/schedule/handler/handler.go index d9c162ac1cc..d45fd685fa2 100644 --- a/pkg/schedule/handler/handler.go +++ b/pkg/schedule/handler/handler.go @@ -33,7 +33,9 @@ import ( "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/schedule/scatter" + "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/pkg/utils/typeutil" + "go.uber.org/zap" ) // Server is the interface for handler about schedule. @@ -720,3 +722,152 @@ func parseStoreIDsAndPeerRole(ids interface{}, roles interface{}) (map[uint64]pl } return storeIDToPeerRole, true } + +// GetCheckerStatus returns the status of the checker. +func (h *Handler) GetCheckerStatus(name string) (map[string]bool, error) { + co := h.GetCoordinator() + if co == nil { + return nil, errs.ErrNotBootstrapped.GenWithStackByArgs() + } + isPaused, err := co.IsCheckerPaused(name) + if err != nil { + return nil, err + } + return map[string]bool{ + "paused": isPaused, + }, nil +} + +// GetSchedulerNames returns all names of schedulers. +func (h *Handler) GetSchedulerNames() ([]string, error) { + co := h.GetCoordinator() + if co == nil { + return nil, errs.ErrNotBootstrapped.GenWithStackByArgs() + } + return co.GetSchedulersController().GetSchedulerNames(), nil +} + +type schedulerPausedPeriod struct { + Name string `json:"name"` + PausedAt time.Time `json:"paused_at"` + ResumeAt time.Time `json:"resume_at"` +} + +// GetSchedulerByStatus returns all names of schedulers by status. 
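+// status may be "paused" or "disabled"; any other value (including empty) returns the
+// names of all schedulers. When needTS is true, paused schedulers are reported as
+// schedulerPausedPeriod entries carrying their paused-at and resume-at timestamps.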
+func (h *Handler) GetSchedulerByStatus(status string, needTS bool) (interface{}, error) { + co := h.GetCoordinator() + if co == nil { + return nil, errs.ErrNotBootstrapped.GenWithStackByArgs() + } + sc := co.GetSchedulersController() + schedulers := sc.GetSchedulerNames() + switch status { + case "paused": + var pausedSchedulers []string + pausedPeriods := []schedulerPausedPeriod{} + for _, scheduler := range schedulers { + paused, err := sc.IsSchedulerPaused(scheduler) + if err != nil { + return nil, err + } + if paused { + if needTS { + s := schedulerPausedPeriod{ + Name: scheduler, + PausedAt: time.Time{}, + ResumeAt: time.Time{}, + } + pausedAt, err := sc.GetPausedSchedulerDelayAt(scheduler) + if err != nil { + return nil, err + } + s.PausedAt = time.Unix(pausedAt, 0) + resumeAt, err := sc.GetPausedSchedulerDelayUntil(scheduler) + if err != nil { + return nil, err + } + s.ResumeAt = time.Unix(resumeAt, 0) + pausedPeriods = append(pausedPeriods, s) + } else { + pausedSchedulers = append(pausedSchedulers, scheduler) + } + } + } + if needTS { + return pausedPeriods, nil + } + return pausedSchedulers, nil + case "disabled": + var disabledSchedulers []string + for _, scheduler := range schedulers { + disabled, err := sc.IsSchedulerDisabled(scheduler) + if err != nil { + return nil, err + } + if disabled { + disabledSchedulers = append(disabledSchedulers, scheduler) + } + } + return disabledSchedulers, nil + default: + return schedulers, nil + } +} + +// GetDiagnosticResult returns the diagnostic results of the specified scheduler. +func (h *Handler) GetDiagnosticResult(name string) (*schedulers.DiagnosticResult, error) { + if _, ok := schedulers.DiagnosableSummaryFunc[name]; !ok { + return nil, errs.ErrSchedulerUndiagnosable.FastGenByArgs(name) + } + co := h.GetCoordinator() + if co == nil { + return nil, errs.ErrNotBootstrapped.GenWithStackByArgs() + } + result, err := co.GetDiagnosticResult(name) + if err != nil { + return nil, err + } + return result, nil +} + +// PauseOrResumeScheduler pauses a scheduler for delay seconds or resume a paused scheduler. +// t == 0 : resume scheduler. +// t > 0 : scheduler delays t seconds. +func (h *Handler) PauseOrResumeScheduler(name string, t int64) (err error) { + co := h.GetCoordinator() + if co == nil { + return errs.ErrNotBootstrapped.GenWithStackByArgs() + } + if err = co.GetSchedulersController().PauseOrResumeScheduler(name, t); err != nil { + if t == 0 { + log.Error("can not resume scheduler", zap.String("scheduler-name", name), errs.ZapError(err)) + } else { + log.Error("can not pause scheduler", zap.String("scheduler-name", name), errs.ZapError(err)) + } + } else { + if t == 0 { + log.Info("resume scheduler successfully", zap.String("scheduler-name", name)) + } else { + log.Info("pause scheduler successfully", zap.String("scheduler-name", name), zap.Int64("pause-seconds", t)) + } + } + return err +} + +// PauseOrResumeChecker pauses checker for delay seconds or resume checker +// t == 0 : resume checker. +// t > 0 : checker delays t seconds. 
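+// Unlike PauseOrResumeScheduler above, only failures are logged here; successful
+// checker pauses and resumes are not logged.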
+func (h *Handler) PauseOrResumeChecker(name string, t int64) (err error) { + co := h.GetCoordinator() + if co == nil { + return errs.ErrNotBootstrapped.GenWithStackByArgs() + } + if err = co.PauseOrResumeChecker(name, t); err != nil { + if t == 0 { + log.Error("can not resume checker", zap.String("checker-name", name), errs.ZapError(err)) + } else { + log.Error("can not pause checker", zap.String("checker-name", name), errs.ZapError(err)) + } + } + return err +} diff --git a/pkg/utils/apiutil/serverapi/middleware.go b/pkg/utils/apiutil/serverapi/middleware.go index 063ad042dbb..19438ad0f91 100644 --- a/pkg/utils/apiutil/serverapi/middleware.go +++ b/pkg/utils/apiutil/serverapi/middleware.go @@ -112,6 +112,9 @@ func (h *redirector) matchMicroServiceRedirectRules(r *http.Request) (bool, stri if len(h.microserviceRedirectRules) == 0 { return false, "" } + // Remove trailing '/' from the URL path + // It will be helpful when matching the redirect rules "schedulers" or "schedulers/{name}" + r.URL.Path = strings.TrimRight(r.URL.Path, "/") for _, rule := range h.microserviceRedirectRules { if strings.HasPrefix(r.URL.Path, rule.matchPath) && slice.Contains(rule.matchMethods, r.Method) { addr, ok := h.s.GetServicePrimaryAddr(r.Context(), rule.targetServiceName) @@ -131,6 +134,8 @@ func (h *redirector) matchMicroServiceRedirectRules(r *http.Request) (bool, stri } else { r.URL.Path = rule.targetPath } + log.Debug("redirect to micro service", zap.String("path", r.URL.Path), zap.String("target", addr), + zap.String("method", r.Method)) return true, addr } } diff --git a/server/api/checker.go b/server/api/checker.go index 09dc81366b9..709e641c37b 100644 --- a/server/api/checker.go +++ b/server/api/checker.go @@ -83,13 +83,10 @@ func (c *checkerHandler) PauseOrResumeChecker(w http.ResponseWriter, r *http.Req // @Router /checker/{name} [get] func (c *checkerHandler) GetCheckerStatus(w http.ResponseWriter, r *http.Request) { name := mux.Vars(r)["name"] - isPaused, err := c.IsCheckerPaused(name) + output, err := c.Handler.GetCheckerStatus(name) if err != nil { c.r.JSON(w, http.StatusInternalServerError, err.Error()) return } - output := map[string]bool{ - "paused": isPaused, - } c.r.JSON(w, http.StatusOK, output) } diff --git a/server/api/diagnostic.go b/server/api/diagnostic.go index f83f9c83efb..1a05b0d83b8 100644 --- a/server/api/diagnostic.go +++ b/server/api/diagnostic.go @@ -18,32 +18,27 @@ import ( "net/http" "github.com/gorilla/mux" - "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/server" "github.com/unrolled/render" ) type diagnosticHandler struct { - svr *server.Server - rd *render.Render + handler *server.Handler + svr *server.Server + rd *render.Render } func newDiagnosticHandler(svr *server.Server, rd *render.Render) *diagnosticHandler { return &diagnosticHandler{ - svr: svr, - rd: rd, + handler: svr.GetHandler(), + svr: svr, + rd: rd, } } func (h *diagnosticHandler) GetDiagnosticResult(w http.ResponseWriter, r *http.Request) { name := mux.Vars(r)["name"] - if _, ok := schedulers.DiagnosableSummaryFunc[name]; !ok { - h.rd.JSON(w, http.StatusBadRequest, errs.ErrSchedulerUndiagnosable.FastGenByArgs(name).Error()) - return - } - rc := getCluster(r) - result, err := rc.GetCoordinator().GetDiagnosticResult(name) + result, err := h.handler.GetDiagnosticResult(name) if err != nil { h.rd.JSON(w, http.StatusInternalServerError, err.Error()) return diff --git a/server/api/scheduler.go b/server/api/scheduler.go index c2691ea9826..dea798edb39 100644 --- 
a/server/api/scheduler.go +++ b/server/api/scheduler.go @@ -18,7 +18,6 @@ import ( "net/http" "net/url" "strings" - "time" "github.com/gorilla/mux" "github.com/pingcap/errors" @@ -43,12 +42,6 @@ func newSchedulerHandler(svr *server.Server, r *render.Render) *schedulerHandler } } -type schedulerPausedPeriod struct { - Name string `json:"name"` - PausedAt time.Time `json:"paused_at"` - ResumeAt time.Time `json:"resume_at"` -} - // @Tags scheduler // @Summary List all created schedulers by status. // @Produce json @@ -56,73 +49,14 @@ type schedulerPausedPeriod struct { // @Failure 500 {string} string "PD server failed to proceed the request." // @Router /schedulers [get] func (h *schedulerHandler) GetSchedulers(w http.ResponseWriter, r *http.Request) { - schedulers, err := h.Handler.GetSchedulers() + status := r.URL.Query().Get("status") + _, needTS := r.URL.Query()["timestamp"] + output, err := h.Handler.GetSchedulerByStatus(status, needTS) if err != nil { h.r.JSON(w, http.StatusInternalServerError, err.Error()) return } - - status := r.URL.Query().Get("status") - _, tsFlag := r.URL.Query()["timestamp"] - switch status { - case "paused": - var pausedSchedulers []string - pausedPeriods := []schedulerPausedPeriod{} - for _, scheduler := range schedulers { - paused, err := h.Handler.IsSchedulerPaused(scheduler) - if err != nil { - h.r.JSON(w, http.StatusInternalServerError, err.Error()) - return - } - - if paused { - if tsFlag { - s := schedulerPausedPeriod{ - Name: scheduler, - PausedAt: time.Time{}, - ResumeAt: time.Time{}, - } - pausedAt, err := h.Handler.GetPausedSchedulerDelayAt(scheduler) - if err != nil { - h.r.JSON(w, http.StatusInternalServerError, err.Error()) - return - } - s.PausedAt = time.Unix(pausedAt, 0) - resumeAt, err := h.Handler.GetPausedSchedulerDelayUntil(scheduler) - if err != nil { - h.r.JSON(w, http.StatusInternalServerError, err.Error()) - return - } - s.ResumeAt = time.Unix(resumeAt, 0) - pausedPeriods = append(pausedPeriods, s) - } else { - pausedSchedulers = append(pausedSchedulers, scheduler) - } - } - } - if tsFlag { - h.r.JSON(w, http.StatusOK, pausedPeriods) - } else { - h.r.JSON(w, http.StatusOK, pausedSchedulers) - } - return - case "disabled": - var disabledSchedulers []string - for _, scheduler := range schedulers { - disabled, err := h.Handler.IsSchedulerDisabled(scheduler) - if err != nil { - h.r.JSON(w, http.StatusInternalServerError, err.Error()) - return - } - - if disabled { - disabledSchedulers = append(disabledSchedulers, scheduler) - } - } - h.r.JSON(w, http.StatusOK, disabledSchedulers) - default: - h.r.JSON(w, http.StatusOK, schedulers) - } + h.r.JSON(w, http.StatusOK, output) } // FIXME: details of input json body params diff --git a/server/api/server.go b/server/api/server.go index 0094d8eb5dd..19583bb9a12 100644 --- a/server/api/server.go +++ b/server/api/server.go @@ -39,6 +39,22 @@ func NewHandler(_ context.Context, svr *server.Server) (http.Handler, apiutil.AP prefix := apiPrefix + "/api/v1" r := createRouter(apiPrefix, svr) router := mux.NewRouter() + + // Following requests are redirected: + // "/admin/reset-ts", http.MethodPost + // "/operators", http.MethodGet + // "/operators", http.MethodPost + // "/operators/records",http.MethodGet + // "/operators/{region_id}", http.MethodGet + // "/operators/{region_id}", http.MethodDelete + // "/checker/{name}", http.MethodPost + // "/checker/{name}", http.MethodGet + // "/schedulers", http.MethodGet + // "/schedulers/{name}", http.MethodPost + // "/schedulers/diagnostic/{name}", http.MethodGet + 
// Following requests are **not** redirected: + // "/schedulers", http.MethodPost + // "/schedulers/{name}", http.MethodDelete router.PathPrefix(apiPrefix).Handler(negroni.New( serverapi.NewRuntimeServiceValidator(svr, group), serverapi.NewRedirector(svr, @@ -52,18 +68,23 @@ func NewHandler(_ context.Context, svr *server.Server) (http.Handler, apiutil.AP scheapi.APIPathPrefix+"/operators", mcs.SchedulingServiceName, []string{http.MethodPost, http.MethodGet, http.MethodDelete}), - // because the writing of all the meta information of the scheduling service is in the API server, - // we only forward read-only requests about checkers and schedulers to the scheduling service. serverapi.MicroserviceRedirectRule( prefix+"/checker", // Note: this is a typo in the original code scheapi.APIPathPrefix+"/checkers", mcs.SchedulingServiceName, - []string{http.MethodGet}), + []string{http.MethodPost, http.MethodGet}), + // because the writing of all the meta information of the scheduling service is in the API server, + // we should not post and delete the scheduler directly in the scheduling service. serverapi.MicroserviceRedirectRule( prefix+"/schedulers", scheapi.APIPathPrefix+"/schedulers", mcs.SchedulingServiceName, []string{http.MethodGet}), + serverapi.MicroserviceRedirectRule( + prefix+"/schedulers/", // Note: this means "/schedulers/{name}" + scheapi.APIPathPrefix+"/schedulers", + mcs.SchedulingServiceName, + []string{http.MethodPost}), // TODO: we need to consider the case that v1 api not support restful api. // we might change the previous path parameters to query parameters. ), diff --git a/server/handler.go b/server/handler.go index ace7592cd7c..7fb74d5c715 100644 --- a/server/handler.go +++ b/server/handler.go @@ -92,24 +92,6 @@ func (h *Handler) GetRaftCluster() (*cluster.RaftCluster, error) { return rc, nil } -// IsSchedulerPaused returns whether scheduler is paused. -func (h *Handler) IsSchedulerPaused(name string) (bool, error) { - rc, err := h.GetRaftCluster() - if err != nil { - return false, err - } - return rc.GetCoordinator().GetSchedulersController().IsSchedulerPaused(name) -} - -// IsSchedulerDisabled returns whether scheduler is disabled. -func (h *Handler) IsSchedulerDisabled(name string) (bool, error) { - rc, err := h.GetRaftCluster() - if err != nil { - return false, err - } - return rc.GetCoordinator().GetSchedulersController().IsSchedulerDisabled(name) -} - // IsSchedulerExisted returns whether scheduler is existed. func (h *Handler) IsSchedulerExisted(name string) (bool, error) { rc, err := h.GetRaftCluster() @@ -124,24 +106,6 @@ func (h *Handler) GetScheduleConfig() *sc.ScheduleConfig { return h.s.GetScheduleConfig() } -// GetSchedulers returns all names of schedulers. -func (h *Handler) GetSchedulers() ([]string, error) { - c, err := h.GetRaftCluster() - if err != nil { - return nil, err - } - return c.GetSchedulers(), nil -} - -// IsCheckerPaused returns if checker is paused -func (h *Handler) IsCheckerPaused(name string) (bool, error) { - rc, err := h.GetRaftCluster() - if err != nil { - return false, err - } - return rc.GetCoordinator().IsCheckerPaused(name) -} - // GetStores returns all stores in the cluster. func (h *Handler) GetStores() ([]*core.StoreInfo, error) { rc := h.s.GetRaftCluster() @@ -269,48 +233,6 @@ func (h *Handler) RemoveScheduler(name string) error { return err } -// PauseOrResumeScheduler pauses a scheduler for delay seconds or resume a paused scheduler. -// t == 0 : resume scheduler. -// t > 0 : scheduler delays t seconds. 
-func (h *Handler) PauseOrResumeScheduler(name string, t int64) error { - c, err := h.GetRaftCluster() - if err != nil { - return err - } - if err = c.PauseOrResumeScheduler(name, t); err != nil { - if t == 0 { - log.Error("can not resume scheduler", zap.String("scheduler-name", name), errs.ZapError(err)) - } else { - log.Error("can not pause scheduler", zap.String("scheduler-name", name), errs.ZapError(err)) - } - } else { - if t == 0 { - log.Info("resume scheduler successfully", zap.String("scheduler-name", name)) - } else { - log.Info("pause scheduler successfully", zap.String("scheduler-name", name), zap.Int64("pause-seconds", t)) - } - } - return err -} - -// PauseOrResumeChecker pauses checker for delay seconds or resume checker -// t == 0 : resume checker. -// t > 0 : checker delays t seconds. -func (h *Handler) PauseOrResumeChecker(name string, t int64) error { - c, err := h.GetRaftCluster() - if err != nil { - return err - } - if err = c.PauseOrResumeChecker(name, t); err != nil { - if t == 0 { - log.Error("can not resume checker", zap.String("checker-name", name), errs.ZapError(err)) - } else { - log.Error("can not pause checker", zap.String("checker-name", name), errs.ZapError(err)) - } - } - return err -} - // AddBalanceLeaderScheduler adds a balance-leader-scheduler. func (h *Handler) AddBalanceLeaderScheduler() error { return h.AddScheduler(schedulers.BalanceLeaderType) @@ -680,21 +602,3 @@ func (h *Handler) AddEvictOrGrant(storeID float64, name string) error { } return nil } - -// GetPausedSchedulerDelayAt returns paused unix timestamp when a scheduler is paused -func (h *Handler) GetPausedSchedulerDelayAt(name string) (int64, error) { - rc, err := h.GetRaftCluster() - if err != nil { - return -1, err - } - return rc.GetPausedSchedulerDelayAt(name) -} - -// GetPausedSchedulerDelayUntil returns resume unix timestamp when a scheduler is paused -func (h *Handler) GetPausedSchedulerDelayUntil(name string) (int64, error) { - rc, err := h.GetRaftCluster() - if err != nil { - return -1, err - } - return rc.GetPausedSchedulerDelayUntil(name) -} diff --git a/tests/integrations/mcs/scheduling/api_test.go b/tests/integrations/mcs/scheduling/api_test.go index e91d3cd633e..1e5f9baf1bd 100644 --- a/tests/integrations/mcs/scheduling/api_test.go +++ b/tests/integrations/mcs/scheduling/api_test.go @@ -112,16 +112,16 @@ func (suite *apiTestSuite) TestGetCheckerByName() { func (suite *apiTestSuite) TestAPIForward() { re := suite.Require() + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/utils/apiutil/serverapi/checkHeader", "return(true)")) + defer func() { + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/utils/apiutil/serverapi/checkHeader")) + }() + tc, err := tests.NewTestSchedulingCluster(suite.ctx, 2, suite.backendEndpoints) re.NoError(err) defer tc.Destroy() tc.WaitForPrimaryServing(re) - failpoint.Enable("github.com/tikv/pd/pkg/utils/apiutil/serverapi/checkHeader", "return(true)") - defer func() { - failpoint.Disable("github.com/tikv/pd/pkg/utils/apiutil/serverapi/checkHeader") - }() - urlPrefix := fmt.Sprintf("%s/pd/api/v1", suite.backendEndpoints) var slice []string var resp map[string]interface{} @@ -148,7 +148,7 @@ func (suite *apiTestSuite) TestAPIForward() { testutil.StatusNotOK(re), testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true")) re.NoError(err) - // Test checker: only read-only requests are forwarded + // Test checker: err = testutil.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "checker/merge"), &resp, testutil.WithHeader(re, 
apiutil.ForwardToMicroServiceHeader, "true"))
	re.NoError(err)
@@ -159,10 +159,17 @@
 	pauseArgs, err := json.Marshal(input)
 	suite.NoError(err)
 	err = testutil.CheckPostJSON(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "checker/merge"), pauseArgs,
-		testutil.StatusOK(re), testutil.WithoutHeader(re, apiutil.PDRedirectorHeader))
+		testutil.StatusOK(re), testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true"))
 	suite.NoError(err)
 
-	// Test scheduler: only read-only requests are forwarded
+	// Test scheduler:
+	// Need to redirect:
+	// "/schedulers", http.MethodGet
+	// "/schedulers/{name}", http.MethodPost
+	// "/schedulers/diagnostic/{name}", http.MethodGet
+	// Should not redirect:
+	// "/schedulers", http.MethodPost
+	// "/schedulers/{name}", http.MethodDelete
 	err = testutil.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "schedulers"), &slice,
 		testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true"))
 	re.NoError(err)
@@ -171,7 +178,19 @@
 	input["delay"] = 30
 	pauseArgs, err = json.Marshal(input)
 	suite.NoError(err)
-	err = testutil.CheckPostJSON(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "schedulers/all"), pauseArgs,
-		testutil.StatusOK(re), testutil.WithoutHeader(re, apiutil.ForwardToMicroServiceHeader))
+	err = testutil.CheckPostJSON(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "schedulers/balance-leader-scheduler"), pauseArgs,
+		testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true"))
+	suite.NoError(err)
+
+	err = testutil.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "schedulers/diagnostic/balance-leader-scheduler"), &resp,
+		testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true"))
 	suite.NoError(err)
+
+	err = testutil.CheckPostJSON(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "schedulers"), pauseArgs,
+		testutil.WithoutHeader(re, apiutil.ForwardToMicroServiceHeader))
+	re.NoError(err)
+
+	err = testutil.CheckDelete(testDialClient, fmt.Sprintf("%s/%s", urlPrefix, "schedulers/balance-leader-scheduler"),
+		testutil.WithoutHeader(re, apiutil.ForwardToMicroServiceHeader))
+	re.NoError(err)
 }
diff --git a/tests/integrations/mcs/scheduling/server_test.go b/tests/integrations/mcs/scheduling/server_test.go
index 45c25f01d1e..0760bd1fbb5 100644
--- a/tests/integrations/mcs/scheduling/server_test.go
+++ b/tests/integrations/mcs/scheduling/server_test.go
@@ -264,6 +264,17 @@ func (suite *serverTestSuite) TestSchedulerSync() {
 	checkEvictLeaderSchedulerExist(re, schedulersController, false)
 
 	// TODO: test more schedulers.
+	// FIXME: the following code will fail because the scheduler is removed on the API server side but the removal is not yet synced to the scheduling server.
+	// checkDelete := func(schedulerName string) {
+	// 	re.NotNil(schedulersController.GetScheduler(schedulerName))
+	// 	api.MustDeleteScheduler(re, suite.backendEndpoints, schedulerName)
+	// 	testutil.Eventually(re, func() bool {
+	// 		return schedulersController.GetScheduler(schedulerName) == nil
+	// 	})
+	// }
+	// checkDelete(schedulers.BalanceLeaderName)
+	// checkDelete(schedulers.BalanceRegionName)
+	// checkDelete(schedulers.HotRegionName)
 }
 
 func checkEvictLeaderSchedulerExist(re *require.Assertions, sc *schedulers.Controller, exist bool) {
diff --git a/tests/integrations/mcs/tso/api_test.go b/tests/integrations/mcs/tso/api_test.go
index 7e870fbc198..81cc798851f 100644
--- a/tests/integrations/mcs/tso/api_test.go
+++ b/tests/integrations/mcs/tso/api_test.go
@@ -30,6 +30,7 @@ import (
 	apis "github.com/tikv/pd/pkg/mcs/tso/server/apis/v1"
 	mcsutils "github.com/tikv/pd/pkg/mcs/utils"
 	"github.com/tikv/pd/pkg/storage/endpoint"
+	"github.com/tikv/pd/pkg/utils/apiutil"
 	"github.com/tikv/pd/pkg/utils/testutil"
 	"github.com/tikv/pd/server/config"
 	"github.com/tikv/pd/tests"
@@ -100,6 +101,11 @@ func (suite *tsoAPITestSuite) TestGetKeyspaceGroupMembers() {
 
 func (suite *tsoAPITestSuite) TestForwardResetTS() {
 	re := suite.Require()
+	re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/utils/apiutil/serverapi/checkHeader", "return(true)"))
+	defer func() {
+		re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/utils/apiutil/serverapi/checkHeader"))
+	}()
+
 	primary := suite.tsoCluster.WaitForDefaultPrimaryServing(re)
 	re.NotNil(primary)
 	url := suite.backendEndpoints + "/pd/api/v1/admin/reset-ts"
@@ -107,13 +113,13 @@
 
 	// Test reset ts
 	input := []byte(`{"tso":"121312", "force-use-larger":true}`)
 	err := testutil.CheckPostJSON(dialClient, url, input,
-		testutil.StatusOK(re), testutil.StringContain(re, "Reset ts successfully"))
+		testutil.StatusOK(re), testutil.StringContain(re, "Reset ts successfully"), testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true"))
 	suite.NoError(err)
 
 	// Test reset ts with invalid tso
 	input = []byte(`{}`)
 	err = testutil.CheckPostJSON(dialClient, url, input,
-		testutil.StatusNotOK(re), testutil.StringContain(re, "invalid tso value"))
+		testutil.StatusNotOK(re), testutil.StringContain(re, "invalid tso value"), testutil.WithHeader(re, apiutil.ForwardToMicroServiceHeader, "true"))
 	re.NoError(err)
 }
diff --git a/tests/pdctl/scheduler/scheduler_test.go b/tests/pdctl/scheduler/scheduler_test.go
index f2d44a589a4..b7cce7722c5 100644
--- a/tests/pdctl/scheduler/scheduler_test.go
+++ b/tests/pdctl/scheduler/scheduler_test.go
@@ -23,6 +23,7 @@ import (
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/spf13/cobra"
 	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
 	"github.com/tikv/pd/pkg/core"
 	sc "github.com/tikv/pd/pkg/schedule/config"
 	"github.com/tikv/pd/pkg/utils/testutil"
@@ -32,16 +33,23 @@ import (
 	pdctlCmd "github.com/tikv/pd/tools/pd-ctl/pdctl"
 )
 
-func TestScheduler(t *testing.T) {
-	re := require.New(t)
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	cluster, err := tests.NewTestCluster(ctx, 1)
-	re.NoError(err)
-	defer cluster.Destroy()
-	err = cluster.RunInitialServers()
-	re.NoError(err)
-	cluster.WaitLeader()
+type schedulerTestSuite struct {
+	suite.Suite
+}
+
+func TestSchedulerTestSuite(t *testing.T) {
+	suite.Run(t, new(schedulerTestSuite))
+}
+
+func (suite *schedulerTestSuite) TestScheduler() {
+	env := 
tests.NewSchedulingTestEnvironment(suite.T()) + // Fixme: use RunTestInTwoModes when sync deleted scheduler is supported. + env.RunTestInPDMode(suite.checkScheduler) + env.RunTestInTwoModes(suite.checkSchedulerDiagnostic) +} + +func (suite *schedulerTestSuite) checkScheduler(cluster *tests.TestCluster) { + re := suite.Require() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -85,17 +93,13 @@ func TestScheduler(t *testing.T) { } } - checkSchedulerConfigCommand := func(args []string, expectedConfig map[string]interface{}, schedulerName string) { - if args != nil { - mustExec(re, cmd, args, nil) - } + checkSchedulerConfigCommand := func(expectedConfig map[string]interface{}, schedulerName string) { configInfo := make(map[string]interface{}) mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", schedulerName}, &configInfo) re.Equal(expectedConfig, configInfo) } leaderServer := cluster.GetLeaderServer() - re.NoError(leaderServer.BootstrapCluster()) for _, store := range stores { tests.MustPutStore(re, cluster, store) } @@ -116,6 +120,7 @@ func TestScheduler(t *testing.T) { // scheduler delete command args := []string{"-u", pdAddr, "scheduler", "remove", "balance-region-scheduler"} + time.Sleep(10 * time.Second) expected = map[string]bool{ "balance-leader-scheduler": true, "balance-hot-region-scheduler": true, @@ -141,7 +146,7 @@ func TestScheduler(t *testing.T) { // scheduler config show command expectedConfig := make(map[string]interface{}) expectedConfig["store-id-ranges"] = map[string]interface{}{"2": []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}}} - checkSchedulerConfigCommand(nil, expectedConfig, schedulers[idx]) + checkSchedulerConfigCommand(expectedConfig, schedulers[idx]) // scheduler config update command args = []string{"-u", pdAddr, "scheduler", "config", schedulers[idx], "add-store", "3"} @@ -156,7 +161,7 @@ func TestScheduler(t *testing.T) { // check update success expectedConfig["store-id-ranges"] = map[string]interface{}{"2": []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}}, "3": []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}}} - checkSchedulerConfigCommand(nil, expectedConfig, schedulers[idx]) + checkSchedulerConfigCommand(expectedConfig, schedulers[idx]) // scheduler delete command args = []string{"-u", pdAddr, "scheduler", "remove", schedulers[idx]} @@ -192,7 +197,7 @@ func TestScheduler(t *testing.T) { // check add success expectedConfig["store-id-ranges"] = map[string]interface{}{"2": []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}}, "4": []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}}} - checkSchedulerConfigCommand(nil, expectedConfig, schedulers[idx]) + checkSchedulerConfigCommand(expectedConfig, schedulers[idx]) // scheduler remove command [old] args = []string{"-u", pdAddr, "scheduler", "remove", schedulers[idx] + "-4"} @@ -207,7 +212,7 @@ func TestScheduler(t *testing.T) { // check remove success expectedConfig["store-id-ranges"] = map[string]interface{}{"2": []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}}} - checkSchedulerConfigCommand(nil, expectedConfig, schedulers[idx]) + checkSchedulerConfigCommand(expectedConfig, schedulers[idx]) // scheduler remove command, when remove the last store, it should remove whole scheduler args = []string{"-u", pdAddr, "scheduler", "remove", schedulers[idx] + "-2"} @@ -403,10 +408,7 @@ func TestScheduler(t *testing.T) { re.Contains(echo, "Success!") // test show 
scheduler with paused and disabled status. - checkSchedulerWithStatusCommand := func(args []string, status string, expected []string) { - if args != nil { - mustExec(re, cmd, args, nil) - } + checkSchedulerWithStatusCommand := func(status string, expected []string) { var schedulers []string mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "show", "--status", status}, &schedulers) re.Equal(expected, schedulers) @@ -414,7 +416,7 @@ func TestScheduler(t *testing.T) { mustUsage([]string{"-u", pdAddr, "scheduler", "pause", "balance-leader-scheduler"}) mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "pause", "balance-leader-scheduler", "60"}, nil) - checkSchedulerWithStatusCommand(nil, "paused", []string{ + checkSchedulerWithStatusCommand("paused", []string{ "balance-leader-scheduler", }) result := make(map[string]interface{}) @@ -425,7 +427,7 @@ func TestScheduler(t *testing.T) { mustUsage([]string{"-u", pdAddr, "scheduler", "resume", "balance-leader-scheduler", "60"}) mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "resume", "balance-leader-scheduler"}, nil) - checkSchedulerWithStatusCommand(nil, "paused", nil) + checkSchedulerWithStatusCommand("paused", nil) // set label scheduler to disabled manually. echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "add", "label-scheduler"}, nil) @@ -433,26 +435,18 @@ func TestScheduler(t *testing.T) { cfg := leaderServer.GetServer().GetScheduleConfig() origin := cfg.Schedulers cfg.Schedulers = sc.SchedulerConfigs{{Type: "label", Disable: true}} - err = leaderServer.GetServer().SetScheduleConfig(*cfg) + err := leaderServer.GetServer().SetScheduleConfig(*cfg) re.NoError(err) - checkSchedulerWithStatusCommand(nil, "disabled", []string{"label-scheduler"}) + checkSchedulerWithStatusCommand("disabled", []string{"label-scheduler"}) // reset Schedulers in ScheduleConfig cfg.Schedulers = origin err = leaderServer.GetServer().SetScheduleConfig(*cfg) re.NoError(err) - checkSchedulerWithStatusCommand(nil, "disabled", nil) + checkSchedulerWithStatusCommand("disabled", nil) } -func TestSchedulerDiagnostic(t *testing.T) { - re := require.New(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1) - re.NoError(err) - defer cluster.Destroy() - err = cluster.RunInitialServers() - re.NoError(err) - cluster.WaitLeader() +func (suite *schedulerTestSuite) checkSchedulerDiagnostic(cluster *tests.TestCluster) { + re := suite.Require() pdAddr := cluster.GetConfig().GetClientURL() cmd := pdctlCmd.GetRootCmd() @@ -488,8 +482,6 @@ func TestSchedulerDiagnostic(t *testing.T) { LastHeartbeat: time.Now().UnixNano(), }, } - leaderServer := cluster.GetLeaderServer() - re.NoError(leaderServer.BootstrapCluster()) for _, store := range stores { tests.MustPutStore(re, cluster, store) } @@ -503,9 +495,11 @@ func TestSchedulerDiagnostic(t *testing.T) { checkSchedulerDescribeCommand("balance-region-scheduler", "pending", "1 store(s) RegionNotMatchRule; ") // scheduler delete command - mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "remove", "balance-region-scheduler"}, nil) - - checkSchedulerDescribeCommand("balance-region-scheduler", "disabled", "") + // Fixme: use RunTestInTwoModes when sync deleted scheduler is supported. 
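+		// The removal check below runs in PD mode only, i.e. when no scheduling server holds primary.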
+ if sche := cluster.GetSchedulingPrimaryServer(); sche == nil { + mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "remove", "balance-region-scheduler"}, nil) + checkSchedulerDescribeCommand("balance-region-scheduler", "disabled", "") + } mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "pause", "balance-leader-scheduler", "60"}, nil) mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "resume", "balance-leader-scheduler"}, nil) @@ -554,4 +548,20 @@ func TestForwardSchedulerRequest(t *testing.T) { re.NoError(err) re.NoError(json.Unmarshal(output, &slice)) re.Contains(slice, "balance-leader-scheduler") + + mustUsage := func(args []string) { + output, err := pdctl.ExecuteCommand(cmd, args...) + re.NoError(err) + re.Contains(string(output), "Usage") + } + mustUsage([]string{"-u", backendEndpoints, "scheduler", "pause", "balance-leader-scheduler"}) + mustExec(re, cmd, []string{"-u", backendEndpoints, "scheduler", "pause", "balance-leader-scheduler", "60"}, nil) + checkSchedulerWithStatusCommand := func(status string, expected []string) { + var schedulers []string + mustExec(re, cmd, []string{"-u", backendEndpoints, "scheduler", "show", "--status", status}, &schedulers) + re.Equal(expected, schedulers) + } + checkSchedulerWithStatusCommand("paused", []string{ + "balance-leader-scheduler", + }) } diff --git a/server/api/checker_test.go b/tests/server/api/checker_test.go similarity index 51% rename from server/api/checker_test.go rename to tests/server/api/checker_test.go index d6098b776cf..0f359553b73 100644 --- a/server/api/checker_test.go +++ b/tests/server/api/checker_test.go @@ -20,42 +20,25 @@ import ( "testing" "time" - "github.com/pingcap/kvproto/pkg/metapb" "github.com/stretchr/testify/suite" tu "github.com/tikv/pd/pkg/utils/testutil" - "github.com/tikv/pd/server" + "github.com/tikv/pd/tests" ) type checkerTestSuite struct { suite.Suite - svr *server.Server - cleanup tu.CleanupFunc - urlPrefix string } func TestCheckerTestSuite(t *testing.T) { suite.Run(t, new(checkerTestSuite)) } - -func (suite *checkerTestSuite) SetupSuite() { - re := suite.Require() - suite.svr, suite.cleanup = mustNewServer(re) - server.MustWaitLeader(re, []*server.Server{suite.svr}) - - addr := suite.svr.GetAddr() - suite.urlPrefix = fmt.Sprintf("%s%s/api/v1/checker", addr, apiPrefix) - - mustBootstrapCluster(re, suite.svr) - mustPutStore(re, suite.svr, 1, metapb.StoreState_Up, metapb.NodeState_Serving, nil) - mustPutStore(re, suite.svr, 2, metapb.StoreState_Up, metapb.NodeState_Serving, nil) -} - -func (suite *checkerTestSuite) TearDownSuite() { - suite.cleanup() +func (suite *checkerTestSuite) TestAPI() { + env := tests.NewSchedulingTestEnvironment(suite.T()) + env.RunTestInTwoModes(suite.checkAPI) } -func (suite *checkerTestSuite) TestAPI() { - suite.testErrCases() +func (suite *checkerTestSuite) checkAPI(cluster *tests.TestCluster) { + suite.testErrCases(cluster) testCases := []struct { name string @@ -68,25 +51,26 @@ func (suite *checkerTestSuite) TestAPI() { {name: "joint-state"}, } for _, testCase := range testCases { - suite.testGetStatus(testCase.name) - suite.testPauseOrResume(testCase.name) + suite.testGetStatus(cluster, testCase.name) + suite.testPauseOrResume(cluster, testCase.name) } } -func (suite *checkerTestSuite) testErrCases() { +func (suite *checkerTestSuite) testErrCases(cluster *tests.TestCluster) { + urlPrefix := fmt.Sprintf("%s/pd/api/v1/checker", cluster.GetLeaderServer().GetAddr()) // missing args input := make(map[string]interface{}) pauseArgs, err := json.Marshal(input) 
suite.NoError(err) re := suite.Require() - err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/merge", pauseArgs, tu.StatusNotOK(re)) + err = tu.CheckPostJSON(testDialClient, urlPrefix+"/merge", pauseArgs, tu.StatusNotOK(re)) suite.NoError(err) // negative delay input["delay"] = -10 pauseArgs, err = json.Marshal(input) suite.NoError(err) - err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/merge", pauseArgs, tu.StatusNotOK(re)) + err = tu.CheckPostJSON(testDialClient, urlPrefix+"/merge", pauseArgs, tu.StatusNotOK(re)) suite.NoError(err) // wrong name @@ -94,78 +78,85 @@ func (suite *checkerTestSuite) testErrCases() { input["delay"] = 30 pauseArgs, err = json.Marshal(input) suite.NoError(err) - err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/"+name, pauseArgs, tu.StatusNotOK(re)) + err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+name, pauseArgs, tu.StatusNotOK(re)) suite.NoError(err) input["delay"] = 0 pauseArgs, err = json.Marshal(input) suite.NoError(err) - err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/"+name, pauseArgs, tu.StatusNotOK(re)) + err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+name, pauseArgs, tu.StatusNotOK(re)) suite.NoError(err) } -func (suite *checkerTestSuite) testGetStatus(name string) { - handler := suite.svr.GetHandler() - +func (suite *checkerTestSuite) testGetStatus(cluster *tests.TestCluster, name string) { + input := make(map[string]interface{}) + urlPrefix := fmt.Sprintf("%s/pd/api/v1/checker", cluster.GetLeaderServer().GetAddr()) // normal run resp := make(map[string]interface{}) re := suite.Require() - err := tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", suite.urlPrefix, name), &resp) + err := tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", urlPrefix, name), &resp) suite.NoError(err) suite.False(resp["paused"].(bool)) // paused - err = handler.PauseOrResumeChecker(name, 30) + input["delay"] = 30 + pauseArgs, err := json.Marshal(input) + suite.NoError(err) + err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+name, pauseArgs, tu.StatusOK(re)) suite.NoError(err) resp = make(map[string]interface{}) - err = tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", suite.urlPrefix, name), &resp) + err = tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", urlPrefix, name), &resp) suite.NoError(err) suite.True(resp["paused"].(bool)) // resumed - err = handler.PauseOrResumeChecker(name, 1) + input["delay"] = 0 + pauseArgs, err = json.Marshal(input) + suite.NoError(err) + err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+name, pauseArgs, tu.StatusOK(re)) suite.NoError(err) time.Sleep(time.Second) resp = make(map[string]interface{}) - err = tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", suite.urlPrefix, name), &resp) + err = tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", urlPrefix, name), &resp) suite.NoError(err) suite.False(resp["paused"].(bool)) } -func (suite *checkerTestSuite) testPauseOrResume(name string) { - handler := suite.svr.GetHandler() +func (suite *checkerTestSuite) testPauseOrResume(cluster *tests.TestCluster, name string) { input := make(map[string]interface{}) + urlPrefix := fmt.Sprintf("%s/pd/api/v1/checker", cluster.GetLeaderServer().GetAddr()) + resp := make(map[string]interface{}) // test pause. 
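+	// Pausing with delay=30 should be reflected as paused=true by the status endpoint.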
input["delay"] = 30 pauseArgs, err := json.Marshal(input) suite.NoError(err) re := suite.Require() - err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/"+name, pauseArgs, tu.StatusOK(re)) + err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+name, pauseArgs, tu.StatusOK(re)) suite.NoError(err) - isPaused, err := handler.IsCheckerPaused(name) + err = tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", urlPrefix, name), &resp) suite.NoError(err) - suite.True(isPaused) + suite.True(resp["paused"].(bool)) input["delay"] = 1 pauseArgs, err = json.Marshal(input) suite.NoError(err) - err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/"+name, pauseArgs, tu.StatusOK(re)) + err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+name, pauseArgs, tu.StatusOK(re)) suite.NoError(err) time.Sleep(time.Second) - isPaused, err = handler.IsCheckerPaused(name) + err = tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", urlPrefix, name), &resp) suite.NoError(err) - suite.False(isPaused) + suite.False(resp["paused"].(bool)) // test resume. input = make(map[string]interface{}) input["delay"] = 30 pauseArgs, err = json.Marshal(input) suite.NoError(err) - err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/"+name, pauseArgs, tu.StatusOK(re)) + err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+name, pauseArgs, tu.StatusOK(re)) suite.NoError(err) input["delay"] = 0 pauseArgs, err = json.Marshal(input) suite.NoError(err) - err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/"+name, pauseArgs, tu.StatusOK(re)) + err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+name, pauseArgs, tu.StatusOK(re)) suite.NoError(err) - isPaused, err = handler.IsCheckerPaused(name) + err = tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s/%s", urlPrefix, name), &resp) suite.NoError(err) - suite.False(isPaused) + suite.False(resp["paused"].(bool)) } diff --git a/server/api/scheduler_test.go b/tests/server/api/scheduler_test.go similarity index 73% rename from server/api/scheduler_test.go rename to tests/server/api/scheduler_test.go index b015bbe8f52..95c4d936a8c 100644 --- a/server/api/scheduler_test.go +++ b/tests/server/api/scheduler_test.go @@ -27,51 +27,53 @@ import ( sc "github.com/tikv/pd/pkg/schedule/config" tu "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/server" + "github.com/tikv/pd/tests" ) +const apiPrefix = "/pd" + type scheduleTestSuite struct { suite.Suite - svr *server.Server - cleanup tu.CleanupFunc - urlPrefix string } func TestScheduleTestSuite(t *testing.T) { suite.Run(t, new(scheduleTestSuite)) } -func (suite *scheduleTestSuite) SetupSuite() { - re := suite.Require() - suite.svr, suite.cleanup = mustNewServer(re) - server.MustWaitLeader(re, []*server.Server{suite.svr}) - - addr := suite.svr.GetAddr() - suite.urlPrefix = fmt.Sprintf("%s%s/api/v1/schedulers", addr, apiPrefix) - - mustBootstrapCluster(re, suite.svr) - mustPutStore(re, suite.svr, 1, metapb.StoreState_Up, metapb.NodeState_Serving, nil) - mustPutStore(re, suite.svr, 2, metapb.StoreState_Up, metapb.NodeState_Serving, nil) - mustPutStore(re, suite.svr, 3, metapb.StoreState_Up, metapb.NodeState_Serving, nil) - mustPutStore(re, suite.svr, 4, metapb.StoreState_Up, metapb.NodeState_Serving, nil) +func (suite *scheduleTestSuite) TestScheduler() { + // Fixme: use RunTestInTwoModes when sync deleted scheduler is supported. 
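+	// Each sub-test below runs against a fresh test environment in PD mode.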
+ env := tests.NewSchedulingTestEnvironment(suite.T()) + env.RunTestInPDMode(suite.checkOriginAPI) + env = tests.NewSchedulingTestEnvironment(suite.T()) + env.RunTestInPDMode(suite.checkAPI) + env = tests.NewSchedulingTestEnvironment(suite.T()) + env.RunTestInPDMode(suite.checkDisable) } -func (suite *scheduleTestSuite) TearDownSuite() { - suite.cleanup() -} +func (suite *scheduleTestSuite) checkOriginAPI(cluster *tests.TestCluster) { + leaderAddr := cluster.GetLeaderServer().GetAddr() + urlPrefix := fmt.Sprintf("%s/pd/api/v1/schedulers", leaderAddr) + for i := 1; i <= 4; i++ { + store := &metapb.Store{ + Id: uint64(i), + State: metapb.StoreState_Up, + NodeState: metapb.NodeState_Serving, + LastHeartbeat: time.Now().UnixNano(), + } + tests.MustPutStore(suite.Require(), cluster, store) + } -func (suite *scheduleTestSuite) TestOriginAPI() { - addURL := suite.urlPrefix input := make(map[string]interface{}) input["name"] = "evict-leader-scheduler" input["store_id"] = 1 body, err := json.Marshal(input) suite.NoError(err) re := suite.Require() - suite.NoError(tu.CheckPostJSON(testDialClient, addURL, body, tu.StatusOK(re))) - rc := suite.svr.GetRaftCluster() - suite.Len(rc.GetSchedulers(), 1) + suite.NoError(tu.CheckPostJSON(testDialClient, urlPrefix, body, tu.StatusOK(re))) + + suite.Len(suite.getSchedulers(urlPrefix), 1) resp := make(map[string]interface{}) - listURL := fmt.Sprintf("%s%s%s/%s/list", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, "evict-leader-scheduler") + listURL := fmt.Sprintf("%s%s%s/%s/list", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, "evict-leader-scheduler") suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp)) suite.Len(resp["store-id-ranges"], 1) input1 := make(map[string]interface{}) @@ -80,40 +82,52 @@ func (suite *scheduleTestSuite) TestOriginAPI() { body, err = json.Marshal(input1) suite.NoError(err) suite.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/schedulers/persistFail", "return(true)")) - suite.NoError(tu.CheckPostJSON(testDialClient, addURL, body, tu.StatusNotOK(re))) - suite.Len(rc.GetSchedulers(), 1) + suite.NoError(tu.CheckPostJSON(testDialClient, urlPrefix, body, tu.StatusNotOK(re))) + suite.Len(suite.getSchedulers(urlPrefix), 1) resp = make(map[string]interface{}) suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp)) suite.Len(resp["store-id-ranges"], 1) suite.NoError(failpoint.Disable("github.com/tikv/pd/pkg/schedule/schedulers/persistFail")) - suite.NoError(tu.CheckPostJSON(testDialClient, addURL, body, tu.StatusOK(re))) - suite.Len(rc.GetSchedulers(), 1) + suite.NoError(tu.CheckPostJSON(testDialClient, urlPrefix, body, tu.StatusOK(re))) + suite.Len(suite.getSchedulers(urlPrefix), 1) resp = make(map[string]interface{}) suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp)) suite.Len(resp["store-id-ranges"], 2) - deleteURL := fmt.Sprintf("%s/%s", suite.urlPrefix, "evict-leader-scheduler-1") + deleteURL := fmt.Sprintf("%s/%s", urlPrefix, "evict-leader-scheduler-1") err = tu.CheckDelete(testDialClient, deleteURL, tu.StatusOK(re)) suite.NoError(err) - suite.Len(rc.GetSchedulers(), 1) + suite.Len(suite.getSchedulers(urlPrefix), 1) resp1 := make(map[string]interface{}) suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp1)) suite.Len(resp1["store-id-ranges"], 1) - deleteURL = fmt.Sprintf("%s/%s", suite.urlPrefix, "evict-leader-scheduler-2") + deleteURL = fmt.Sprintf("%s/%s", urlPrefix, "evict-leader-scheduler-2") 
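+	// With persistence failing, the delete request should return 500 and the scheduler must be kept.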
suite.NoError(failpoint.Enable("github.com/tikv/pd/server/config/persistFail", "return(true)")) err = tu.CheckDelete(testDialClient, deleteURL, tu.Status(re, http.StatusInternalServerError)) suite.NoError(err) - suite.Len(rc.GetSchedulers(), 1) + suite.Len(suite.getSchedulers(urlPrefix), 1) suite.NoError(failpoint.Disable("github.com/tikv/pd/server/config/persistFail")) err = tu.CheckDelete(testDialClient, deleteURL, tu.StatusOK(re)) suite.NoError(err) - suite.Empty(rc.GetSchedulers()) + suite.Empty(suite.getSchedulers(urlPrefix)) suite.NoError(tu.CheckGetJSON(testDialClient, listURL, nil, tu.Status(re, http.StatusNotFound))) err = tu.CheckDelete(testDialClient, deleteURL, tu.Status(re, http.StatusNotFound)) suite.NoError(err) } -func (suite *scheduleTestSuite) TestAPI() { +func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { re := suite.Require() + leaderAddr := cluster.GetLeaderServer().GetAddr() + urlPrefix := fmt.Sprintf("%s/pd/api/v1/schedulers", leaderAddr) + for i := 1; i <= 4; i++ { + store := &metapb.Store{ + Id: uint64(i), + State: metapb.StoreState_Up, + NodeState: metapb.NodeState_Serving, + LastHeartbeat: time.Now().UnixNano(), + } + tests.MustPutStore(suite.Require(), cluster, store) + } + type arg struct { opt string value interface{} @@ -129,12 +143,12 @@ func (suite *scheduleTestSuite) TestAPI() { createdName: "balance-leader-scheduler", extraTestFunc: func(name string) { resp := make(map[string]interface{}) - listURL := fmt.Sprintf("%s%s%s/%s/list", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) + listURL := fmt.Sprintf("%s%s%s/%s/list", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name) suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp)) suite.Equal(4.0, resp["batch"]) dataMap := make(map[string]interface{}) dataMap["batch"] = 3 - updateURL := fmt.Sprintf("%s%s%s/%s/config", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) + updateURL := fmt.Sprintf("%s%s%s/%s/config", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name) body, err := json.Marshal(dataMap) suite.NoError(err) suite.NoError(tu.CheckPostJSON(testDialClient, updateURL, body, tu.StatusOK(re))) @@ -179,7 +193,7 @@ func (suite *scheduleTestSuite) TestAPI() { createdName: "balance-hot-region-scheduler", extraTestFunc: func(name string) { resp := make(map[string]interface{}) - listURL := fmt.Sprintf("%s%s%s/%s/list", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) + listURL := fmt.Sprintf("%s%s%s/%s/list", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name) suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp)) expectMap := map[string]interface{}{ "min-hot-byte-rate": 100.0, @@ -210,7 +224,7 @@ func (suite *scheduleTestSuite) TestAPI() { dataMap := make(map[string]interface{}) dataMap["max-zombie-rounds"] = 5.0 expectMap["max-zombie-rounds"] = 5.0 - updateURL := fmt.Sprintf("%s%s%s/%s/config", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) + updateURL := fmt.Sprintf("%s%s%s/%s/config", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name) body, err := json.Marshal(dataMap) suite.NoError(err) suite.NoError(tu.CheckPostJSON(testDialClient, updateURL, body, tu.StatusOK(re))) @@ -240,13 +254,13 @@ func (suite *scheduleTestSuite) TestAPI() { createdName: "split-bucket-scheduler", extraTestFunc: func(name string) { resp := make(map[string]interface{}) - listURL := fmt.Sprintf("%s%s%s/%s/list", suite.svr.GetAddr(), apiPrefix, 
server.SchedulerConfigHandlerPath, name) + listURL := fmt.Sprintf("%s%s%s/%s/list", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name) suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp)) suite.Equal(3.0, resp["degree"]) suite.Equal(0.0, resp["split-limit"]) dataMap := make(map[string]interface{}) dataMap["degree"] = 4 - updateURL := fmt.Sprintf("%s%s%s/%s/config", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) + updateURL := fmt.Sprintf("%s%s%s/%s/config", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name) body, err := json.Marshal(dataMap) suite.NoError(err) suite.NoError(tu.CheckPostJSON(testDialClient, updateURL, body, tu.StatusOK(re))) @@ -295,12 +309,12 @@ func (suite *scheduleTestSuite) TestAPI() { createdName: "balance-witness-scheduler", extraTestFunc: func(name string) { resp := make(map[string]interface{}) - listURL := fmt.Sprintf("%s%s%s/%s/list", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) + listURL := fmt.Sprintf("%s%s%s/%s/list", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name) suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp)) suite.Equal(4.0, resp["batch"]) dataMap := make(map[string]interface{}) dataMap["batch"] = 3 - updateURL := fmt.Sprintf("%s%s%s/%s/config", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) + updateURL := fmt.Sprintf("%s%s%s/%s/config", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name) body, err := json.Marshal(dataMap) suite.NoError(err) suite.NoError(tu.CheckPostJSON(testDialClient, updateURL, body, tu.StatusOK(re))) @@ -346,7 +360,7 @@ func (suite *scheduleTestSuite) TestAPI() { args: []arg{{"store_id", 1}}, extraTestFunc: func(name string) { resp := make(map[string]interface{}) - listURL := fmt.Sprintf("%s%s%s/%s/list", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) + listURL := fmt.Sprintf("%s%s%s/%s/list", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name) suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp)) exceptMap := make(map[string]interface{}) exceptMap["1"] = []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}} @@ -356,7 +370,7 @@ func (suite *scheduleTestSuite) TestAPI() { input := make(map[string]interface{}) input["name"] = "grant-leader-scheduler" input["store_id"] = 2 - updateURL := fmt.Sprintf("%s%s%s/%s/config", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) + updateURL := fmt.Sprintf("%s%s%s/%s/config", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name) body, err := json.Marshal(input) suite.NoError(err) suite.NoError(tu.CheckPostJSON(testDialClient, updateURL, body, tu.StatusOK(re))) @@ -366,7 +380,7 @@ func (suite *scheduleTestSuite) TestAPI() { suite.Equal(exceptMap, resp["store-id-ranges"]) // using /pd/v1/schedule-config/grant-leader-scheduler/config to delete exists store from grant-leader-scheduler - deleteURL := fmt.Sprintf("%s%s%s/%s/delete/%s", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name, "2") + deleteURL := fmt.Sprintf("%s%s%s/%s/delete/%s", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name, "2") err = tu.CheckDelete(testDialClient, deleteURL, tu.StatusOK(re)) suite.NoError(err) resp = make(map[string]interface{}) @@ -384,14 +398,14 @@ func (suite *scheduleTestSuite) TestAPI() { // Test the scheduler config handler. 
 			extraTestFunc: func(name string) {
 				resp := make(map[string]interface{})
-				listURL := fmt.Sprintf("%s%s%s/%s/list", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name)
+				listURL := fmt.Sprintf("%s%s%s/%s/list", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name)
 				suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp))
 				suite.Equal("", resp["start-key"])
 				suite.Equal("", resp["end-key"])
 				suite.Equal("test", resp["range-name"])
 				resp["start-key"] = "a_00"
 				resp["end-key"] = "a_99"
-				updateURL := fmt.Sprintf("%s%s%s/%s/config", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name)
+				updateURL := fmt.Sprintf("%s%s%s/%s/config", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name)
 				body, err := json.Marshal(resp)
 				suite.NoError(err)
 				suite.NoError(tu.CheckPostJSON(testDialClient, updateURL, body, tu.StatusOK(re)))
@@ -409,7 +423,7 @@ func (suite *scheduleTestSuite) TestAPI() {
 			// Test the scheduler config handler.
 			extraTestFunc: func(name string) {
 				resp := make(map[string]interface{})
-				listURL := fmt.Sprintf("%s%s%s/%s/list", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name)
+				listURL := fmt.Sprintf("%s%s%s/%s/list", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name)
 				suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp))
 				exceptMap := make(map[string]interface{})
 				exceptMap["3"] = []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}}
@@ -419,7 +433,7 @@ func (suite *scheduleTestSuite) TestAPI() {
 				input := make(map[string]interface{})
 				input["name"] = "evict-leader-scheduler"
 				input["store_id"] = 4
-				updateURL := fmt.Sprintf("%s%s%s/%s/config", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name)
+				updateURL := fmt.Sprintf("%s%s%s/%s/config", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name)
 				body, err := json.Marshal(input)
 				suite.NoError(err)
 				suite.NoError(tu.CheckPostJSON(testDialClient, updateURL, body, tu.StatusOK(re)))
@@ -429,7 +443,7 @@ func (suite *scheduleTestSuite) TestAPI() {
 				suite.Equal(exceptMap, resp["store-id-ranges"])

 				// using /pd/v1/schedule-config/evict-leader-scheduler/config to delete exist store from evict-leader-scheduler
-				deleteURL := fmt.Sprintf("%s%s%s/%s/delete/%s", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name, "4")
+				deleteURL := fmt.Sprintf("%s%s%s/%s/delete/%s", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name, "4")
 				err = tu.CheckDelete(testDialClient, deleteURL, tu.StatusOK(re))
 				suite.NoError(err)
 				resp = make(map[string]interface{})
@@ -449,11 +463,11 @@ func (suite *scheduleTestSuite) TestAPI() {
 		}
 		body, err := json.Marshal(input)
 		suite.NoError(err)
-		suite.testPauseOrResume(testCase.name, testCase.createdName, body)
+		suite.testPauseOrResume(urlPrefix, testCase.name, testCase.createdName, body)
 		if testCase.extraTestFunc != nil {
 			testCase.extraTestFunc(testCase.createdName)
 		}
-		suite.deleteScheduler(testCase.createdName)
+		suite.deleteScheduler(urlPrefix, testCase.createdName)
 	}

 	// test pause and resume all schedulers.
@@ -467,7 +481,7 @@ func (suite *scheduleTestSuite) TestAPI() {
 		}
 		body, err := json.Marshal(input)
 		suite.NoError(err)
-		suite.addScheduler(body)
+		suite.addScheduler(urlPrefix, body)
 		if testCase.extraTestFunc != nil {
 			testCase.extraTestFunc(testCase.createdName)
 		}
@@ -478,22 +492,21 @@ func (suite *scheduleTestSuite) TestAPI() {
 	input["delay"] = 30
 	pauseArgs, err := json.Marshal(input)
 	suite.NoError(err)
-	err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/all", pauseArgs, tu.StatusOK(re))
+	err = tu.CheckPostJSON(testDialClient, urlPrefix+"/all", pauseArgs, tu.StatusOK(re))
 	suite.NoError(err)
-	handler := suite.svr.GetHandler()
+
 	for _, testCase := range testCases {
 		createdName := testCase.createdName
 		if createdName == "" {
 			createdName = testCase.name
 		}
-		isPaused, err := handler.IsSchedulerPaused(createdName)
-		suite.NoError(err)
+		isPaused := suite.isSchedulerPaused(urlPrefix, createdName)
 		suite.True(isPaused)
 	}
 	input["delay"] = 1
 	pauseArgs, err = json.Marshal(input)
 	suite.NoError(err)
-	err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/all", pauseArgs, tu.StatusOK(re))
+	err = tu.CheckPostJSON(testDialClient, urlPrefix+"/all", pauseArgs, tu.StatusOK(re))
 	suite.NoError(err)
 	time.Sleep(time.Second)
 	for _, testCase := range testCases {
@@ -501,8 +514,7 @@ func (suite *scheduleTestSuite) TestAPI() {
 		if createdName == "" {
 			createdName = testCase.name
 		}
-		isPaused, err := handler.IsSchedulerPaused(createdName)
-		suite.NoError(err)
+		isPaused := suite.isSchedulerPaused(urlPrefix, createdName)
 		suite.False(isPaused)
 	}

@@ -510,20 +522,19 @@ func (suite *scheduleTestSuite) TestAPI() {
 	input["delay"] = 30
 	pauseArgs, err = json.Marshal(input)
 	suite.NoError(err)
-	err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/all", pauseArgs, tu.StatusOK(re))
+	err = tu.CheckPostJSON(testDialClient, urlPrefix+"/all", pauseArgs, tu.StatusOK(re))
 	suite.NoError(err)
 	input["delay"] = 0
 	pauseArgs, err = json.Marshal(input)
 	suite.NoError(err)
-	err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/all", pauseArgs, tu.StatusOK(re))
+	err = tu.CheckPostJSON(testDialClient, urlPrefix+"/all", pauseArgs, tu.StatusOK(re))
 	suite.NoError(err)
 	for _, testCase := range testCases {
 		createdName := testCase.createdName
 		if createdName == "" {
 			createdName = testCase.name
 		}
-		isPaused, err := handler.IsSchedulerPaused(createdName)
-		suite.NoError(err)
+		isPaused := suite.isSchedulerPaused(urlPrefix, createdName)
 		suite.False(isPaused)
 	}

@@ -533,20 +544,32 @@ func (suite *scheduleTestSuite) TestAPI() {
 		if createdName == "" {
 			createdName = testCase.name
 		}
-		suite.deleteScheduler(createdName)
+		suite.deleteScheduler(urlPrefix, createdName)
 	}
 }

-func (suite *scheduleTestSuite) TestDisable() {
+func (suite *scheduleTestSuite) checkDisable(cluster *tests.TestCluster) {
+	re := suite.Require()
+	leaderAddr := cluster.GetLeaderServer().GetAddr()
+	urlPrefix := fmt.Sprintf("%s/pd/api/v1/schedulers", leaderAddr)
+	for i := 1; i <= 4; i++ {
+		store := &metapb.Store{
+			Id:            uint64(i),
+			State:         metapb.StoreState_Up,
+			NodeState:     metapb.NodeState_Serving,
+			LastHeartbeat: time.Now().UnixNano(),
+		}
+		tests.MustPutStore(suite.Require(), cluster, store)
+	}
+
 	name := "shuffle-leader-scheduler"
 	input := make(map[string]interface{})
 	input["name"] = name
 	body, err := json.Marshal(input)
 	suite.NoError(err)
-	suite.addScheduler(body)
+	suite.addScheduler(urlPrefix, body)

-	re := suite.Require()
-	u := fmt.Sprintf("%s%s/api/v1/config/schedule", suite.svr.GetAddr(), apiPrefix)
+	u := fmt.Sprintf("%s%s/api/v1/config/schedule", leaderAddr, apiPrefix)
 	var scheduleConfig sc.ScheduleConfig
 	err = tu.ReadGetJSON(re, testDialClient, u, &scheduleConfig)
 	suite.NoError(err)
@@ -559,12 +582,12 @@ func (suite *scheduleTestSuite) TestDisable() {
 	suite.NoError(err)

 	var schedulers []string
-	err = tu.ReadGetJSON(re, testDialClient, suite.urlPrefix, &schedulers)
+	err = tu.ReadGetJSON(re, testDialClient, urlPrefix, &schedulers)
 	suite.NoError(err)
 	suite.Len(schedulers, 1)
 	suite.Equal(name, schedulers[0])

-	err = tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s?status=disabled", suite.urlPrefix), &schedulers)
+	err = tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s?status=disabled", urlPrefix), &schedulers)
 	suite.NoError(err)
 	suite.Len(schedulers, 1)
 	suite.Equal(name, schedulers[0])
@@ -576,55 +599,44 @@ func (suite *scheduleTestSuite) TestDisable() {
 	err = tu.CheckPostJSON(testDialClient, u, body, tu.StatusOK(re))
 	suite.NoError(err)

-	suite.deleteScheduler(name)
+	suite.deleteScheduler(urlPrefix, name)
 }

-func (suite *scheduleTestSuite) addScheduler(body []byte) {
-	err := tu.CheckPostJSON(testDialClient, suite.urlPrefix, body, tu.StatusOK(suite.Require()))
+func (suite *scheduleTestSuite) addScheduler(urlPrefix string, body []byte) {
+	err := tu.CheckPostJSON(testDialClient, urlPrefix, body, tu.StatusOK(suite.Require()))
 	suite.NoError(err)
 }

-func (suite *scheduleTestSuite) deleteScheduler(createdName string) {
-	deleteURL := fmt.Sprintf("%s/%s", suite.urlPrefix, createdName)
+func (suite *scheduleTestSuite) deleteScheduler(urlPrefix string, createdName string) {
+	deleteURL := fmt.Sprintf("%s/%s", urlPrefix, createdName)
 	err := tu.CheckDelete(testDialClient, deleteURL, tu.StatusOK(suite.Require()))
 	suite.NoError(err)
 }

-func (suite *scheduleTestSuite) testPauseOrResume(name, createdName string, body []byte) {
+func (suite *scheduleTestSuite) testPauseOrResume(urlPrefix string, name, createdName string, body []byte) {
 	if createdName == "" {
 		createdName = name
 	}
 	re := suite.Require()
-	err := tu.CheckPostJSON(testDialClient, suite.urlPrefix, body, tu.StatusOK(re))
-	suite.NoError(err)
-	handler := suite.svr.GetHandler()
-	sches, err := handler.GetSchedulers()
+	err := tu.CheckPostJSON(testDialClient, urlPrefix, body, tu.StatusOK(re))
 	suite.NoError(err)
-	suite.Equal(createdName, sches[0])

 	// test pause.
 	input := make(map[string]interface{})
 	input["delay"] = 30
 	pauseArgs, err := json.Marshal(input)
 	suite.NoError(err)
-	err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re))
-	suite.NoError(err)
-	isPaused, err := handler.IsSchedulerPaused(createdName)
+	err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re))
 	suite.NoError(err)
+	isPaused := suite.isSchedulerPaused(urlPrefix, createdName)
 	suite.True(isPaused)
 	input["delay"] = 1
 	pauseArgs, err = json.Marshal(input)
 	suite.NoError(err)
-	err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re))
-	suite.NoError(err)
-	pausedAt, err := handler.GetPausedSchedulerDelayAt(createdName)
-	suite.NoError(err)
-	resumeAt, err := handler.GetPausedSchedulerDelayUntil(createdName)
-	suite.NoError(err)
-	suite.Equal(int64(1), resumeAt-pausedAt)
-	time.Sleep(time.Second)
-	isPaused, err = handler.IsSchedulerPaused(createdName)
+	err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re))
 	suite.NoError(err)
+	time.Sleep(time.Second * 2)
+	isPaused = suite.isSchedulerPaused(urlPrefix, createdName)
 	suite.False(isPaused)

 	// test resume.
@@ -632,14 +644,30 @@ func (suite *scheduleTestSuite) testPauseOrResume(name, createdName string, body
 	input["delay"] = 30
 	pauseArgs, err = json.Marshal(input)
 	suite.NoError(err)
-	err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re))
+	err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re))
 	suite.NoError(err)
 	input["delay"] = 0
 	pauseArgs, err = json.Marshal(input)
 	suite.NoError(err)
-	err = tu.CheckPostJSON(testDialClient, suite.urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re))
-	suite.NoError(err)
-	isPaused, err = handler.IsSchedulerPaused(createdName)
+	err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re))
 	suite.NoError(err)
+	isPaused = suite.isSchedulerPaused(urlPrefix, createdName)
 	suite.False(isPaused)
 }
+
+func (suite *scheduleTestSuite) getSchedulers(urlPrefix string) (resp []string) {
+	tu.ReadGetJSON(suite.Require(), testDialClient, urlPrefix, &resp)
+	return
+}
+
+func (suite *scheduleTestSuite) isSchedulerPaused(urlPrefix, name string) bool {
+	var schedulers []string
+	err := tu.ReadGetJSON(suite.Require(), testDialClient, fmt.Sprintf("%s?status=paused", urlPrefix), &schedulers)
+	suite.NoError(err)
+	for _, scheduler := range schedulers {
+		if scheduler == name {
+			return true
+		}
+	}
+	return false
+}
diff --git a/tests/testutil.go b/tests/testutil.go
index af4560e2609..613705d3eb6 100644
--- a/tests/testutil.go
+++ b/tests/testutil.go
@@ -228,6 +228,13 @@ func MustReportBuckets(re *require.Assertions, cluster *TestCluster, regionID ui
 	return buckets
 }

+type mode int
+
+const (
+	pdMode mode = iota
+	apiMode
+)
+
 // SchedulingTestEnvironment is used for test purpose.
 type SchedulingTestEnvironment struct {
 	t *testing.T
@@ -247,18 +254,25 @@ func NewSchedulingTestEnvironment(t *testing.T, opts ...ConfigOption) *Schedulin
 // RunTestInTwoModes is to run test in two modes.
 func (s *SchedulingTestEnvironment) RunTestInTwoModes(test func(*TestCluster)) {
-	// run test in pd mode
+	s.RunTestInPDMode(test)
+	s.RunTestInAPIMode(test)
+}
+
+// RunTestInPDMode is to run test in pd mode.
+func (s *SchedulingTestEnvironment) RunTestInPDMode(test func(*TestCluster)) {
 	s.t.Log("start to run test in pd mode")
-	re := require.New(s.t)
-	s.runInPDMode()
+	s.startCluster(pdMode)
 	test(s.cluster)
 	s.cleanup()
 	s.t.Log("finish to run test in pd mode")
+}

-	// run test in api mode
+// RunTestInAPIMode is to run test in api mode.
+func (s *SchedulingTestEnvironment) RunTestInAPIMode(test func(*TestCluster)) {
 	s.t.Log("start to run test in api mode")
+	re := require.New(s.t)
 	re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/mcs/scheduling/server/fastUpdateMember", `return(true)`))
-	s.runInAPIMode()
+	s.startCluster(apiMode)
 	test(s.cluster)
 	s.cleanup()
 	re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/mcs/scheduling/server/fastUpdateMember"))
@@ -270,34 +284,32 @@ func (s *SchedulingTestEnvironment) cleanup() {
 	s.cancel()
 }

-func (s *SchedulingTestEnvironment) runInPDMode() {
-	var err error
-	re := require.New(s.t)
-	s.ctx, s.cancel = context.WithCancel(context.Background())
-	s.cluster, err = NewTestCluster(s.ctx, 1, s.opts...)
-	re.NoError(err)
-	err = s.cluster.RunInitialServers()
-	re.NoError(err)
-	re.NotEmpty(s.cluster.WaitLeader())
-	leaderServer := s.cluster.GetServer(s.cluster.GetLeader())
-	re.NoError(leaderServer.BootstrapCluster())
-}
-
-func (s *SchedulingTestEnvironment) runInAPIMode() {
+func (s *SchedulingTestEnvironment) startCluster(m mode) {
 	var err error
 	re := require.New(s.t)
 	s.ctx, s.cancel = context.WithCancel(context.Background())
-	s.cluster, err = NewTestAPICluster(s.ctx, 1, s.opts...)
-	re.NoError(err)
-	err = s.cluster.RunInitialServers()
-	re.NoError(err)
-	re.NotEmpty(s.cluster.WaitLeader())
-	leaderServer := s.cluster.GetServer(s.cluster.GetLeader())
-	re.NoError(leaderServer.BootstrapCluster())
-	// start scheduling cluster
-	tc, err := NewTestSchedulingCluster(s.ctx, 1, leaderServer.GetAddr())
-	re.NoError(err)
-	tc.WaitForPrimaryServing(re)
-	s.cluster.SetSchedulingCluster(tc)
-	time.Sleep(200 * time.Millisecond) // wait for scheduling cluster to update member
+	switch m {
+	case pdMode:
+		s.cluster, err = NewTestCluster(s.ctx, 1, s.opts...)
+		re.NoError(err)
+		err = s.cluster.RunInitialServers()
+		re.NoError(err)
+		re.NotEmpty(s.cluster.WaitLeader())
+		leaderServer := s.cluster.GetServer(s.cluster.GetLeader())
+		re.NoError(leaderServer.BootstrapCluster())
+	case apiMode:
+		s.cluster, err = NewTestAPICluster(s.ctx, 1, s.opts...)
+		re.NoError(err)
+		err = s.cluster.RunInitialServers()
+		re.NoError(err)
+		re.NotEmpty(s.cluster.WaitLeader())
+		leaderServer := s.cluster.GetServer(s.cluster.GetLeader())
+		re.NoError(leaderServer.BootstrapCluster())
+		// start scheduling cluster
+		tc, err := NewTestSchedulingCluster(s.ctx, 1, leaderServer.GetAddr())
+		re.NoError(err)
+		tc.WaitForPrimaryServing(re)
+		s.cluster.SetSchedulingCluster(tc)
+		time.Sleep(200 * time.Millisecond) // wait for scheduling cluster to update member
+	}
 }
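
Note on usage: after this refactor a scheduler test no longer reaches into suite.svr; it receives the cluster from SchedulingTestEnvironment and derives the scheduler endpoint from the current leader. A minimal sketch of the intended wiring, assuming the testify suite plumbing used elsewhere in this repository (the TestAPI/TestDisable wrappers below are illustrative, not copied from this diff):

	// Hypothetical wrappers: each check* helper runs twice, once against a
	// plain PD cluster (pd mode) and once against a PD cluster backed by a
	// separate scheduling microservice (api mode).
	func (suite *scheduleTestSuite) TestAPI() {
		env := tests.NewSchedulingTestEnvironment(suite.T())
		env.RunTestInTwoModes(suite.checkAPI)
	}

	func (suite *scheduleTestSuite) TestDisable() {
		env := tests.NewSchedulingTestEnvironment(suite.T())
		env.RunTestInTwoModes(suite.checkDisable)
	}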
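The pause/resume assertions now go through the HTTP API instead of the in-process server handler, so they behave identically in both modes: POSTing {"delay": N} to /pd/api/v1/schedulers/{name} pauses the scheduler for N seconds, {"delay": 0} resumes it, and GET /pd/api/v1/schedulers?status=paused lists the currently paused schedulers, as the new isSchedulerPaused helper does. A self-contained sketch of that round trip against a locally running PD (the address is a placeholder):

	package main

	import (
		"bytes"
		"encoding/json"
		"fmt"
		"net/http"
	)

	func main() {
		base := "http://127.0.0.1:2379/pd/api/v1/schedulers"

		// Pause balance-leader-scheduler for 30 seconds.
		body, _ := json.Marshal(map[string]int{"delay": 30})
		resp, err := http.Post(base+"/balance-leader-scheduler", "application/json", bytes.NewReader(body))
		if err != nil {
			panic(err)
		}
		resp.Body.Close()

		// List the schedulers that are currently paused.
		resp, err = http.Get(base + "?status=paused")
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()
		var paused []string
		if err := json.NewDecoder(resp.Body).Decode(&paused); err != nil {
			panic(err)
		}
		fmt.Println("paused schedulers:", paused)
	}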