diff --git a/.gitignore b/.gitignore
index 0770e3f49c..2453f6c8cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,8 @@ images/tidb-operator/bin/
 images/tidb-operator-e2e/bin/
 images/tidb-operator-e2e/tidb-cluster/
 images/tidb-operator-e2e/tidb-operator/
+tests/images/e2e/tidb-cluster/
+tests/images/e2e/tidb-operator/
 *.tar
 tmp/
 data/
diff --git a/Makefile b/Makefile
index 4b3483ed07..13abba8e00 100644
--- a/Makefile
+++ b/Makefile
@@ -42,10 +42,16 @@ e2e-docker-push: e2e-docker
 	docker push "${DOCKER_REGISTRY}/pingcap/tidb-operator-e2e:latest"
 
 e2e-docker: e2e-build
+	[[ -d tests/images/e2e/tidb-operator ]] && rm -r tests/images/e2e/tidb-operator || true
+	[[ -d tests/images/e2e/tidb-cluster ]] && rm -r tests/images/e2e/tidb-cluster || true
+	[[ -d tests/images/e2e/tidb-backup ]] && rm -r tests/images/e2e/tidb-backup || true
+	cp -r charts/tidb-operator tests/images/e2e
+	cp -r charts/tidb-cluster tests/images/e2e
+	cp -r charts/tidb-backup tests/images/e2e
 	docker build -t "${DOCKER_REGISTRY}/pingcap/tidb-operator-e2e:latest" tests/images/e2e
 
 e2e-build:
-	$(GOENV) ginkgo build tests/e2e
+	$(GO) -ldflags '$(LDFLAGS)' -o tests/images/e2e/bin/e2e tests/cmd/e2e/main.go
 
 test:
 	@echo "Run unit tests"
diff --git a/tests/actions.go b/tests/actions.go
index 25f452570c..c05a02269a 100644
--- a/tests/actions.go
+++ b/tests/actions.go
@@ -116,6 +116,7 @@ type OperatorInfo struct {
 	Image          string
 	Tag            string
 	SchedulerImage string
+	SchedulerTag   string
 	LogLevel       string
 }
 
@@ -196,28 +197,43 @@ func (tc *TidbClusterInfo) TidbClusterHelmSetString(m map[string]string) string
 	return strings.Join(arr, ",")
 }
 
-func (oa *operatorActions) DeployOperator(info *OperatorInfo) error {
-	if err := cloneOperatorRepo(); err != nil {
-		return err
+func (oi *OperatorInfo) OperatorHelmSetString(m map[string]string) string {
+	set := map[string]string{
+		"operatorImage":                    oi.Image,
+		"controllerManager.autoFailover":   "true",
+		"scheduler.kubeSchedulerImageName": oi.SchedulerImage,
+		"controllerManager.logLevel":       oi.LogLevel,
+		"scheduler.logLevel":               "2",
 	}
-	if err := checkoutTag(info.Tag); err != nil {
-		return err
+	if oi.SchedulerTag != "" {
+		set["scheduler.kubeSchedulerImageTag"] = oi.SchedulerTag
+	}
+
+	arr := make([]string, 0, len(set))
+	for k, v := range set {
+		arr = append(arr, fmt.Sprintf("%s=%s", k, v))
+	}
+	return strings.Join(arr, ",")
+}
+
+func (oa *operatorActions) DeployOperator(info *OperatorInfo) error {
+	if info.Tag != "e2e" {
+		if err := cloneOperatorRepo(); err != nil {
+			return err
+		}
+		if err := checkoutTag(info.Tag); err != nil {
+			return err
+		}
 	}
 
 	cmd := fmt.Sprintf(`helm install /charts/%s/tidb-operator \
 		--name %s \
 		--namespace %s \
-		--set operatorImage=%s \
-		--set controllerManager.autoFailover=true \
-		--set scheduler.kubeSchedulerImage=%s \
-		--set controllerManager.logLevel=%s \
-		--set scheduler.logLevel=2`,
+		--set-string %s`,
 		info.Tag,
 		info.ReleaseName,
 		info.Namespace,
-		info.Image,
-		info.SchedulerImage,
-		info.LogLevel)
+		info.OperatorHelmSetString(nil))
 	glog.Info(cmd)
 	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
 	if err != nil {
@@ -257,7 +273,17 @@ func (oa *operatorActions) DeployTidbCluster(info *TidbClusterInfo) error {
 		glog.Infof("deploy tidb cluster end cluster[%s] namespace[%s]",
 			info.ClusterName, info.Namespace)
 	}()
-	err := oa.CreateSecret(info)
+	namespace := &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: info.Namespace,
+		},
+	}
+	_, err := oa.kubeCli.CoreV1().Namespaces().Create(namespace)
+	if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("failed to create namespace[%s]:%v", info.Namespace, err) + } + + err = oa.CreateSecret(info) if err != nil { return fmt.Errorf("failed to create secret of cluster [%s]: %v", info.ClusterName, err) } @@ -315,6 +341,12 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterInfo) error { } } + // delete all jobs + allJobsSet := label.Label{}.Instance(info.ClusterName).String() + if res, err := exec.Command("kubectl", "delete", "jobs", "-n", info.Namespace, "-l", allJobsSet).CombinedOutput(); err != nil { + return fmt.Errorf("failed to delete jobs: %v, %s", err, string(res)) + } + patchPVCmd := fmt.Sprintf(`kubectl get pv -l %s=%s,%s=%s --output=name | xargs -I {} \ kubectl patch {} -p '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}'`, label.NamespaceLabelKey, info.Namespace, label.InstanceLabelKey, info.ClusterName) diff --git a/tests/cmd/e2e/main.go b/tests/cmd/e2e/main.go index 8f61d35528..41c8a34238 100644 --- a/tests/cmd/e2e/main.go +++ b/tests/cmd/e2e/main.go @@ -15,17 +15,15 @@ package main import ( "fmt" - "net/http" _ "net/http/pprof" - "github.com/pingcap/tidb-operator/tests/pkg/workload" - "github.com/pingcap/tidb-operator/tests/pkg/workload/ddl" - "github.com/golang/glog" "github.com/jinzhu/copier" "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" "github.com/pingcap/tidb-operator/tests" "github.com/pingcap/tidb-operator/tests/backup" + "github.com/pingcap/tidb-operator/tests/pkg/workload" + "github.com/pingcap/tidb-operator/tests/pkg/workload/ddl" "k8s.io/apiserver/pkg/util/logs" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -41,16 +39,6 @@ func main() { glog.Fatalf("failed to parse config: %v", err) } - go func() { - glog.Info(http.ListenAndServe("localhost:6060", nil)) - }() - - // TODO read these args from config - beginTidbVersion := "v2.1.0" - toTidbVersion := "v2.1.4" - operatorTag := "master" - operatorImage := "pingcap/tidb-operator:latest" - cfg, err := rest.InClusterConfig() if err != nil { glog.Fatalf("failed to get config: %v", err) @@ -69,12 +57,17 @@ func main() { operatorInfo := &tests.OperatorInfo{ Namespace: "pingcap", ReleaseName: "operator", - Image: operatorImage, - Tag: operatorTag, - SchedulerImage: "gcr.io/google-containers/hyperkube:v1.12.1", + Image: conf.OperatorImage, + Tag: conf.OperatorTag, + SchedulerImage: "mirantis/hypokube", + SchedulerTag: "final", LogLevel: "2", } + initTidbVersion, err := conf.GetInitTidbVersion() + if err != nil { + glog.Fatal(err) + } // create database and table and insert a column for test backup and restore initSql := `"create database record;use record;create table test(t char(32))"` @@ -84,10 +77,10 @@ func main() { { Namespace: name1, ClusterName: name1, - OperatorTag: operatorTag, - PDImage: fmt.Sprintf("pingcap/pd:%s", beginTidbVersion), - TiKVImage: fmt.Sprintf("pingcap/tikv:%s", beginTidbVersion), - TiDBImage: fmt.Sprintf("pingcap/tidb:%s", beginTidbVersion), + OperatorTag: conf.OperatorTag, + PDImage: fmt.Sprintf("pingcap/pd:%s", initTidbVersion), + TiKVImage: fmt.Sprintf("pingcap/tikv:%s", initTidbVersion), + TiDBImage: fmt.Sprintf("pingcap/tidb:%s", initTidbVersion), StorageClassName: "local-storage", Password: "admin", InitSql: initSql, @@ -115,10 +108,10 @@ func main() { { Namespace: name2, ClusterName: name2, - OperatorTag: "master", - PDImage: fmt.Sprintf("pingcap/pd:%s", beginTidbVersion), - TiKVImage: fmt.Sprintf("pingcap/tikv:%s", beginTidbVersion), - TiDBImage: fmt.Sprintf("pingcap/tidb:%s", beginTidbVersion), + OperatorTag: conf.OperatorTag, + 
PDImage: fmt.Sprintf("pingcap/pd:%s", initTidbVersion), + TiKVImage: fmt.Sprintf("pingcap/tikv:%s", initTidbVersion), + TiDBImage: fmt.Sprintf("pingcap/tidb:%s", initTidbVersion), StorageClassName: "local-storage", Password: "admin", InitSql: initSql, @@ -231,6 +224,22 @@ func main() { } } + // upgrade test + upgradeTidbVersions := conf.GetUpgradeTidbVersions() + for _, upgradeTidbVersion := range upgradeTidbVersions { + for _, clusterInfo := range clusterInfos { + clusterInfo = clusterInfo.UpgradeAll(upgradeTidbVersion) + if err = oa.UpgradeTidbCluster(clusterInfo); err != nil { + glog.Fatal(err) + } + } + for _, clusterInfo := range clusterInfos { + if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil { + glog.Fatal(err) + } + } + } + return nil }, workloads...) @@ -238,19 +247,6 @@ func main() { glog.Fatal(err) } - for _, clusterInfo := range clusterInfos { - clusterInfo = clusterInfo.UpgradeAll(toTidbVersion) - if err = oa.UpgradeTidbCluster(clusterInfo); err != nil { - glog.Fatal(err) - } - } - - for _, clusterInfo := range clusterInfos { - if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil { - glog.Fatal(err) - } - } - // backup and restore backupClusterInfo := clusterInfos[0] restoreClusterInfo := &tests.TidbClusterInfo{} diff --git a/tests/config.go b/tests/config.go index 031bf4875e..df7bfffeaf 100644 --- a/tests/config.go +++ b/tests/config.go @@ -2,7 +2,9 @@ package tests import ( "flag" + "fmt" "io/ioutil" + "strings" yaml "gopkg.in/yaml.v2" ) @@ -11,6 +13,9 @@ import ( type Config struct { configFile string + TidbVersions string `yaml:"tidb_versions" json:"tidb_versions"` + OperatorTag string `yaml:"operator_tag" json:"operator_tag"` + OperatorImage string `yaml:"operator_image" json:"operator_image"` LogDir string `yaml:"log_dir" json:"log_dir"` FaultTriggerPort int `yaml:"fault_trigger_port" json:"fault_trigger_port"` Nodes []Nodes `yaml:"nodes" json:"nodes"` @@ -27,9 +32,12 @@ type Nodes struct { // NewConfig creates a new config. 
 func NewConfig() *Config {
 	cfg := &Config{}
-	flag.StringVar(&cfg.configFile, "config", "/etc/e2e/config.yaml", "Config file")
+	flag.StringVar(&cfg.configFile, "config", "", "Config file")
 	flag.StringVar(&cfg.LogDir, "log-dir", "/logDir", "log directory")
 	flag.IntVar(&cfg.FaultTriggerPort, "fault-trigger-port", 23332, "the http port of fault trigger service")
+	flag.StringVar(&cfg.TidbVersions, "tidb-versions", "v2.1.3,v2.1.4", "tidb versions")
+	flag.StringVar(&cfg.OperatorTag, "operator-tag", "master", "operator tag used to choose charts")
+	flag.StringVar(&cfg.OperatorImage, "operator-image", "pingcap/tidb-operator:latest", "operator image")
 
 	return cfg
 }
@@ -63,3 +71,18 @@ func (c *Config) configFromFile(path string) error {
 
 	return nil
 }
+
+func (c *Config) GetInitTidbVersion() (string, error) {
+	tidbVersions := strings.Split(c.TidbVersions, ",")
+	if len(tidbVersions) == 0 || tidbVersions[0] == "" {
+		return "", fmt.Errorf("init tidb version cannot be empty")
+	}
+
+	return tidbVersions[0], nil
+}
+
+func (c *Config) GetUpgradeTidbVersions() []string {
+	tidbVersions := strings.Split(c.TidbVersions, ",")
+
+	return tidbVersions[1:]
+}
diff --git a/tests/images/e2e/Dockerfile b/tests/images/e2e/Dockerfile
index b931c23ad6..5abe477cd3 100644
--- a/tests/images/e2e/Dockerfile
+++ b/tests/images/e2e/Dockerfile
@@ -14,4 +14,8 @@ RUN curl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl \
     rm -rf linux-amd64 && \
     rm helm-${HELM_VERSION}-linux-amd64.tar.gz
 
+ADD tidb-operator /charts/e2e/tidb-operator
+ADD tidb-cluster /charts/e2e/tidb-cluster
+ADD tidb-backup /charts/e2e/tidb-backup
+
 ADD bin/e2e /usr/local/bin/e2e
diff --git a/tests/manifests/e2e/e2e.yaml b/tests/manifests/e2e/e2e.yaml
new file mode 100644
index 0000000000..f99a23926f
--- /dev/null
+++ b/tests/manifests/e2e/e2e.yaml
@@ -0,0 +1,45 @@
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: tidb-operator-e2e
+subjects:
+- kind: ServiceAccount
+  namespace: tidb-operator-e2e
+  name: tidb-operator-e2e
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
+---
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  namespace: tidb-operator-e2e
+  name: tidb-operator-e2e
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  namespace: tidb-operator-e2e
+  name: tidb-operator-e2e
+spec:
+  serviceAccount: tidb-operator-e2e
+  containers:
+  - name: tidb-operator-e2e
+    image: ""
+    imagePullPolicy: Always
+    command:
+    - /usr/local/bin/e2e
+    - --operator-tag=e2e
+    - --operator-image=pingcap/tidb-operator:latest
+    - --tidb-versions=v2.1.3,v2.1.4
+    volumeMounts:
+    - mountPath: /logDir
+      name: logdir
+  volumes:
+  - name: logdir
+    hostPath:
+      path: /var/log
+      type: Directory
+  restartPolicy: Never
diff --git a/tests/manifests/stability/stability-configmap.yaml b/tests/manifests/stability/stability-configmap.yaml
new file mode 100644
index 0000000000..0af85753bb
--- /dev/null
+++ b/tests/manifests/stability/stability-configmap.yaml
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  namespace: tidb-operator-e2e
+  name: tidb-operator-e2e-config
+data:
+  e2e-config: |-
+    nodes:
+    - physical_node: 172.16.4.39
+      nodes:
+      - 172.16.4.171
+      - 172.16.4.172
+      - 172.16.4.173
+    - physical_node: 172.16.4.40
+      nodes:
+      - 172.16.4.174
+      - 172.16.4.175
+      - 172.16.4.176
+    etcds:
+    - physical_node: 172.16.4.39
+      nodes:
+      - 172.16.4.171
+      - 172.16.4.172
+      - 172.16.4.173
+    apiservers:
+    - physical_node: 172.16.4.39
+      nodes:
+      - 172.16.4.171
+      - 172.16.4.172
+      - 172.16.4.173
+
+
diff --git a/tests/manifests/stability/stability.yaml b/tests/manifests/stability/stability.yaml
new file mode 100644
index 0000000000..7c8eb32c07
--- /dev/null
+++ b/tests/manifests/stability/stability.yaml
@@ -0,0 +1,52 @@
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: tidb-operator-stability
+subjects:
+- kind: ServiceAccount
+  namespace: tidb-operator-stability
+  name: tidb-operator-stability
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
+---
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  namespace: tidb-operator-stability
+  name: tidb-operator-stability
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  namespace: tidb-operator-stability
+  name: tidb-operator-stability
+spec:
+  serviceAccount: tidb-operator-stability
+  containers:
+  - name: tidb-operator-stability
+    image: ""
+    imagePullPolicy: Always
+    command: ["/usr/local/bin/stability"]
+    args:
+    - --config=/etc/e2e/config.yaml
+    volumeMounts:
+    - mountPath: /logDir
+      name: logdir
+    - name: config
+      readOnly: true
+      mountPath: /etc/e2e
+  volumes:
+  - name: logdir
+    hostPath:
+      path: /var/log
+      type: Directory
+  - name: config
+    configMap:
+      name: tidb-operator-e2e-config
+      items:
+      - key: e2e-config
+        path: config.yaml
+  restartPolicy: Never
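Note (not part of the patch): a minimal Go sketch of how the new pieces fit together, assuming illustrative values only. The flags added in tests/config.go populate Config, GetInitTidbVersion/GetUpgradeTidbVersions split the version list into the initial deploy version and the upgrade path, and OperatorHelmSetString renders the single --set-string value that DeployOperator now passes to helm install.

package main

import (
	"fmt"

	"github.com/pingcap/tidb-operator/tests"
)

func main() {
	// Assumed values; in the e2e pod they come from the --tidb-versions,
	// --operator-tag and --operator-image flags declared in tests/config.go.
	conf := &tests.Config{
		TidbVersions:  "v2.1.3,v2.1.4",
		OperatorTag:   "e2e",
		OperatorImage: "pingcap/tidb-operator:latest",
	}

	// The first version deploys the clusters, the remaining ones drive the upgrade test.
	initVersion, err := conf.GetInitTidbVersion()
	if err != nil {
		panic(err)
	}
	fmt.Println(initVersion, conf.GetUpgradeTidbVersions()) // v2.1.3 [v2.1.4]

	op := &tests.OperatorInfo{
		Namespace:      "pingcap",
		ReleaseName:    "operator",
		Image:          conf.OperatorImage,
		Tag:            conf.OperatorTag,
		SchedulerImage: "mirantis/hypokube",
		SchedulerTag:   "final",
		LogLevel:       "2",
	}
	// Rendered into `helm install ... --set-string <value>` by DeployOperator.
	fmt.Println(op.OperatorHelmSetString(nil))
}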