Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

rewrite e2e #346

Merged
merged 17 commits into from
Mar 27, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@ images/tidb-operator/bin/
images/tidb-operator-e2e/bin/
images/tidb-operator-e2e/tidb-cluster/
images/tidb-operator-e2e/tidb-operator/
tests/images/e2e/tidb-cluster/
tests/images/e2e/tidb-operator/
*.tar
tmp/
data/
Expand Down
8 changes: 7 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -42,10 +42,16 @@ e2e-docker-push: e2e-docker
docker push "${DOCKER_REGISTRY}/pingcap/tidb-operator-e2e:latest"

e2e-docker: e2e-build
[[ -d tests/images/e2e/tidb-operator ]] && rm -r tests/images/e2e/tidb-operator || true
[[ -d tests/images/e2e/tidb-cluster ]] && rm -r tests/images/e2e/tidb-cluster || true
[[ -d tests/images/e2e/tidb-backup ]] && rm -r tests/images/e2e/tidb-backup || true
cp -r charts/tidb-operator tests/images/e2e
cp -r charts/tidb-cluster tests/images/e2e
cp -r charts/tidb-backup tests/images/e2e
docker build -t "${DOCKER_REGISTRY}/pingcap/tidb-operator-e2e:latest" tests/images/e2e

e2e-build:
$(GOENV) ginkgo build tests/e2e
$(GO) -ldflags '$(LDFLAGS)' -o tests/images/e2e/bin/e2e tests/cmd/e2e/main.go

test:
@echo "Run unit tests"
Expand Down
60 changes: 46 additions & 14 deletions tests/actions.go
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,7 @@ type OperatorInfo struct {
Image string
Tag string
SchedulerImage string
SchedulerTag string
LogLevel string
}

Expand Down Expand Up @@ -196,28 +197,43 @@ func (tc *TidbClusterInfo) TidbClusterHelmSetString(m map[string]string) string
return strings.Join(arr, ",")
}

func (oa *operatorActions) DeployOperator(info *OperatorInfo) error {
if err := cloneOperatorRepo(); err != nil {
return err
func (oi *OperatorInfo) OperatorHelmSetString(m map[string]string) string {
set := map[string]string{
"operatorImage": oi.Image,
"controllerManager.autoFailover": "true",
"scheduler.kubeSchedulerImageName": oi.SchedulerImage,
"controllerManager.logLevel": oi.LogLevel,
"scheduler.logLevel": "2",
}
if err := checkoutTag(info.Tag); err != nil {
return err
if oi.SchedulerTag != "" {
set["scheduler.kubeSchedulerImageTag"] = oi.SchedulerTag
}

arr := make([]string, 0, len(set))
for k, v := range set {
arr = append(arr, fmt.Sprintf("%s=%s", k, v))
}
return strings.Join(arr, ",")
}

func (oa *operatorActions) DeployOperator(info *OperatorInfo) error {
if info.Tag != "e2e" {
if err := cloneOperatorRepo(); err != nil {
return err
}
if err := checkoutTag(info.Tag); err != nil {
return err
}
}

cmd := fmt.Sprintf(`helm install /charts/%s/tidb-operator \
--name %s \
--namespace %s \
--set operatorImage=%s \
--set controllerManager.autoFailover=true \
--set scheduler.kubeSchedulerImage=%s \
--set controllerManager.logLevel=%s \
--set scheduler.logLevel=2`,
--set-string %s`,
info.Tag,
info.ReleaseName,
info.Namespace,
info.Image,
info.SchedulerImage,
info.LogLevel)
info.OperatorHelmSetString(nil))
glog.Info(cmd)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
Expand Down Expand Up @@ -257,7 +273,17 @@ func (oa *operatorActions) DeployTidbCluster(info *TidbClusterInfo) error {
glog.Infof("deploy tidb cluster end cluster[%s] namespace[%s]", info.ClusterName, info.Namespace)
}()

err := oa.CreateSecret(info)
namespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: info.Namespace,
},
}
_, err := oa.kubeCli.CoreV1().Namespaces().Create(namespace)
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("failed to create namespace[%s]:%v", info.Namespace, err)
}

err = oa.CreateSecret(info)
if err != nil {
return fmt.Errorf("failed to create secret of cluster [%s]: %v", info.ClusterName, err)
}
Expand Down Expand Up @@ -315,6 +341,12 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterInfo) error {
}
}

// delete all jobs
allJobsSet := label.Label{}.Instance(info.ClusterName).String()
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
allJobsSet := label.Label{}.Instance(info.ClusterName).String()
allJobsSet := label.New().Instance(info.ClusterName).String()

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

the namespace contains some jobs with the label app.kubernetes.io/managed-by: Tiller.
these jobs cannot be selected by label.New()

if res, err := exec.Command("kubectl", "delete", "jobs", "-n", info.Namespace, "-l", allJobsSet).CombinedOutput(); err != nil {
return fmt.Errorf("failed to delete jobs: %v, %s", err, string(res))
}

patchPVCmd := fmt.Sprintf(`kubectl get pv -l %s=%s,%s=%s --output=name | xargs -I {} \
kubectl patch {} -p '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}'`,
label.NamespaceLabelKey, info.Namespace, label.InstanceLabelKey, info.ClusterName)
Expand Down
72 changes: 34 additions & 38 deletions tests/cmd/e2e/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,17 +15,15 @@ package main

import (
"fmt"
"net/http"
_ "net/http/pprof"

"github.com/pingcap/tidb-operator/tests/pkg/workload"
"github.com/pingcap/tidb-operator/tests/pkg/workload/ddl"

"github.com/golang/glog"
"github.com/jinzhu/copier"
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
"github.com/pingcap/tidb-operator/tests"
"github.com/pingcap/tidb-operator/tests/backup"
"github.com/pingcap/tidb-operator/tests/pkg/workload"
"github.com/pingcap/tidb-operator/tests/pkg/workload/ddl"
"k8s.io/apiserver/pkg/util/logs"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
Expand All @@ -41,16 +39,6 @@ func main() {
glog.Fatalf("failed to parse config: %v", err)
}

go func() {
glog.Info(http.ListenAndServe("localhost:6060", nil))
}()

// TODO read these args from config
beginTidbVersion := "v2.1.0"
toTidbVersion := "v2.1.4"
operatorTag := "master"
operatorImage := "pingcap/tidb-operator:latest"

cfg, err := rest.InClusterConfig()
if err != nil {
glog.Fatalf("failed to get config: %v", err)
Expand All @@ -69,12 +57,17 @@ func main() {
operatorInfo := &tests.OperatorInfo{
Namespace: "pingcap",
ReleaseName: "operator",
Image: operatorImage,
Tag: operatorTag,
SchedulerImage: "gcr.io/google-containers/hyperkube:v1.12.1",
Image: conf.OperatorImage,
Tag: conf.OperatorTag,
SchedulerImage: "mirantis/hypokube",
SchedulerTag: "final",
LogLevel: "2",
}

initTidbVersion, err := conf.GetInitTidbVersion()
if err != nil {
glog.Fatal(err)
}
// create database and table and insert a column for test backup and restore
initSql := `"create database record;use record;create table test(t char(32))"`

Expand All @@ -84,10 +77,10 @@ func main() {
{
Namespace: name1,
ClusterName: name1,
OperatorTag: operatorTag,
PDImage: fmt.Sprintf("pingcap/pd:%s", beginTidbVersion),
TiKVImage: fmt.Sprintf("pingcap/tikv:%s", beginTidbVersion),
TiDBImage: fmt.Sprintf("pingcap/tidb:%s", beginTidbVersion),
OperatorTag: conf.OperatorTag,
PDImage: fmt.Sprintf("pingcap/pd:%s", initTidbVersion),
TiKVImage: fmt.Sprintf("pingcap/tikv:%s", initTidbVersion),
TiDBImage: fmt.Sprintf("pingcap/tidb:%s", initTidbVersion),
StorageClassName: "local-storage",
Password: "admin",
InitSql: initSql,
Expand Down Expand Up @@ -115,10 +108,10 @@ func main() {
{
Namespace: name2,
ClusterName: name2,
OperatorTag: "master",
PDImage: fmt.Sprintf("pingcap/pd:%s", beginTidbVersion),
TiKVImage: fmt.Sprintf("pingcap/tikv:%s", beginTidbVersion),
TiDBImage: fmt.Sprintf("pingcap/tidb:%s", beginTidbVersion),
OperatorTag: conf.OperatorTag,
PDImage: fmt.Sprintf("pingcap/pd:%s", initTidbVersion),
TiKVImage: fmt.Sprintf("pingcap/tikv:%s", initTidbVersion),
TiDBImage: fmt.Sprintf("pingcap/tidb:%s", initTidbVersion),
StorageClassName: "local-storage",
Password: "admin",
InitSql: initSql,
Expand Down Expand Up @@ -231,26 +224,29 @@ func main() {
}
}

// upgrade test
upgradeTidbVersions := conf.GetUpgradeTidbVersions()
for _, upgradeTidbVersion := range upgradeTidbVersions {
for _, clusterInfo := range clusterInfos {
clusterInfo = clusterInfo.UpgradeAll(upgradeTidbVersion)
if err = oa.UpgradeTidbCluster(clusterInfo); err != nil {
glog.Fatal(err)
}
}
for _, clusterInfo := range clusterInfos {
if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
glog.Fatal(err)
}
}
}

return nil
}, workloads...)

if err != nil {
glog.Fatal(err)
}

for _, clusterInfo := range clusterInfos {
clusterInfo = clusterInfo.UpgradeAll(toTidbVersion)
if err = oa.UpgradeTidbCluster(clusterInfo); err != nil {
glog.Fatal(err)
}
}

for _, clusterInfo := range clusterInfos {
if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
glog.Fatal(err)
}
}

// backup and restore
backupClusterInfo := clusterInfos[0]
restoreClusterInfo := &tests.TidbClusterInfo{}
Expand Down
25 changes: 24 additions & 1 deletion tests/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,9 @@ package tests

import (
"flag"
"fmt"
"io/ioutil"
"strings"

yaml "gopkg.in/yaml.v2"
)
Expand All @@ -11,6 +13,9 @@ import (
type Config struct {
configFile string

TidbVersions string `yaml:"tidb_versions" json:"tidb_versions"`
OperatorTag string `yaml:"operator_tag" json:"operator_tag"`
OperatorImage string `yaml:"operator_image" json:"operator_image"`
LogDir string `yaml:"log_dir" json:"log_dir"`
FaultTriggerPort int `yaml:"fault_trigger_port" json:"fault_trigger_port"`
Nodes []Nodes `yaml:"nodes" json:"nodes"`
Expand All @@ -27,9 +32,12 @@ type Nodes struct {
// NewConfig creates a new Config and registers its command-line flags.
// Flag parsing itself is left to the caller (flag.Parse / Parse method).
// NOTE: this must be called at most once per process — registering the same
// flag name twice makes the flag package panic ("flag redefined").
func NewConfig() *Config {
	cfg := &Config{}
	// Empty default: when no config file is given, only flag/zero values apply.
	flag.StringVar(&cfg.configFile, "config", "", "Config file")
	flag.StringVar(&cfg.LogDir, "log-dir", "/logDir", "log directory")
	flag.IntVar(&cfg.FaultTriggerPort, "fault-trigger-port", 23332, "the http port of fault trigger service")
	// Comma-separated list: first entry is the initial deploy version,
	// the rest are upgrade targets (see GetInitTidbVersion / GetUpgradeTidbVersions).
	flag.StringVar(&cfg.TidbVersions, "tidb-versions", "v2.1.3,v2.1.4", "tidb versions")
	flag.StringVar(&cfg.OperatorTag, "operator-tag", "master", "operator tag used to choose charts")
	flag.StringVar(&cfg.OperatorImage, "operator-image", "pingcap/tidb-operator:latest", "operator image")

	return cfg
}
Expand Down Expand Up @@ -63,3 +71,18 @@ func (c *Config) configFromFile(path string) error {

return nil
}

// GetInitTidbVersion returns the first entry of the comma-separated
// TidbVersions list — the version clusters are initially deployed with.
// It returns an error when no versions were configured.
func (c *Config) GetInitTidbVersion() (string, error) {
	// strings.Split never yields an empty slice (splitting "" returns [""]),
	// so a len(...) == 0 check would be dead code and an empty config would
	// silently produce "" — test the raw string instead.
	if c.TidbVersions == "" {
		return "", fmt.Errorf("init tidb versions can not be nil")
	}

	return strings.Split(c.TidbVersions, ",")[0], nil
}

// GetUpgradeTidbVersions returns every entry after the first in the
// comma-separated TidbVersions list; these are applied in order as
// upgrade targets. The result may be empty but is safe to range over.
func (c *Config) GetUpgradeTidbVersions() []string {
	// strings.Split always returns at least one element, so the [1:]
	// re-slice below can never panic.
	all := strings.Split(c.TidbVersions, ",")
	return all[1:]
}
4 changes: 4 additions & 0 deletions tests/images/e2e/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,8 @@ RUN curl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VER
rm -rf linux-amd64 && \
rm helm-${HELM_VERSION}-linux-amd64.tar.gz

ADD tidb-operator /charts/e2e/tidb-operator
ADD tidb-cluster /charts/e2e/tidb-cluster
ADD tidb-backup /charts/e2e/tidb-backup

ADD bin/e2e /usr/local/bin/e2e
45 changes: 45 additions & 0 deletions tests/manifests/e2e/e2e.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: tidb-operator-e2e
subjects:
- kind: ServiceAccount
namespace: tidb-operator-e2e
name: tidb-operator-e2e
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: ServiceAccount
apiVersion: v1
metadata:
namespace: tidb-operator-e2e
name: tidb-operator-e2e
---
apiVersion: v1
kind: Pod
metadata:
namespace: tidb-operator-e2e
name: tidb-operator-e2e
spec:
serviceAccount: tidb-operator-e2e
containers:
- name: tidb-operator-e2e
image: ""
imagePullPolicy: Always
command:
- /usr/local/bin/e2e
- --operator-tag=e2e
- --operator-image=pingcap/tidb-operator:latest
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
- --operator-image=pingcap/tidb-operator:latest
- --operator-image=localhost:5000/pingcap/tidb-operator:latest

- --tidb-versions=v2.1.3,v2.1.4
volumeMounts:
- mountPath: /logDir
name: logdir
volumes:
- name: logdir
hostPath:
path: /var/log
type: Directory
restartPolicy: Never
32 changes: 32 additions & 0 deletions tests/manifests/stability/stability-configmap.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: tidb-operator-e2e
name: tidb-operator-e2e-config
data:
e2e-config: |-
nodes:
- physical_node: 172.16.4.39
nodes:
- 172.16.4.171
- 172.16.4.172
- 172.16.4.173
- physical_node: 172.16.4.40
nodes:
- 172.16.4.174
- 172.16.4.175
- 172.16.4.176
etcds:
- physical_node: 172.16.4.39
nodes:
- 172.16.4.171
- 172.16.4.172
- 172.16.4.173
apiservers:
- physical_node: 172.16.4.39
nodes:
- 172.16.4.171
- 172.16.4.172
- 172.16.4.173


Loading