diff --git a/charts/tidb-cluster/templates/_helpers.tpl b/charts/tidb-cluster/templates/_helpers.tpl
index 0791ff1541..3cefc5e1b5 100644
--- a/charts/tidb-cluster/templates/_helpers.tpl
+++ b/charts/tidb-cluster/templates/_helpers.tpl
@@ -25,4 +25,63 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
 {{- define "cluster.name" -}}
 {{- default .Release.Name .Values.clusterName }}
-{{- end -}}
\ No newline at end of file
+{{- end -}}
+
+{{/*
+Encapsulate PD configmap data for consistent digest calculation
+*/}}
+{{- define "pd-configmap.data" -}}
+startup-script: |-
+{{ tuple "scripts/_start_pd.sh.tpl" . | include "helm-toolkit.utils.template" | indent 2 }}
+config-file: |-
+  {{- if .Values.pd.config }}
+{{ .Values.pd.config | indent 2 }}
+  {{- else }}
+{{ tuple "config/_pd-config.tpl" . | include "helm-toolkit.utils.template" | indent 2 }}
+  {{- end -}}
+{{- end -}}
+
+{{- define "pd-configmap.data-digest" -}}
+{{ include "pd-configmap.data" . | sha256sum | trunc 8 }}
+{{- end -}}
+
+{{/*
+Encapsulate TiKV configmap data for consistent digest calculation
+*/}}
+{{- define "tikv-configmap.data" -}}
+startup-script: |-
+{{ tuple "scripts/_start_tikv.sh.tpl" . | include "helm-toolkit.utils.template" | indent 2 }}
+config-file: |-
+  {{- if .Values.tikv.config }}
+{{ .Values.tikv.config | indent 2 }}
+  {{- else }}
+{{ tuple "config/_tikv-config.tpl" . | include "helm-toolkit.utils.template" | indent 2 }}
+  {{- end -}}
+{{- end -}}
+
+{{- define "tikv-configmap.data-digest" -}}
+{{ include "tikv-configmap.data" . | sha256sum | trunc 8 }}
+{{- end -}}
+
+{{/*
+Encapsulate TiDB configmap data for consistent digest calculation
+*/}}
+{{- define "tidb-configmap.data" -}}
+startup-script: |-
+{{ tuple "scripts/_start_tidb.sh.tpl" . | include "helm-toolkit.utils.template" | indent 2 }}
+  {{- if .Values.tidb.initSql }}
+init-sql: |-
+{{ .Values.tidb.initSql | indent 2 }}
+  {{- end }}
+config-file: |-
+  {{- if .Values.tidb.config }}
+{{ .Values.tidb.config | indent 2 }}
+  {{- else }}
+{{ tuple "config/_tidb-config.tpl" . | include "helm-toolkit.utils.template" | indent 2 }}
+  {{- end -}}
+{{- end -}}
+
+{{- define "tidb-configmap.data-digest" -}}
+{{ include "tidb-configmap.data" . | sha256sum | trunc 8 }}
+{{- end -}}
+
diff --git a/charts/tidb-cluster/templates/pd-configmap.yaml b/charts/tidb-cluster/templates/pd-configmap.yaml
index 683a7a7f1d..53bdcd32d1 100644
--- a/charts/tidb-cluster/templates/pd-configmap.yaml
+++ b/charts/tidb-cluster/templates/pd-configmap.yaml
@@ -1,7 +1,11 @@
 apiVersion: v1
 kind: ConfigMap
 metadata:
+{{- if .Values.enableConfigMapRollout }}
+  name: {{ template "cluster.name" . }}-pd-{{ template "pd-configmap.data-digest" . }}
+{{- else }}
   name: {{ template "cluster.name" . }}-pd
+{{- end }}
   labels:
     app.kubernetes.io/name: {{ template "chart.name" . }}
     app.kubernetes.io/managed-by: {{ .Release.Service }}
@@ -9,12 +13,4 @@ metadata:
     app.kubernetes.io/component: pd
     helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
 data:
-  startup-script: |-
-{{ tuple "scripts/_start_pd.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-
-  config-file: |-
-    {{- if .Values.pd.config }}
-{{ .Values.pd.config | indent 4 }}
-    {{- else }}
-{{ tuple "config/_pd-config.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-    {{- end -}}
+{{ include "pd-configmap.data" . | indent 2 }}
diff --git a/charts/tidb-cluster/templates/tidb-cluster.yaml b/charts/tidb-cluster/templates/tidb-cluster.yaml
index 8eab712c3c..c8e85bafd3 100644
--- a/charts/tidb-cluster/templates/tidb-cluster.yaml
+++ b/charts/tidb-cluster/templates/tidb-cluster.yaml
@@ -2,6 +2,12 @@ apiVersion: pingcap.com/v1alpha1
 kind: TidbCluster
 metadata:
   name: {{ template "cluster.name" . }}
+{{- if .Values.enableConfigMapRollout }}
+  annotations:
+    pingcap.com/pd.{{ template "cluster.name" . }}-pd.sha: {{ template "pd-configmap.data-digest" . }}
+    pingcap.com/tikv.{{ template "cluster.name" . }}-tikv.sha: {{ template "tikv-configmap.data-digest" . }}
+    pingcap.com/tidb.{{ template "cluster.name" . }}-tidb.sha: {{ template "tidb-configmap.data-digest" . }}
+{{- end }}
   labels:
     app.kubernetes.io/name: {{ template "chart.name" . }}
     app.kubernetes.io/managed-by: {{ .Release.Service }}
diff --git a/charts/tidb-cluster/templates/tidb-configmap.yaml b/charts/tidb-cluster/templates/tidb-configmap.yaml
index 6d01bda5d4..f27cf32408 100644
--- a/charts/tidb-cluster/templates/tidb-configmap.yaml
+++ b/charts/tidb-cluster/templates/tidb-configmap.yaml
@@ -1,7 +1,11 @@
 apiVersion: v1
 kind: ConfigMap
 metadata:
+{{- if .Values.enableConfigMapRollout }}
+  name: {{ template "cluster.name" . }}-tidb-{{ template "tidb-configmap.data-digest" . }}
+{{- else }}
   name: {{ template "cluster.name" . }}-tidb
+{{- end }}
   labels:
     app.kubernetes.io/name: {{ template "chart.name" . }}
     app.kubernetes.io/managed-by: {{ .Release.Service }}
@@ -9,15 +13,4 @@ metadata:
     app.kubernetes.io/component: tidb
     helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
 data:
-  startup-script: |-
-{{ tuple "scripts/_start_tidb.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-  {{- if .Values.tidb.initSql }}
-  init-sql: |-
-{{ .Values.tidb.initSql | indent 4 }}
-  {{- end }}
-  config-file: |-
-    {{- if .Values.tidb.config }}
-{{ .Values.tidb.config | indent 4 }}
-    {{- else }}
-{{ tuple "config/_tidb-config.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-    {{- end -}}
+{{ include "tidb-configmap.data" . | indent 2 }}
diff --git a/charts/tidb-cluster/templates/tidb-initializer-job.yaml b/charts/tidb-cluster/templates/tidb-initializer-job.yaml
index e4c3eef9eb..6539594f57 100644
--- a/charts/tidb-cluster/templates/tidb-initializer-job.yaml
+++ b/charts/tidb-cluster/templates/tidb-initializer-job.yaml
@@ -3,6 +3,8 @@ apiVersion: batch/v1
 kind: Job
 metadata:
   name: {{ template "cluster.name" . }}-tidb-initializer
+  annotations:
+    "helm.sh/hook": post-install
   labels:
     app.kubernetes.io/name: {{ template "chart.name" . }}
     app.kubernetes.io/managed-by: {{ .Release.Service }}
@@ -44,7 +46,11 @@ spec:
       {{- if .Values.tidb.initSql }}
       - name: init-sql
         configMap:
+        {{- if .Values.enableConfigMapRollout }}
+          name: {{ template "cluster.name" . }}-tidb-{{ template "tidb-configmap.data-digest" . }}
+        {{- else }}
           name: {{ template "cluster.name" . }}-tidb
+        {{- end }}
           items:
           - key: init-sql
             path: init.sql
diff --git a/charts/tidb-cluster/templates/tikv-configmap.yaml b/charts/tidb-cluster/templates/tikv-configmap.yaml
index 8d80451d6d..434a366fca 100644
--- a/charts/tidb-cluster/templates/tikv-configmap.yaml
+++ b/charts/tidb-cluster/templates/tikv-configmap.yaml
@@ -1,7 +1,11 @@
 apiVersion: v1
 kind: ConfigMap
 metadata:
+{{- if .Values.enableConfigMapRollout }}
+  name: {{ template "cluster.name" . }}-tikv-{{ template "tikv-configmap.data-digest" . }}
+{{- else }}
   name: {{ template "cluster.name" . }}-tikv
+{{- end }}
   labels:
     app.kubernetes.io/name: {{ template "chart.name" . }}
     app.kubernetes.io/managed-by: {{ .Release.Service }}
@@ -9,12 +13,4 @@ metadata:
     app.kubernetes.io/component: tikv
     helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
 data:
-  startup-script: |-
-{{ tuple "scripts/_start_tikv.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-
-  config-file: |-
-    {{- if .Values.tikv.config }}
-{{ .Values.tikv.config | indent 4 }}
-    {{- else }}
-{{ tuple "config/_tikv-config.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-    {{- end -}}
+{{ include "tikv-configmap.data" . | indent 2 }}
diff --git a/charts/tidb-cluster/values.yaml b/charts/tidb-cluster/values.yaml
index 16065aea12..8a9a5a4f90 100644
--- a/charts/tidb-cluster/values.yaml
+++ b/charts/tidb-cluster/values.yaml
@@ -40,6 +40,13 @@ discovery:
     cpu: 80m
     memory: 50Mi

+# Whether to enable ConfigMap rollout management.
+# When enabled, a change to the ConfigMap will trigger a graceful rolling update of the component.
+# This feature is only available in tidb-operator v1.0 or higher.
+# Note: switching this variable on an existing cluster will cause a rolling update of each component,
+# even if the ConfigMap has not changed.
+enableConfigMapRollout: false
+
 pd:
   replicas: 3
   image: pingcap/pd:v2.1.8
diff --git a/docs/operation-guide.md b/docs/operation-guide.md
index 8ab0fc53f2..daed8a12eb 100644
--- a/docs/operation-guide.md
+++ b/docs/operation-guide.md
@@ -104,6 +104,12 @@ $ helm upgrade ${releaseName} charts/tidb-cluster

 For minor version upgrade, updating the `image` should be enough. When TiDB major version is out, the better way to update is to fetch the new charts from tidb-operator and then merge the old values.yaml with new values.yaml. And then upgrade as above.

+## Change TiDB cluster configuration
+
+Since `v1.0.0`, TiDB operator can perform a rolling update when the configuration changes. This feature is disabled by default for backward compatibility; you can enable it by setting `enableConfigMapRollout` to `true` in your Helm values file.
+
+> WARN: changing this variable on a running cluster will trigger a rolling update of the PD/TiKV/TiDB pods even if there is no configuration change.
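+
+For example, once the feature is enabled, a configuration change is rolled out with a plain `helm upgrade`. The command below is a sketch that reuses the `${releaseName}` placeholder from the upgrade section above; `pd.logLevel` stands in for whichever chart value you want to change:
+
+```shell
+$ helm upgrade ${releaseName} charts/tidb-cluster --set enableConfigMapRollout=true,pd.logLevel=debug
+```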
+
 ## Destroy TiDB cluster

 To destroy TiDB cluster, run the following command:
diff --git a/go.mod b/go.mod
index 6b3ce80983..b4cfd7202a 100644
--- a/go.mod
+++ b/go.mod
@@ -61,12 +61,14 @@ require (
 	github.com/onsi/gomega v1.4.1
 	github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
 	github.com/opencontainers/image-spec v1.0.1 // indirect
+	github.com/opentracing/opentracing-go v1.1.0 // indirect
 	github.com/pborman/uuid v1.2.0 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pingcap/check v0.0.0-20171206051426-1c287c953996 // indirect
 	github.com/pingcap/errors v0.11.0
 	github.com/pingcap/kvproto v0.0.0-20180606093822-b7ba8ea1c0b4
 	github.com/pingcap/pd v2.1.0-beta+incompatible
+	github.com/pingcap/tidb v2.1.0-beta+incompatible
 	github.com/pkg/errors v0.8.0 // indirect
 	github.com/prometheus/client_golang v0.8.0
 	github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect
@@ -80,6 +82,8 @@
 	github.com/spf13/cobra v0.0.3
 	github.com/spf13/pflag v1.0.3
 	github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 // indirect
+	github.com/uber/jaeger-client-go v2.16.0+incompatible // indirect
+	github.com/uber/jaeger-lib v2.0.0+incompatible // indirect
 	github.com/ugorji/go v1.1.1 // indirect
 	github.com/unrolled/render v0.0.0-20180807193321-4206df6ff701 // indirect
 	github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 // indirect
diff --git a/go.sum b/go.sum
index 1865a3364c..e9281d27ab 100644
--- a/go.sum
+++ b/go.sum
@@ -148,6 +148,8 @@ github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2i
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
@@ -160,6 +162,8 @@ github.com/pingcap/kvproto v0.0.0-20180606093822-b7ba8ea1c0b4 h1:fYmrrTx2rWqgvlv
 github.com/pingcap/kvproto v0.0.0-20180606093822-b7ba8ea1c0b4/go.mod h1:0gwbe1F2iBIjuQ9AH0DbQhL+Dpr5GofU8fgYyXk+ykk=
 github.com/pingcap/pd v2.1.0-beta+incompatible h1:DZrskt6POM+zhTZvUjUJKXV9OVWpV7cdcq1tXQGOiCY=
 github.com/pingcap/pd v2.1.0-beta+incompatible/go.mod h1:nD3+EoYes4+aNNODO99ES59V83MZSI+dFbhyr667a0E=
+github.com/pingcap/tidb v2.1.0-beta+incompatible h1:SQUmscnvvjHLjaIycQqtHujBahUnlKwTz6dQQhqgGSc=
+github.com/pingcap/tidb v2.1.0-beta+incompatible/go.mod h1:I8C6jrPINP2rrVunTRd7C9fRRhQrtR43S1/CL5ix/yQ=
 github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -192,6 +196,10 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 h1:lYIiVDtZnyTWlNwiAxLj0bbpTcx1BWCFhXjfsvmPdNc=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY=
+github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw=
+github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
 github.com/ugorji/go v1.1.1 h1:gmervu+jDMvXTbcHQ0pd2wee85nEoE0BsVyEuzkfK8w=
 github.com/ugorji/go v1.1.1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
 github.com/unrolled/render v0.0.0-20180807193321-4206df6ff701 h1:BJ/T25enw0WcbWqV132hGXRQdqCqe9XBzqh4AWVH7Bc=
diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go
index faee7142f2..ba2b69d526 100644
--- a/pkg/controller/controller_utils.go
+++ b/pkg/controller/controller_utils.go
@@ -155,6 +155,24 @@ func AnnProm(port int32) map[string]string {
 	}
 }

+// MemberConfigMapName returns the default ConfigMap name of the specified member type
+func MemberConfigMapName(tc *v1alpha1.TidbCluster, member v1alpha1.MemberType) string {
+	nameKey := fmt.Sprintf("%s-%s", tc.Name, member)
+	return nameKey + getConfigMapSuffix(tc, member.String(), nameKey)
+}
+
+// getConfigMapSuffix returns the ConfigMap name suffix
+func getConfigMapSuffix(tc *v1alpha1.TidbCluster, component string, name string) string {
+	if tc.Annotations == nil {
+		return ""
+	}
+	sha := tc.Annotations[fmt.Sprintf("pingcap.com/%s.%s.sha", component, name)]
+	if len(sha) == 0 {
+		return ""
+	}
+	return "-" + sha
+}
+
 // setIfNotEmpty set the value into map when value in not empty
 func setIfNotEmpty(container map[string]string, key, value string) {
 	if value != "" {
diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go
index 9c9b0a75db..503c921a4b 100644
--- a/pkg/controller/controller_utils_test.go
+++ b/pkg/controller/controller_utils_test.go
@@ -185,6 +185,82 @@ func TestAnnProm(t *testing.T) {
 	g.Expect(ann["prometheus.io/port"]).To(Equal("9090"))
 }

+func TestMemberConfigMapName(t *testing.T) {
+	g := NewGomegaWithT(t)
+
+	type testcase struct {
+		name        string
+		annotations map[string]string
+		tcName      string
+		member      v1alpha1.MemberType
+		expectFn    func(*GomegaWithT, string)
+	}
+	testFn := func(test *testcase, t *testing.T) {
+		t.Log(test.name)
+		tc := &v1alpha1.TidbCluster{}
+		tc.Name = test.tcName
+		tc.Annotations = test.annotations
+		test.expectFn(g, MemberConfigMapName(tc, test.member))
+	}
+	tests := []testcase{
+		{
+			name:        "backward compatible when no annotations set",
+			annotations: map[string]string{},
+			tcName:      "cluster-name",
+			member:      v1alpha1.TiKVMemberType,
+			expectFn: func(g *GomegaWithT, s string) {
+				g.Expect(s).To(Equal("cluster-name-tikv"))
+			},
+		},
+		{
+			name: "configmap digest present",
+			annotations: map[string]string{
+				"pingcap.com/tikv.cluster-name-tikv.sha": "uuuuuuuu",
+			},
+			tcName: "cluster-name",
+			member: v1alpha1.TiKVMemberType,
+			expectFn: func(g *GomegaWithT, s string) {
+				g.Expect(s).To(Equal("cluster-name-tikv-uuuuuuuu"))
+			},
+		},
+		{
+			name:        "nil annotations",
+			annotations: nil,
+			tcName:      "cluster-name",
+			member:      v1alpha1.TiKVMemberType,
+			expectFn: func(g *GomegaWithT, s string) {
+				g.Expect(s).To(Equal("cluster-name-tikv"))
+			},
+		},
+		{
+			name: "annotation present with an empty value",
+			annotations: map[string]string{
+				"pingcap.com/tikv.cluster-name-tikv.sha": "",
+			},
+			tcName: "cluster-name",
+			member: v1alpha1.TiKVMemberType,
+			expectFn: func(g *GomegaWithT, s string) {
+				g.Expect(s).To(Equal("cluster-name-tikv"))
+			},
+		},
+		{
+			name: "no matching annotation key",
+			annotations: map[string]string{
+				"pingcap.com/pd.cluster-name-tikv.sha": "",
+			},
+			tcName: "cluster-name",
+			member: v1alpha1.TiKVMemberType,
+			expectFn: func(g *GomegaWithT, s string) {
+				g.Expect(s).To(Equal("cluster-name-tikv"))
+			},
+		},
+	}
+
+	for i := range tests {
+		testFn(&tests[i], t)
+	}
+}
+
 func TestSetIfNotEmpty(t *testing.T) {
 	g := NewGomegaWithT(t)
diff --git a/pkg/controller/tidb_control.go b/pkg/controller/tidb_control.go
index ad3d685198..0907fbda76 100644
--- a/pkg/controller/tidb_control.go
+++ b/pkg/controller/tidb_control.go
@@ -20,6 +20,7 @@ import (
 	"net/http"

 	"github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1"
+	"github.com/pingcap/tidb/config"
 )

 const (
@@ -40,6 +41,8 @@ type TiDBControlInterface interface {
 	ResignDDLOwner(tc *v1alpha1.TidbCluster, ordinal int32) (bool, error)
 	// Get TIDB info return tidb's dbInfo
 	GetInfo(tc *v1alpha1.TidbCluster, ordinal int32) (*dbInfo, error)
	// GetSettings returns the TiDB instance settings
+	GetSettings(tc *v1alpha1.TidbCluster, ordinal int32) (*config.Config, error)
 }

 // defaultTiDBControl is default implementation of TiDBControlInterface.
@@ -127,6 +130,37 @@ func (tdc *defaultTiDBControl) GetInfo(tc *v1alpha1.TidbCluster, ordinal int32)
 	return &info, nil
 }

+func (tdc *defaultTiDBControl) GetSettings(tc *v1alpha1.TidbCluster, ordinal int32) (*config.Config, error) {
+	tcName := tc.GetName()
+	ns := tc.GetNamespace()
+
+	hostName := fmt.Sprintf("%s-%d", TiDBMemberName(tcName), ordinal)
+	url := fmt.Sprintf("http://%s.%s.%s:10080/settings", hostName, TiDBPeerMemberName(tcName), ns)
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	res, err := tdc.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer DeferClose(res.Body, &err)
+	if res.StatusCode != http.StatusOK {
+		errMsg := fmt.Errorf("Error response %v", res.StatusCode)
+		return nil, errMsg
+	}
+	body, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+	info := config.Config{}
+	err = json.Unmarshal(body, &info)
+	if err != nil {
+		return nil, err
+	}
+	return &info, nil
+}
+
 func (tdc *defaultTiDBControl) getBodyOK(apiURL string) ([]byte, error) {
 	res, err := tdc.httpClient.Get(apiURL)
 	if err != nil {
@@ -152,6 +186,7 @@ type FakeTiDBControl struct {
 	notDDLOwner  bool
 	tidbInfo     *dbInfo
 	getInfoError error
+	tidbConfig   *config.Config
 }

 // NewFakeTiDBControl returns a FakeTiDBControl instance
@@ -185,3 +220,7 @@ func (ftd *FakeTiDBControl) ResignDDLOwner(tc *v1alpha1.TidbCluster, ordinal int
 func (ftd *FakeTiDBControl) GetInfo(tc *v1alpha1.TidbCluster, ordinal int32) (*dbInfo, error) {
 	return ftd.tidbInfo, ftd.getInfoError
 }
+
+func (ftd *FakeTiDBControl) GetSettings(tc *v1alpha1.TidbCluster, ordinal int32) (*config.Config, error) {
+	return ftd.tidbConfig, ftd.getInfoError
+}
diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go
index 9e2da56d28..3127af5f89 100644
--- a/pkg/manager/member/pd_member_manager.go
+++ b/pkg/manager/member/pd_member_manager.go
@@ -404,7 +404,7 @@ func (pmm *pdMemberManager) getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster)
 	ns := tc.Namespace
 	tcName := tc.Name
 	instanceName := tc.GetLabels()[label.InstanceLabelKey]
-	pdConfigMap := controller.PDMemberName(tcName)
+	pdConfigMap := controller.MemberConfigMapName(tc, v1alpha1.PDMemberType)

 	annMount, annVolume := annotationsMountVolume()
 	volMounts := []corev1.VolumeMount{
diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go
index 19b145fb0b..dcf88833da 100644
--- a/pkg/manager/member/tidb_member_manager.go
+++ b/pkg/manager/member/tidb_member_manager.go
@@ -220,7 +220,7 @@ func (tmm *tidbMemberManager) getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbClust
 	ns := tc.GetNamespace()
 	tcName := tc.GetName()
 	instanceName := tc.GetLabels()[label.InstanceLabelKey]
-	tidbConfigMap := controller.TiDBMemberName(tcName)
+	tidbConfigMap := controller.MemberConfigMapName(tc, v1alpha1.TiDBMemberType)

 	annMount, annVolume := annotationsMountVolume()
 	volMounts := []corev1.VolumeMount{
diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go
index bbb2ee02b5..18a4ab0b0b 100644
--- a/pkg/manager/member/tikv_member_manager.go
+++ b/pkg/manager/member/tikv_member_manager.go
@@ -270,7 +270,7 @@ func (tkmm *tikvMemberManager) getNewServiceForTidbCluster(tc *v1alpha1.TidbClus
 func (tkmm *tikvMemberManager) getNewSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, error) {
 	ns := tc.GetNamespace()
 	tcName := tc.GetName()
-	tikvConfigMap := controller.TiKVMemberName(tcName)
+	tikvConfigMap := controller.MemberConfigMapName(tc, v1alpha1.TiKVMemberType)

 	annMount, annVolume := annotationsMountVolume()
 	volMounts := []corev1.VolumeMount{
 		annMount,
diff --git a/tests/actions.go b/tests/actions.go
index ed2e1e5f74..4b97612efe 100644
--- a/tests/actions.go
+++ b/tests/actions.go
@@ -75,6 +75,7 @@ func NewOperatorActions(cli versioned.Interface,
 		cli:          cli,
 		kubeCli:      kubeCli,
 		pdControl:    controller.NewDefaultPDControl(),
+		tidbControl:  controller.NewDefaultTiDBControl(),
 		pollInterval: pollInterval,
 		cfg:          cfg,
 	}
@@ -166,6 +167,7 @@ type operatorActions struct {
 	cli           versioned.Interface
 	kubeCli       kubernetes.Interface
 	pdControl     controller.PDControlInterface
+	tidbControl   controller.TiDBControlInterface
 	pollInterval  time.Duration
 	cfg           *Config
 	clusterEvents map[string]*clusterEvent
@@ -200,25 +202,31 @@
 }

 type TidbClusterConfig struct {
-	BackupName       string
-	Namespace        string
-	ClusterName      string
-	OperatorTag      string
-	PDImage          string
-	TiKVImage        string
-	TiDBImage        string
-	StorageClassName string
-	Password         string
-	InitSql          string
-	RecordCount      string
-	InsertBatchSize  string
-	Resources        map[string]string
-	Args             map[string]string
-	blockWriter      *blockwriter.BlockWriterCase
-	Monitor          bool
-	UserName         string
-	InitSecretName   string
-	BackupSecretName string
+	BackupName             string
+	Namespace              string
+	ClusterName            string
+	OperatorTag            string
+	PDImage                string
+	TiKVImage              string
+	TiDBImage              string
+	StorageClassName       string
+	Password               string
+	InitSql                string
+	RecordCount            string
+	InsertBatchSize        string
+	Resources              map[string]string
+	Args                   map[string]string
+	blockWriter            *blockwriter.BlockWriterCase
+	Monitor                bool
+	UserName               string
+	InitSecretName         string
+	BackupSecretName       string
+	EnableConfigMapRollout bool
+
+	PDMaxReplicas       int
+	TiKVGrpcConcurrency int
+	TiDBTokenLimit      int
+	PDLogLevel          string

 	BlockWriteConfig blockwriter.Config
 	GrafanaClient    *metrics.Client
@@ -274,6 +282,20 @@ func (tc *TidbClusterConfig) TidbClusterHelmSetString(m map[string]string) strin
 		"tidb.passwordSecretName": tc.InitSecretName,
 		"tidb.initSql":            tc.InitSql,
 		"monitor.create":          strconv.FormatBool(tc.Monitor),
+		"enableConfigMapRollout":  strconv.FormatBool(tc.EnableConfigMapRollout),
+	}
+
+	if tc.PDMaxReplicas > 0 {
+		set["pd.maxReplicas"] = strconv.Itoa(tc.PDMaxReplicas)
+	}
+	if tc.TiKVGrpcConcurrency > 0 {
+		set["tikv.grpcConcurrency"] = strconv.Itoa(tc.TiKVGrpcConcurrency)
+	}
+	if tc.TiDBTokenLimit > 0 {
+		set["tidb.tokenLimit"] = strconv.Itoa(tc.TiDBTokenLimit)
+	}
+	if len(tc.PDLogLevel) > 0 {
+		set["pd.logLevel"] = tc.PDLogLevel
 	}

 	for k, v := range tc.Resources {
@@ -465,6 +487,12 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
 		return fmt.Errorf("failed to delete jobs: %v, %s", err, string(res))
 	}

+	// delete all configmaps
+	allConfigMaps := label.New().Instance(info.ClusterName).String()
+	if res, err := exec.Command("kubectl", "delete", "configmaps", "-n", info.Namespace, "-l", allConfigMaps).CombinedOutput(); err != nil {
+		return fmt.Errorf("failed to delete configmaps: %v, %s", err, string(res))
+	}
+
 	patchPVCmd := fmt.Sprintf("kubectl get pv | grep %s | grep %s | awk '{print $1}' | "+
 		"xargs -I {} kubectl patch pv {} -p '{\"spec\":{\"persistentVolumeReclaimPolicy\":\"Delete\"}}'",
 		info.Namespace, info.ClusterName)
@@ -556,6 +584,12 @@ func (oa *operatorActions) CheckTidbClusterStatus(info *TidbClusterConfig) error
 				return false, nil
 			}
 		}
+		if info.EnableConfigMapRollout {
+			glog.V(4).Info("check tidb cluster configuration synced")
+			if b, err := oa.checkTidbClusterConfigUpdated(tc, info); !b && err == nil {
+				return false, nil
+			}
+		}
 		return true, nil
 	}); err != nil {
 		glog.Errorf("check tidb cluster status failed: %s", err.Error())
@@ -1364,6 +1398,68 @@ func (oa *operatorActions) monitorNormal(clusterInfo *TidbClusterConfig) (bool,
 	return true, nil
 }

+func (oa *operatorActions) checkTidbClusterConfigUpdated(tc *v1alpha1.TidbCluster, clusterInfo *TidbClusterConfig) (bool, error) {
+	if ok := oa.checkPdConfigUpdated(tc, clusterInfo); !ok {
+		return false, nil
+	}
+	if ok := oa.checkTiKVConfigUpdated(tc, clusterInfo); !ok {
+		return false, nil
+	}
+	if ok := oa.checkTiDBConfigUpdated(tc, clusterInfo); !ok {
+		return false, nil
+	}
+	return true, nil
+}
+
+func (oa *operatorActions) checkPdConfigUpdated(tc *v1alpha1.TidbCluster, clusterInfo *TidbClusterConfig) bool {
+
+	pdCli := oa.pdControl.GetPDClient(tc)
+	config, err := pdCli.GetConfig()
+	if err != nil {
+		glog.Errorf("failed to get PD configuration from tidb cluster [%s/%s], error: %v", tc.Namespace, tc.Name, err)
+		return false
+	}
+	if len(clusterInfo.PDLogLevel) > 0 && clusterInfo.PDLogLevel != config.Log.Level {
+		glog.Errorf("check [%s/%s] PD logLevel configuration updated failed: desired [%s], actual [%s] not equal",
+			tc.Namespace,
+			tc.Name,
+			clusterInfo.PDLogLevel,
+			config.Log.Level)
+		return false
+	}
+	// TODO: fix #487 PD configuration update for persisted configurations
+	//if clusterInfo.PDMaxReplicas > 0 && config.Replication.MaxReplicas != uint64(clusterInfo.PDMaxReplicas) {
+	//	glog.Errorf("check [%s/%s] PD maxReplicas configuration updated failed: desired [%d], actual [%d] not equal",
+	//		tc.Namespace,
+	//		tc.Name,
+	//		clusterInfo.PDMaxReplicas,
+	//		config.Replication.MaxReplicas)
+	//	return false
+	//}
+	return true
+}
+
+func (oa *operatorActions) checkTiDBConfigUpdated(tc *v1alpha1.TidbCluster, clusterInfo *TidbClusterConfig) bool {
+	for i := int32(0); i < tc.Spec.TiDB.Replicas; i++ {
+		config, err := oa.tidbControl.GetSettings(tc, i)
+		if err != nil {
+			glog.Errorf("failed to get TiDB configuration from cluster [%s/%s], ordinal: %d, error: %v", tc.Namespace, tc.Name, i, err)
+			return false
+		}
+		if clusterInfo.TiDBTokenLimit > 0 && uint(clusterInfo.TiDBTokenLimit) != config.TokenLimit {
+			glog.Errorf("check [%s/%s] TiDB instance [%d] configuration updated failed: desired [%d], actual [%d] not equal",
+				tc.Namespace, tc.Name, i, clusterInfo.TiDBTokenLimit, config.TokenLimit)
+			return false
+		}
+	}
+	return true
+}
+
+func (oa *operatorActions) checkTiKVConfigUpdated(tc *v1alpha1.TidbCluster, clusterInfo *TidbClusterConfig) bool {
+	// TODO: check whether the TiKV configuration is updated
+	return true
+}
+
 func (oa *operatorActions) checkPrometheus(clusterInfo *TidbClusterConfig) error {
 	ns := clusterInfo.Namespace
 	tcName := clusterInfo.ClusterName
diff --git a/tests/cluster_info.go b/tests/cluster_info.go
index 7470b1cc77..8fc5b05268 100644
--- a/tests/cluster_info.go
+++ b/tests/cluster_info.go
@@ -52,6 +52,27 @@ func (tc *TidbClusterConfig) UpgradeAll(tag string) *TidbClusterConfig {
 		UpgradeTiDB("pingcap/tidb:" + tag)
 }

+// FIXME: update of PD configuration does not work yet, see #487
+func (tc *TidbClusterConfig) UpdatePdMaxReplicas(maxReplicas int) *TidbClusterConfig {
+	tc.PDMaxReplicas = maxReplicas
+	return tc
+}
+
+func (tc *TidbClusterConfig) UpdateTiKVGrpcConcurrency(concurrency int) *TidbClusterConfig {
+	tc.TiKVGrpcConcurrency = concurrency
+	return tc
+}
+
+func (tc *TidbClusterConfig) UpdateTiDBTokenLimit(tokenLimit int) *TidbClusterConfig {
+	tc.TiDBTokenLimit = tokenLimit
+	return tc
+}
+
+func (tc *TidbClusterConfig) UpdatePDLogLevel(logLevel string) *TidbClusterConfig {
+	tc.PDLogLevel = logLevel
+	return tc
+}
+
 func (tc *TidbClusterConfig) DSN(dbName string) string {
 	return fmt.Sprintf("root:%s@tcp(%s-tidb.%s:4000)/%s", tc.Password, tc.ClusterName, tc.Namespace, dbName)
 }
diff --git a/tests/cmd/e2e/main.go b/tests/cmd/e2e/main.go
index f39d5495bb..464462b42f 100644
--- a/tests/cmd/e2e/main.go
+++ b/tests/cmd/e2e/main.go
@@ -99,6 +99,11 @@ func main() {
 				BatchSize: 1,
 				RawSize:   1,
 			},
+			EnableConfigMapRollout: true,
+			PDMaxReplicas:          3,
+			TiKVGrpcConcurrency:    4,
+			TiDBTokenLimit:         1000,
+			PDLogLevel:             "info",
 		},
 		{
 			Namespace: name2,
@@ -137,6 +142,11 @@
 				BatchSize: 1,
 				RawSize:   1,
 			},
+			EnableConfigMapRollout: false,
+			PDMaxReplicas:          3,
+			TiKVGrpcConcurrency:    4,
+			TiDBTokenLimit:         1000,
+			PDLogLevel:             "info",
 		},
 	}

@@ -193,6 +203,23 @@
 		}
 	}

+	// update configuration on the fly
+	for _, clusterInfo := range clusterInfos {
+		clusterInfo = clusterInfo.
+			UpdatePdMaxReplicas(conf.PDMaxReplicas).
+			UpdatePDLogLevel("debug").
+			UpdateTiKVGrpcConcurrency(conf.TiKVGrpcConcurrency).
+			UpdateTiDBTokenLimit(conf.TiDBTokenLimit)
+		if err = oa.UpgradeTidbCluster(clusterInfo); err != nil {
+			glog.Fatal(err)
+		}
+	}
+	for _, clusterInfo := range clusterInfos {
+		if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
+			glog.Fatal(err)
+		}
+	}
+
 	// after upgrade cluster, clean webhook
 	oa.CleanWebHookAndService(operatorInfo)
diff --git a/tests/config.go b/tests/config.go
index e73b6cf841..817f1e5ed3 100644
--- a/tests/config.go
+++ b/tests/config.go
@@ -37,6 +37,10 @@ type Config struct {
 	CertFile string
 	KeyFile  string

+	PDMaxReplicas       int `yaml:"pd_max_replicas" json:"pd_max_replicas"`
+	TiKVGrpcConcurrency int `yaml:"tikv_grpc_concurrency" json:"tikv_grpc_concurrency"`
+	TiDBTokenLimit      int `yaml:"tidb_token_limit" json:"tidb_token_limit"`
+
 	// Block writer
 	BlockWriter blockwriter.Config `yaml:"block_writer,omitempty"`

@@ -55,6 +59,11 @@ type Nodes struct {
 // NewConfig creates a new config.
 func NewConfig() (*Config, error) {
 	cfg := &Config{
+
+		PDMaxReplicas:       5,
+		TiDBTokenLimit:      1024,
+		TiKVGrpcConcurrency: 8,
+
 		BlockWriter: blockwriter.Config{
 			TableNum:    defaultTableNum,
 			Concurrency: defaultConcurrency,