From c7fdc73b3c521ac2f5ab5843a77d405af75ef25f Mon Sep 17 00:00:00 2001
From: Aylei
Date: Mon, 16 Sep 2019 11:24:41 +0800
Subject: [PATCH 1/2] Apply suggestions from code review

Co-Authored-By: weekface
---
 .../tidb-cluster/templates/tidb-cluster.yaml  |   6 +
 charts/tidb-cluster/values.yaml               |  13 ++
 deploy/aws/main.tf                            |   2 +-
 deploy/modules/aws/tidb-cluster/data.tf       |   2 +-
 deploy/modules/aws/tidb-cluster/local.tf      |  57 ++++----
 .../tidb-cluster/templates/userdata.sh.tpl    |   1 +
 .../aws/tidb-cluster/values/default.yaml      |  11 ++
 go.mod                                        |   2 +-
 go.sum                                        |   4 +-
 pkg/apis/pingcap.com/v1alpha1/types.go        |  45 +++---
 .../v1alpha1/zz_generated.deepcopy.go         | 138 +++++++-----------
 pkg/manager/member/pd_member_manager.go       |   7 +-
 pkg/manager/member/tidb_member_manager.go     |  15 +-
 pkg/manager/member/tikv_member_manager.go     |   7 +-
 14 files changed, 159 insertions(+), 151 deletions(-)

diff --git a/charts/tidb-cluster/templates/tidb-cluster.yaml b/charts/tidb-cluster/templates/tidb-cluster.yaml
index fd4e893521..afa4a38931 100644
--- a/charts/tidb-cluster/templates/tidb-cluster.yaml
+++ b/charts/tidb-cluster/templates/tidb-cluster.yaml
@@ -45,6 +45,8 @@ spec:
     annotations:
 {{ toYaml .Values.pd.annotations | indent 6 }}
     {{- end }}
+    podSecurityContext:
+{{ toYaml .Values.pd.podSecurityContext | indent 6}}
   tikv:
     replicas: {{ .Values.tikv.replicas }}
     image: {{ .Values.tikv.image }}
@@ -68,6 +70,8 @@ spec:
 {{ toYaml .Values.tikv.annotations | indent 6 }}
     {{- end }}
     maxFailoverCount: {{ .Values.tikv.maxFailoverCount | default 3 }}
+    podSecurityContext:
+{{ toYaml .Values.tikv.podSecurityContext | indent 6}}
   tidb:
     replicas: {{ .Values.tidb.replicas }}
     image: {{ .Values.tidb.image }}
@@ -87,6 +91,8 @@ spec:
     annotations:
 {{ toYaml .Values.tidb.annotations | indent 6 }}
     {{- end }}
+    podSecurityContext:
+{{ toYaml .Values.tidb.podSecurityContext | indent 6}}
   binlogEnabled: {{ .Values.binlog.pump.create | default false }}
   maxFailoverCount: {{ .Values.tidb.maxFailoverCount | default 3 }}
   separateSlowLog: {{ .Values.tidb.separateSlowLog | default false }}
diff --git a/charts/tidb-cluster/values.yaml b/charts/tidb-cluster/values.yaml
index c4a7811201..0088c0213e 100644
--- a/charts/tidb-cluster/values.yaml
+++ b/charts/tidb-cluster/values.yaml
@@ -154,6 +154,10 @@ pd:
   #   effect: "NoSchedule"
   annotations: {}
 
+  # Specify the security context of PD Pod.
+  # refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  podSecurityContext: {}
+
 tikv:
   # Please refer to https://github.com/tikv/tikv/blob/master/etc/config-template.toml for the default
   # tikv configurations (change to the tags of your tikv version),
@@ -234,6 +238,10 @@ tikv:
   # maxFailoverCount is used to configure the maximum number of TiKV nodes that TiDB Operator can create when failover occurs.
   maxFailoverCount: 3
 
+  # Specify the security context of TiKV Pod.
+  # refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  podSecurityContext: {}
+
 tidb:
   # Please refer to https://github.com/pingcap/tidb/blob/master/config/config.toml.example for the default
   # tidb configurations(change to the tags of your tidb version),
@@ -290,6 +298,11 @@ tidb:
   #     value: tidb
   #     effect: "NoSchedule"
   annotations: {}
+
+  # Specify the security context of TiDB Pod.
+  # refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  podSecurityContext: {}
+
   maxFailoverCount: 3
   service:
     type: NodePort
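The chart changes above only pipe `.Values.<component>.podSecurityContext` through to the TidbCluster spec; what a user puts there is an ordinary Kubernetes PodSecurityContext. A minimal sketch of how the new value might be filled in (the uid/gid numbers and the sysctl value are illustrative, not defaults of this chart):

    tidb:
      podSecurityContext:
        runAsUser: 1000
        fsGroup: 2000
        sysctls:
        - name: net.ipv4.tcp_keepalive_time
          value: "300"

Leaving the value as the empty map `{}` keeps today's behavior, since `toYaml` then renders nothing under the field.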
diff --git a/deploy/aws/main.tf b/deploy/aws/main.tf
index 36ce5dd0ec..5af8676a59 100644
--- a/deploy/aws/main.tf
+++ b/deploy/aws/main.tf
@@ -3,7 +3,7 @@ provider "aws" {
 }
 
 locals {
-  eks = module.tidb-operator.eks
+  eks     = module.tidb-operator.eks
   subnets = module.vpc.private_subnets
 }
 
diff --git a/deploy/modules/aws/tidb-cluster/data.tf b/deploy/modules/aws/tidb-cluster/data.tf
index a44aebbde6..45b7d766d5 100644
--- a/deploy/modules/aws/tidb-cluster/data.tf
+++ b/deploy/modules/aws/tidb-cluster/data.tf
@@ -12,7 +12,7 @@ data "aws_ami" "eks_worker" {
 
 data "template_file" "userdata" {
   template = file("${path.module}/templates/userdata.sh.tpl")
-  count    = local.worker_group_count
+  count    = length(local.tidb_cluster_worker_groups)
 
   vars = {
     cluster_name = var.eks.cluster_id
diff --git a/deploy/modules/aws/tidb-cluster/local.tf b/deploy/modules/aws/tidb-cluster/local.tf
index 0d1aa60a44..0b96c67937 100644
--- a/deploy/modules/aws/tidb-cluster/local.tf
+++ b/deploy/modules/aws/tidb-cluster/local.tf
@@ -40,13 +40,13 @@ locals {
 
   tidb_cluster_worker_groups = [
     {
-      name          = "${var.cluster_name}-pd"
-      key_name      = var.ssh_key_name
-      instance_type = var.pd_instance_type
-      root_volume_size = "50"
-      public_ip     = false
+      name              = "${var.cluster_name}-pd"
+      key_name          = var.ssh_key_name
+      instance_type     = var.pd_instance_type
+      root_volume_size  = "50"
+      public_ip         = false
       # the space separator is safe when the extra args is empty or prefixed by spaces (the same hereafter)
-      kubelet_extra_args = join(" ",
+      kubelet_extra_args = join(" ",
         [
          "--register-with-taints=dedicated=${var.cluster_name}-pd:NoSchedule",
          "--node-labels=dedicated=${var.cluster_name}-pd,pingcap.com/aws-local-ssd=true,zone=${local.aws_zone_getter}",
          lookup(var.group_kubelet_extra_args, "pd", var.kubelet_extra_args)
         ]
       )
       asg_desired_capacity = var.pd_count
       asg_max_size         = var.pd_count + 2
       # additional_userdata = file("userdata.sh")
     },
     {
-      name          = "${var.cluster_name}-tikv"
-      key_name      = var.ssh_key_name
-      instance_type = var.tikv_instance_type
-      root_volume_size = "50"
-      public_ip     = false
-      kubelet_extra_args = join(" ",
+      name              = "${var.cluster_name}-tikv"
+      key_name          = var.ssh_key_name
+      instance_type     = var.tikv_instance_type
+      root_volume_size  = "50"
+      public_ip         = false
+      kubelet_extra_args = join(" ",
         [
          "--register-with-taints=dedicated=${var.cluster_name}-tikv:NoSchedule",
          "--node-labels=dedicated=${var.cluster_name}-tikv,pingcap.com/aws-local-ssd=true,zone=${local.aws_zone_getter}",
          lookup(var.group_kubelet_extra_args, "tikv", var.kubelet_extra_args)
         ]
       )
       asg_desired_capacity = var.tikv_count
       asg_max_size         = var.tikv_count
       pre_userdata         = file("${path.module}/pre_userdata")
       suspended_processes  = ["ReplaceUnhealthy"]
     },
     {
-      name          = "${var.cluster_name}-tidb"
-      key_name      = var.ssh_key_name
-      instance_type = var.tidb_instance_type
-      root_volume_type = "gp2"
-      root_volume_size = "50"
-      public_ip     = false
-      kubelet_extra_args = join(" ",
+      name              = "${var.cluster_name}-tidb"
+      key_name          = var.ssh_key_name
+      instance_type     = var.tidb_instance_type
+      root_volume_type  = "gp2"
+      root_volume_size  = "50"
+      public_ip         = false
+      kubelet_extra_args = join(" ",
         [
+          "--allowed-unsafe-sysctls=\\\"net.*\\\"",
           "--register-with-taints=dedicated=${var.cluster_name}-tidb:NoSchedule",
           "--node-labels=dedicated=${var.cluster_name}-tidb,zone=${local.aws_zone_getter}",
           lookup(var.group_kubelet_extra_args, "tidb", var.kubelet_extra_args)
         ]
       )
       asg_desired_capacity = var.tidb_count
       asg_max_size         = var.tidb_count + 2
     },
     {
-      name          = "${var.cluster_name}-monitor"
-      key_name      = var.ssh_key_name
-      instance_type = var.monitor_instance_type
-      root_volume_type = "gp2"
-      root_volume_size = "50"
-      public_ip     = false
-      kubelet_extra_args = join(" ",
+      name              = "${var.cluster_name}-monitor"
+      key_name          = var.ssh_key_name
+      instance_type     = var.monitor_instance_type
+      root_volume_type  = "gp2"
+      root_volume_size  = "50"
+      public_ip         = false
+      kubelet_extra_args = join(" ",
         [
           "--node-labels=zone=${local.aws_zone_getter}",
           lookup(var.group_kubelet_extra_args, "monitor", var.kubelet_extra_args)
         ]
       )
-      asg_desired_capacity = 1
-      asg_max_size = 3
+      asg_desired_capacity = 1
+      asg_max_size         = 3
     }
   ]
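For context on the `--allowed-unsafe-sysctls=\"net.*\"` kubelet flag added to the tidb group above: Kubernetes treats all namespaced sysctls outside a small safe list as unsafe, and the kubelet rejects any Pod that requests them (the Pod fails with reason SysctlForbidden) unless the node explicitly whitelists them. A sketch of a Pod that only runs on nodes started with that flag (hypothetical Pod, for illustration only):

    apiVersion: v1
    kind: Pod
    metadata:
      name: keepalive-demo    # hypothetical name
    spec:
      securityContext:
        sysctls:
        # net.* is not in the kubelet's safe set, so this requires --allowed-unsafe-sysctls
        - name: net.ipv4.tcp_keepalive_time
          value: "300"
      containers:
      - name: app
        image: busybox
        command: ["sleep", "3600"]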
"50" - public_ip = false - kubelet_extra_args = join(" ", + name = "${var.cluster_name}-monitor" + key_name = var.ssh_key_name + instance_type = var.monitor_instance_type + root_volume_type = "gp2" + root_volume_size = "50" + public_ip = false + kubelet_extra_args = join(" ", [ "--node-labels=zone=${local.aws_zone_getter}", lookup(var.group_kubelet_extra_args, "monitor", var.kubelet_extra_args) ] ) - asg_desired_capacity = 1 - asg_max_size = 3 + asg_desired_capacity = 1 + asg_max_size = 3 } ] diff --git a/deploy/modules/aws/tidb-cluster/templates/userdata.sh.tpl b/deploy/modules/aws/tidb-cluster/templates/userdata.sh.tpl index b3077d77b0..681a0b55fc 100644 --- a/deploy/modules/aws/tidb-cluster/templates/userdata.sh.tpl +++ b/deploy/modules/aws/tidb-cluster/templates/userdata.sh.tpl @@ -10,6 +10,7 @@ root hard nofile 1000000 root soft core unlimited root soft stack 10240 EOF + # config docker ulimit cp /usr/lib/systemd/system/docker.service /etc/systemd/system/docker.service sed -i 's/LimitNOFILE=infinity/LimitNOFILE=1048576/' /etc/systemd/system/docker.service diff --git a/deploy/modules/aws/tidb-cluster/values/default.yaml b/deploy/modules/aws/tidb-cluster/values/default.yaml index 40e52d5d98..36145ba493 100644 --- a/deploy/modules/aws/tidb-cluster/values/default.yaml +++ b/deploy/modules/aws/tidb-cluster/values/default.yaml @@ -15,6 +15,17 @@ tidb: service.beta.kubernetes.io/aws-load-balancer-type: nlb service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true' separateSlowLog: true + config: | + [log] + level = "info" + [performance] + tcp-keep-alive = true + podSecurityContext: + sysctls: + - name: net.ipv4.tcp_keepalive_time + value: "300" + - name: net.ipv4.tcp_keepalive_intvl + value: "300" monitor: storage: 100Gi storageClassName: ebs-gp2 diff --git a/go.mod b/go.mod index 5b8c13a998..539a0338e6 100644 --- a/go.mod +++ b/go.mod @@ -113,7 +113,7 @@ require ( k8s.io/apiserver v0.0.0-20190118115647-a748535592ba k8s.io/cli-runtime v0.0.0-20190118125240-caee4253d968 k8s.io/client-go v0.0.0-20190115164855-701b91367003 - k8s.io/code-generator v0.0.0-20191109100332-a9a0d9c0b3aa + k8s.io/code-generator v0.0.0-20191114215150-2a85f169f05f k8s.io/klog v1.0.0 k8s.io/kubernetes v1.12.5 k8s.io/metrics v0.0.0-20190118124808-33c1aed8dc65 // indirect diff --git a/go.sum b/go.sum index 218b43bdf7..0b67ff409c 100644 --- a/go.sum +++ b/go.sum @@ -360,8 +360,8 @@ k8s.io/cli-runtime v0.0.0-20190118125240-caee4253d968 h1:VXLj8aMvJEo14Utv+knJDs0 k8s.io/cli-runtime v0.0.0-20190118125240-caee4253d968/go.mod h1:qWnH3/b8sp/l7EvlDh7ulDU3UWA4P4N1NFbEEP791tM= k8s.io/client-go v0.0.0-20190115164855-701b91367003 h1:gQQC0U1hM6L808TYvGGO/5vhUisGw384axV7rqFUv04= k8s.io/client-go v0.0.0-20190115164855-701b91367003/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/code-generator v0.0.0-20191109100332-a9a0d9c0b3aa h1:4feCF84yk6VEXdpOwOhwB1YIwobejEwKBzgHY0xa9Co= -k8s.io/code-generator v0.0.0-20191109100332-a9a0d9c0b3aa/go.mod h1:fRFrKVixH946mn5PeglV2fvxbE86JesGi16bsWZ1xz4= +k8s.io/code-generator v0.0.0-20191114215150-2a85f169f05f h1:sqMKZZhZNG3WgglCBOn9zRJrbEnCeJ5cP+4cv9awWsk= +k8s.io/code-generator v0.0.0-20191114215150-2a85f169f05f/go.mod h1:Vh0irzg7dL9pFS4c8hFsali5txtbmse3MFS4zEH7Thg= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod 
diff --git a/go.mod b/go.mod
index 5b8c13a998..539a0338e6 100644
--- a/go.mod
+++ b/go.mod
@@ -113,7 +113,7 @@ require (
 	k8s.io/apiserver v0.0.0-20190118115647-a748535592ba
 	k8s.io/cli-runtime v0.0.0-20190118125240-caee4253d968
 	k8s.io/client-go v0.0.0-20190115164855-701b91367003
-	k8s.io/code-generator v0.0.0-20191109100332-a9a0d9c0b3aa
+	k8s.io/code-generator v0.0.0-20191114215150-2a85f169f05f
 	k8s.io/klog v1.0.0
 	k8s.io/kubernetes v1.12.5
 	k8s.io/metrics v0.0.0-20190118124808-33c1aed8dc65 // indirect
diff --git a/go.sum b/go.sum
index 218b43bdf7..0b67ff409c 100644
--- a/go.sum
+++ b/go.sum
@@ -360,8 +360,8 @@ k8s.io/cli-runtime v0.0.0-20190118125240-caee4253d968 h1:VXLj8aMvJEo14Utv+knJDs0
 k8s.io/cli-runtime v0.0.0-20190118125240-caee4253d968/go.mod h1:qWnH3/b8sp/l7EvlDh7ulDU3UWA4P4N1NFbEEP791tM=
 k8s.io/client-go v0.0.0-20190115164855-701b91367003 h1:gQQC0U1hM6L808TYvGGO/5vhUisGw384axV7rqFUv04=
 k8s.io/client-go v0.0.0-20190115164855-701b91367003/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
-k8s.io/code-generator v0.0.0-20191109100332-a9a0d9c0b3aa h1:4feCF84yk6VEXdpOwOhwB1YIwobejEwKBzgHY0xa9Co=
-k8s.io/code-generator v0.0.0-20191109100332-a9a0d9c0b3aa/go.mod h1:fRFrKVixH946mn5PeglV2fvxbE86JesGi16bsWZ1xz4=
+k8s.io/code-generator v0.0.0-20191114215150-2a85f169f05f h1:sqMKZZhZNG3WgglCBOn9zRJrbEnCeJ5cP+4cv9awWsk=
+k8s.io/code-generator v0.0.0-20191114215150-2a85f169f05f/go.mod h1:Vh0irzg7dL9pFS4c8hFsali5txtbmse3MFS4zEH7Thg=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM=
 k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
diff --git a/pkg/apis/pingcap.com/v1alpha1/types.go b/pkg/apis/pingcap.com/v1alpha1/types.go
index bb92e1c5d7..22669aa35b 100644
--- a/pkg/apis/pingcap.com/v1alpha1/types.go
+++ b/pkg/apis/pingcap.com/v1alpha1/types.go
@@ -90,9 +90,10 @@ type TidbClusterSpec struct {
 	TiKV            TiKVSpec            `json:"tikv,omitempty"`
 	TiKVPromGateway TiKVPromGatewaySpec `json:"tikvPromGateway,omitempty"`
 	// Services list non-headless services type used in TidbCluster
-	Services        []Service                            `json:"services,omitempty"`
-	PVReclaimPolicy corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`
-	Timezone        string                               `json:"timezone,omitempty"`
+	Services         []Service                            `json:"services,omitempty"`
+	PVReclaimPolicy  corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`
+	Timezone         string                               `json:"timezone,omitempty"`
+	MaxFailoverCount int32                                `json:"maxFailoverCount,omitempty"`
 }
 
 // TidbClusterStatus represents the current status of a tidb cluster.
@@ -106,27 +107,21 @@ type TidbClusterStatus struct {
 // PDSpec contains details of PD member
 type PDSpec struct {
 	ContainerSpec
-	Replicas         int32               `json:"replicas"`
-	Affinity         *corev1.Affinity    `json:"affinity,omitempty"`
-	NodeSelector     map[string]string   `json:"nodeSelector,omitempty"`
-	StorageClassName string              `json:"storageClassName,omitempty"`
-	Tolerations      []corev1.Toleration `json:"tolerations,omitempty"`
-	Annotations      map[string]string   `json:"annotations,omitempty"`
+	PodAttributesSpec
+	Replicas         int32  `json:"replicas"`
+	StorageClassName string `json:"storageClassName,omitempty"`
 }
 
 // TiDBSpec contains details of PD member
 type TiDBSpec struct {
 	ContainerSpec
+	PodAttributesSpec
 	Replicas         int32  `json:"replicas"`
-	Affinity         *corev1.Affinity    `json:"affinity,omitempty"`
-	NodeSelector     map[string]string   `json:"nodeSelector,omitempty"`
 	StorageClassName string `json:"storageClassName,omitempty"`
-	Tolerations      []corev1.Toleration `json:"tolerations,omitempty"`
-	Annotations      map[string]string   `json:"annotations,omitempty"`
 	BinlogEnabled    bool   `json:"binlogEnabled,omitempty"`
-	MaxFailoverCount int32  `json:"maxFailoverCount,omitempty"`
 	SeparateSlowLog  bool   `json:"separateSlowLog,omitempty"`
 	SlowLogTailer    TiDBSlowLogTailerSpec `json:"slowLogTailer,omitempty"`
+	MaxFailoverCount int32                 `json:"maxFailoverCount,omitempty"`
 }
 
 // TiDBSlowLogTailerSpec represents an optional log tailer sidecar with TiDB
@@ -137,14 +132,11 @@ type TiDBSlowLogTailerSpec struct {
 // TiKVSpec contains details of TiKV members
 type TiKVSpec struct {
 	ContainerSpec
-	Privileged       bool                `json:"privileged,omitempty"`
-	Replicas         int32               `json:"replicas"`
-	Affinity         *corev1.Affinity    `json:"affinity,omitempty"`
-	NodeSelector     map[string]string   `json:"nodeSelector,omitempty"`
-	StorageClassName string              `json:"storageClassName,omitempty"`
-	Tolerations      []corev1.Toleration `json:"tolerations,omitempty"`
-	Annotations      map[string]string   `json:"annotations,omitempty"`
-	MaxFailoverCount int32               `json:"maxFailoverCount,omitempty"`
+	PodAttributesSpec
+	Replicas         int32  `json:"replicas"`
+	Privileged       bool   `json:"privileged,omitempty"`
+	StorageClassName string `json:"storageClassName,omitempty"`
+	MaxFailoverCount int32  `json:"maxFailoverCount,omitempty"`
 }
 
 // TiKVPromGatewaySpec runs as a sidecar with TiKVSpec
@@ -160,6 +152,15 @@ type ContainerSpec struct {
 	Limits   *ResourceRequirement `json:"limits,omitempty"`
 }
 
+// PodAttributesSpec is a spec of some general attributes of TiKV, TiDB and PD Pods
+type PodAttributesSpec struct {
+	Affinity           *corev1.Affinity           `json:"affinity,omitempty"`
+	NodeSelector       map[string]string          `json:"nodeSelector,omitempty"`
+	Tolerations        []corev1.Toleration        `json:"tolerations,omitempty"`
+	Annotations        map[string]string          `json:"annotations,omitempty"`
+	PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"`
+}
+
 // Service represent service type used in TidbCluster
 type Service struct {
 	Name string `json:"name,omitempty"`
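Taken together with the json tags above, a TidbCluster manifest can now carry these Pod-level attributes per component. A hedged sketch, assuming the CRD is served as pingcap.com/v1alpha1 (the package path suggests so) and using illustrative values:

    apiVersion: pingcap.com/v1alpha1
    kind: TidbCluster
    metadata:
      name: demo    # hypothetical
    spec:
      tidb:
        replicas: 2
        podSecurityContext:
          sysctls:
          - name: net.ipv4.tcp_keepalive_time
            value: "300"

Embedding PodAttributesSpec into PDSpec, TiDBSpec and TiKVSpec also deduplicates the previously repeated Affinity/NodeSelector/Tolerations/Annotations fields, which is what shrinks the generated deepcopy code below.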
diff --git a/pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go
index 6e530d5555..8d7f98efac 100644
--- a/pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go
@@ -21,8 +21,8 @@ limitations under the License.
 package v1alpha1
 
 import (
-	appsv1 "k8s.io/api/apps/v1"
-	v1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -90,32 +90,7 @@ func (in *PDMember) DeepCopy() *PDMember {
 func (in *PDSpec) DeepCopyInto(out *PDSpec) {
 	*out = *in
 	in.ContainerSpec.DeepCopyInto(&out.ContainerSpec)
-	if in.Affinity != nil {
-		in, out := &in.Affinity, &out.Affinity
-		*out = new(v1.Affinity)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.NodeSelector != nil {
-		in, out := &in.NodeSelector, &out.NodeSelector
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.Tolerations != nil {
-		in, out := &in.Tolerations, &out.Tolerations
-		*out = make([]v1.Toleration, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Annotations != nil {
-		in, out := &in.Annotations, &out.Annotations
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
+	in.PodAttributesSpec.DeepCopyInto(&out.PodAttributesSpec)
 	return
 }
 
@@ -134,7 +109,7 @@ func (in *PDStatus) DeepCopyInto(out *PDStatus) {
 	*out = *in
 	if in.StatefulSet != nil {
 		in, out := &in.StatefulSet, &out.StatefulSet
-		*out = new(appsv1.StatefulSetStatus)
+		*out = new(v1.StatefulSetStatus)
 		(*in).DeepCopyInto(*out)
 	}
 	if in.Members != nil {
@@ -165,6 +140,53 @@ func (in *PDStatus) DeepCopy() *PDStatus {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAttributesSpec) DeepCopyInto(out *PodAttributesSpec) {
+	*out = *in
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(corev1.Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]corev1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.PodSecurityContext != nil {
+		in, out := &in.PodSecurityContext, &out.PodSecurityContext
+		*out = new(corev1.PodSecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttributesSpec.
+func (in *PodAttributesSpec) DeepCopy() *PodAttributesSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAttributesSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ResourceRequirement) DeepCopyInto(out *ResourceRequirement) {
 	*out = *in
@@ -252,32 +274,7 @@ func (in *TiDBSlowLogTailerSpec) DeepCopy() *TiDBSlowLogTailerSpec {
 func (in *TiDBSpec) DeepCopyInto(out *TiDBSpec) {
 	*out = *in
 	in.ContainerSpec.DeepCopyInto(&out.ContainerSpec)
-	if in.Affinity != nil {
-		in, out := &in.Affinity, &out.Affinity
-		*out = new(v1.Affinity)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.NodeSelector != nil {
-		in, out := &in.NodeSelector, &out.NodeSelector
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.Tolerations != nil {
-		in, out := &in.Tolerations, &out.Tolerations
-		*out = make([]v1.Toleration, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Annotations != nil {
-		in, out := &in.Annotations, &out.Annotations
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
+	in.PodAttributesSpec.DeepCopyInto(&out.PodAttributesSpec)
 	in.SlowLogTailer.DeepCopyInto(&out.SlowLogTailer)
 	return
 }
 
@@ -297,7 +294,7 @@ func (in *TiDBStatus) DeepCopyInto(out *TiDBStatus) {
 	*out = *in
 	if in.StatefulSet != nil {
 		in, out := &in.StatefulSet, &out.StatefulSet
-		*out = new(appsv1.StatefulSetStatus)
+		*out = new(v1.StatefulSetStatus)
 		(*in).DeepCopyInto(*out)
 	}
 	if in.Members != nil {
@@ -365,32 +362,7 @@ func (in *TiKVPromGatewaySpec) DeepCopy() *TiKVPromGatewaySpec {
 func (in *TiKVSpec) DeepCopyInto(out *TiKVSpec) {
 	*out = *in
 	in.ContainerSpec.DeepCopyInto(&out.ContainerSpec)
-	if in.Affinity != nil {
-		in, out := &in.Affinity, &out.Affinity
-		*out = new(v1.Affinity)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.NodeSelector != nil {
-		in, out := &in.NodeSelector, &out.NodeSelector
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.Tolerations != nil {
-		in, out := &in.Tolerations, &out.Tolerations
-		*out = make([]v1.Toleration, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Annotations != nil {
-		in, out := &in.Annotations, &out.Annotations
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
+	in.PodAttributesSpec.DeepCopyInto(&out.PodAttributesSpec)
 	return
 }
 
@@ -409,7 +381,7 @@ func (in *TiKVStatus) DeepCopyInto(out *TiKVStatus) {
 	*out = *in
 	if in.StatefulSet != nil {
 		in, out := &in.StatefulSet, &out.StatefulSet
-		*out = new(appsv1.StatefulSetStatus)
+		*out = new(v1.StatefulSetStatus)
 		(*in).DeepCopyInto(*out)
 	}
 	if in.Stores != nil {
diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go
index eb11a8d4ea..9f96fb317b 100644
--- a/pkg/manager/member/pd_member_manager.go
+++ b/pkg/manager/member/pd_member_manager.go
@@ -551,9 +551,10 @@ func (pmm *pdMemberManager) getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster)
 						},
 					},
 				},
-				RestartPolicy: corev1.RestartPolicyAlways,
-				Tolerations:   tc.Spec.PD.Tolerations,
-				Volumes:       vols,
+				RestartPolicy:   corev1.RestartPolicyAlways,
+				Tolerations:     tc.Spec.PD.Tolerations,
+				Volumes:         vols,
+				SecurityContext: tc.Spec.PD.PodSecurityContext,
 			},
 		},
 		VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
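Setting `SecurityContext: tc.Spec.PD.PodSecurityContext` on the PodSpec means the generated StatefulSet's pod template carries a pod-level security context. Roughly, the rendered template would contain (abridged sketch; the actual content depends on what the TidbCluster spec sets):

    # StatefulSet .spec.template.spec, abridged
    spec:
      securityContext:              # from tc.Spec.PD.PodSecurityContext
        sysctls:
        - name: net.ipv4.tcp_keepalive_time
          value: "300"
      restartPolicy: Always

The TiDB and TiKV member managers below get the same one-line wiring for their components.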
diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go
index 9ad2af62d1..9e42234fe4 100644
--- a/pkg/manager/member/tidb_member_manager.go
+++ b/pkg/manager/member/tidb_member_manager.go
@@ -347,13 +347,14 @@ func (tmm *tidbMemberManager) getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbClust
 				Annotations: podAnnotations,
 			},
 			Spec: corev1.PodSpec{
-				SchedulerName: tc.Spec.SchedulerName,
-				Affinity:      tc.Spec.TiDB.Affinity,
-				NodeSelector:  tc.Spec.TiDB.NodeSelector,
-				Containers:    containers,
-				RestartPolicy: corev1.RestartPolicyAlways,
-				Tolerations:   tc.Spec.TiDB.Tolerations,
-				Volumes:       vols,
+				SchedulerName:   tc.Spec.SchedulerName,
+				Affinity:        tc.Spec.TiDB.Affinity,
+				NodeSelector:    tc.Spec.TiDB.NodeSelector,
+				Containers:      containers,
+				RestartPolicy:   corev1.RestartPolicyAlways,
+				Tolerations:     tc.Spec.TiDB.Tolerations,
+				Volumes:         vols,
+				SecurityContext: tc.Spec.TiDB.PodSecurityContext,
 			},
 		},
 		ServiceName: controller.TiDBPeerMemberName(tcName),
diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go
index 59ed28caa8..c5ee40b8a0 100644
--- a/pkg/manager/member/tikv_member_manager.go
+++ b/pkg/manager/member/tikv_member_manager.go
@@ -385,9 +385,10 @@ func (tkmm *tikvMemberManager) getNewSetForTidbCluster(tc *v1alpha1.TidbCluster)
 						},
 					},
 				},
-				RestartPolicy: corev1.RestartPolicyAlways,
-				Tolerations:   tc.Spec.TiKV.Tolerations,
-				Volumes:       vols,
+				RestartPolicy:   corev1.RestartPolicyAlways,
+				Tolerations:     tc.Spec.TiKV.Tolerations,
+				Volumes:         vols,
+				SecurityContext: tc.Spec.TiKV.PodSecurityContext,
 			},
 		},
 		VolumeClaimTemplates: []corev1.PersistentVolumeClaim{

From 99c47ba1b5c49f26b176b2a7521fc1f695ad9bc7 Mon Sep 17 00:00:00 2001
From: Aylei
Date: Mon, 16 Sep 2019 14:18:03 +0800
Subject: [PATCH 2/2] Address review comments

Signed-off-by: Aylei
---
 deploy/modules/aws/tidb-cluster/values/default.yaml | 2 +-
 pkg/apis/pingcap.com/v1alpha1/types.go              | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/deploy/modules/aws/tidb-cluster/values/default.yaml b/deploy/modules/aws/tidb-cluster/values/default.yaml
index 36145ba493..7af651ed3f 100644
--- a/deploy/modules/aws/tidb-cluster/values/default.yaml
+++ b/deploy/modules/aws/tidb-cluster/values/default.yaml
@@ -25,7 +25,7 @@ tidb:
     - name: net.ipv4.tcp_keepalive_time
       value: "300"
     - name: net.ipv4.tcp_keepalive_intvl
-      value: "300"
+      value: "75"
 monitor:
   storage: 100Gi
   storageClassName: ebs-gp2
diff --git a/pkg/apis/pingcap.com/v1alpha1/types.go b/pkg/apis/pingcap.com/v1alpha1/types.go
index 22669aa35b..3a8d89ada2 100644
--- a/pkg/apis/pingcap.com/v1alpha1/types.go
+++ b/pkg/apis/pingcap.com/v1alpha1/types.go
@@ -93,7 +93,6 @@ type TidbClusterSpec struct {
 	Services         []Service                            `json:"services,omitempty"`
 	PVReclaimPolicy  corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`
 	Timezone         string                               `json:"timezone,omitempty"`
-	MaxFailoverCount int32                                `json:"maxFailoverCount,omitempty"`
 }
 
 // TidbClusterStatus represents the current status of a tidb cluster.
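With both patches applied, the effective AWS default for the TiDB component consolidates to (reference view of the values set across patches 1/2 and 2/2):

    tidb:
      podSecurityContext:
        sysctls:
        - name: net.ipv4.tcp_keepalive_time
          value: "300"
        - name: net.ipv4.tcp_keepalive_intvl
          value: "75"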