From d262906aaf40b723782c4d1c3c7d3022d77c7ff1 Mon Sep 17 00:00:00 2001
From: DanielZhangQD <36026334+DanielZhangQD@users.noreply.github.com>
Date: Mon, 23 Mar 2020 22:00:31 +0800
Subject: [PATCH] create tidb cluster with cr on aws (#2004)

---
 deploy/aws/clusters.tf                       |  59 +++++-----
 deploy/aws/manifests/db-monitor.yaml.example |  84 +++++++++++++++
 deploy/aws/manifests/db.yaml.example         | 108 +++++++++++++++++++
 deploy/aws/variables.tf                      |   6 +-
 4 files changed, 226 insertions(+), 31 deletions(-)
 create mode 100644 deploy/aws/manifests/db-monitor.yaml.example
 create mode 100644 deploy/aws/manifests/db.yaml.example

diff --git a/deploy/aws/clusters.tf b/deploy/aws/clusters.tf
index ac3d9ff777..a8bf9691bd 100644
--- a/deploy/aws/clusters.tf
+++ b/deploy/aws/clusters.tf
@@ -17,25 +17,24 @@ provider "helm" {
 }
 
 # TiDB cluster declaration example
-#module "example-cluster" {
-#  source = "./tidb-cluster"
-#  eks_info = local.default_eks
-#  subnets = local.default_subnets
-#
-#  # NOTE: cluster_name cannot be changed after creation
-#  cluster_name = "demo-cluster"
-#  cluster_version = "v3.0.8"
-#  ssh_key_name = module.key-pair.key_name
-#  pd_count = 1
-#  pd_instance_type = "t2.xlarge"
-#  tikv_count = 1
-#  tikv_instance_type = "t2.xlarge"
-#  tidb_count = 1
-#  tidb_instance_type = "t2.xlarge"
-#  monitor_instance_type = "t2.xlarge"
-#  # yaml file that passed to helm to customize the release
-#  override_values = file("values/example.yaml")
-#}
+# module "example-cluster" {
+#   source = "../modules/aws/tidb-cluster"
+
+#   eks          = local.eks
+#   subnets      = local.subnets
+#   region       = var.region
+#   cluster_name = "example"
+
+#   ssh_key_name          = module.key-pair.key_name
+#   pd_count              = 1
+#   pd_instance_type      = "c5.large"
+#   tikv_count            = 1
+#   tikv_instance_type    = "c5d.large"
+#   tidb_count            = 1
+#   tidb_instance_type    = "c4.large"
+#   monitor_instance_type = "c5.large"
+#   create_tidb_cluster_release = false
+# }
 
 module "default-cluster" {
   providers = {
@@ -46,15 +45,15 @@ module "default-cluster" {
   subnets = local.subnets
   region  = var.region
 
-  cluster_name          = var.default_cluster_name
-  cluster_version       = var.default_cluster_version
-  ssh_key_name          = module.key-pair.key_name
-  pd_count              = var.default_cluster_pd_count
-  pd_instance_type      = var.default_cluster_pd_instance_type
-  tikv_count            = var.default_cluster_tikv_count
-  tikv_instance_type    = var.default_cluster_tikv_instance_type
-  tidb_count            = var.default_cluster_tidb_count
-  tidb_instance_type    = var.default_cluster_tidb_instance_type
-  monitor_instance_type = var.default_cluster_monitor_instance_type
-  override_values       = file("default-cluster.yaml")
+  cluster_name                = var.default_cluster_name
+  cluster_version             = var.default_cluster_version
+  ssh_key_name                = module.key-pair.key_name
+  pd_count                    = var.default_cluster_pd_count
+  pd_instance_type            = var.default_cluster_pd_instance_type
+  tikv_count                  = var.default_cluster_tikv_count
+  tikv_instance_type          = var.default_cluster_tikv_instance_type
+  tidb_count                  = var.default_cluster_tidb_count
+  tidb_instance_type          = var.default_cluster_tidb_instance_type
+  monitor_instance_type       = var.default_cluster_monitor_instance_type
+  create_tidb_cluster_release = var.create_tidb_cluster_release
 }
diff --git a/deploy/aws/manifests/db-monitor.yaml.example b/deploy/aws/manifests/db-monitor.yaml.example
new file mode 100644
index 0000000000..da607309b4
--- /dev/null
+++ b/deploy/aws/manifests/db-monitor.yaml.example
@@ -0,0 +1,84 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbMonitor
+metadata:
+  name: CLUSTER_NAME
+spec:
+  alertmanagerURL: ""
+  annotations: {}
+  clusters:
+  - name: CLUSTER_NAME
+  grafana:
+    baseImage: grafana/grafana
+    envs:
+      # Configure Grafana using environment variables except GF_PATHS_DATA, GF_SECURITY_ADMIN_USER and GF_SECURITY_ADMIN_PASSWORD
+      # Ref https://grafana.com/docs/installation/configuration/#using-environment-variables
+      GF_AUTH_ANONYMOUS_ENABLED: "true"
+      GF_AUTH_ANONYMOUS_ORG_NAME: "Main Org."
+      GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer"
+      # if grafana is running behind a reverse proxy with subpath http://foo.bar/grafana
+      # GF_SERVER_DOMAIN: foo.bar
+      # GF_SERVER_ROOT_URL: "%(protocol)s://%(domain)s/grafana/"
+    imagePullPolicy: IfNotPresent
+    logLevel: info
+    password: admin
+    resources: {}
+    # limits:
+    #   cpu: 8000m
+    #   memory: 8Gi
+    # requests:
+    #   cpu: 4000m
+    #   memory: 4Gi
+    service:
+      portName: http-grafana
+      type: LoadBalancer
+    username: admin
+    version: 6.0.1
+  imagePullPolicy: IfNotPresent
+  initializer:
+    baseImage: pingcap/tidb-monitor-initializer
+    imagePullPolicy: IfNotPresent
+    resources: {}
+    # limits:
+    #   cpu: 50m
+    #   memory: 64Mi
+    # requests:
+    #   cpu: 50m
+    #   memory: 64Mi
+    version: v3.0.12
+  kubePrometheusURL: ""
+  nodeSelector: {}
+  persistent: true
+  prometheus:
+    baseImage: prom/prometheus
+    imagePullPolicy: IfNotPresent
+    logLevel: info
+    reserveDays: 12
+    resources: {}
+    # limits:
+    #   cpu: 8000m
+    #   memory: 8Gi
+    # requests:
+    #   cpu: 4000m
+    #   memory: 4Gi
+    service:
+      portName: http-prometheus
+      type: NodePort
+    version: v2.11.1
+  reloader:
+    baseImage: pingcap/tidb-monitor-reloader
+    imagePullPolicy: IfNotPresent
+    resources: {}
+    # limits:
+    #   cpu: 50m
+    #   memory: 64Mi
+    # requests:
+    #   cpu: 50m
+    #   memory: 64Mi
+    service:
+      portName: tcp-reloader
+      type: NodePort
+    version: v1.0.1
+  storage: 100Gi
+  storageClassName: ebs-gp2
+  tolerations: []
+
diff --git a/deploy/aws/manifests/db.yaml.example b/deploy/aws/manifests/db.yaml.example
new file mode 100644
index 0000000000..5a4eb9c2bc
--- /dev/null
+++ b/deploy/aws/manifests/db.yaml.example
@@ -0,0 +1,108 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbCluster
+metadata:
+  name: CLUSTER_NAME
+spec:
+  configUpdateStrategy: RollingUpdate
+  enableTLSCluster: false
+  helper:
+    image: busybox:1.31.1
+  hostNetwork: false
+  imagePullPolicy: IfNotPresent
+  pd:
+    affinity: {}
+    baseImage: pingcap/pd
+    config:
+      log:
+        level: info
+      replication:
+        location-labels:
+        - zone
+        max-replicas: 3
+    nodeSelector:
+      dedicated: CLUSTER_NAME-pd
+    podSecurityContext: {}
+    replicas: 3
+    requests:
+      cpu: "1"
+      memory: 400Mi
+      storage: 1Gi
+    storageClassName: ebs-gp2
+    tolerations:
+    - effect: NoSchedule
+      key: dedicated
+      operator: Equal
+      value: CLUSTER_NAME-pd
+  pvReclaimPolicy: Retain
+  schedulerName: tidb-scheduler
+  tidb:
+    affinity: {}
+    baseImage: pingcap/tidb
+    config:
+      log:
+        level: info
+      performance:
+        max-procs: 0
+        tcp-keep-alive: true
+    enableTLSClient: false
+    maxFailoverCount: 3
+    nodeSelector:
+      dedicated: CLUSTER_NAME-tidb
+    podSecurityContext:
+      sysctls:
+      - name: net.ipv4.tcp_keepalive_time
+        value: "300"
+      - name: net.ipv4.tcp_keepalive_intvl
+        value: "75"
+      - name: net.core.somaxconn
+        value: "32768"
+    replicas: 2
+    requests:
+      cpu: "1"
+      memory: 400Mi
+    separateSlowLog: true
+    service:
+      annotations:
+        service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true'
+        service.beta.kubernetes.io/aws-load-balancer-internal: '0.0.0.0/0'
+        service.beta.kubernetes.io/aws-load-balancer-type: nlb
+      exposeStatus: true
+      externalTrafficPolicy: Local
+      type: LoadBalancer
+    slowLogTailer:
+      limits:
+        cpu: 100m
+        memory: 50Mi
+      requests:
+        cpu: 20m
+        memory: 5Mi
+    tolerations:
+    - effect: NoSchedule
+      key: dedicated
+      operator: Equal
+      value: CLUSTER_NAME-tidb
+  tikv:
+    affinity: {}
+    baseImage: pingcap/tikv
+    config:
+      log-level: info
+    hostNetwork: false
+    maxFailoverCount: 3
+    nodeSelector:
+      dedicated: CLUSTER_NAME-tikv
+    podSecurityContext: {}
+    privileged: false
+    replicas: 3
+    requests:
+      cpu: "1"
+      memory: 2Gi
+      storage: 45Gi
+    storageClassName: local-storage
+    tolerations:
+    - effect: NoSchedule
+      key: dedicated
+      operator: Equal
+      value: CLUSTER_NAME-tikv
+  timezone: UTC
+  version: v3.0.12
+
diff --git a/deploy/aws/variables.tf b/deploy/aws/variables.tf
index 0ad33b44f7..7691663a5c 100644
--- a/deploy/aws/variables.tf
+++ b/deploy/aws/variables.tf
@@ -19,7 +19,7 @@ variable "eks_version" {
 
 variable "operator_version" {
   description = "TiDB operator version"
-  default     = "v1.0.6"
+  default     = "v1.1.0"
 }
 
 variable "operator_values" {
@@ -115,3 +115,7 @@ variable "default_cluster_name" {
   default = "my-cluster"
 }
 
+variable "create_tidb_cluster_release" {
+  description = "whether to create the tidb-cluster Helm release"
+  default     = false
+}
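
For reference, uncommenting the example-cluster block in clusters.tf declares a
second cluster module alongside default-cluster. A minimal sketch of the result
is shown below; the "example" name and the instance types are the illustrative
values from the commented block, not requirements:

module "example-cluster" {
  source = "../modules/aws/tidb-cluster"

  eks          = local.eks
  subnets      = local.subnets
  region       = var.region
  cluster_name = "example"

  ssh_key_name          = module.key-pair.key_name
  pd_count              = 1
  pd_instance_type      = "c5.large"
  tikv_count            = 1
  tikv_instance_type    = "c5d.large"
  tidb_count            = 1
  tidb_instance_type    = "c4.large"
  monitor_instance_type = "c5.large"

  # With the Helm release disabled, this module is expected to provision only
  # the AWS/EKS resources; the TiDB cluster itself comes from the CR manifests
  # added in this patch (manifests/db.yaml.example and
  # manifests/db-monitor.yaml.example), applied with kubectl after replacing
  # the CLUSTER_NAME placeholder.
  create_tidb_cluster_release = false
}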