From 36074fa2d3b81b63397f686b2f2347c3b944c0bd Mon Sep 17 00:00:00 2001 From: Allen Zhong Date: Tue, 28 May 2019 18:53:29 +0800 Subject: [PATCH 1/4] deploy/aws: split private and subnet ids & make comments more clear --- deploy/aws/main.tf | 4 ++-- deploy/aws/variables.tf | 25 ++++++++++++++++++++++--- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/deploy/aws/main.tf b/deploy/aws/main.tf index f5687f5b04..2bb0c93e4b 100644 --- a/deploy/aws/main.tf +++ b/deploy/aws/main.tf @@ -69,7 +69,7 @@ module "ec2" { monitoring = false user_data = "${file("bastion-userdata")}" vpc_security_group_ids = ["${aws_security_group.ssh.id}"] - subnet_ids = "${split(",", var.create_vpc ? join(",", module.vpc.public_subnets) : join(",", var.subnets))}" + subnet_ids = "${split(",", var.create_vpc ? join(",", module.vpc.public_subnets) : join(",", var.public_subnet_ids))}" tags = { app = "tidb" @@ -86,7 +86,7 @@ module "eks" { cluster_name = "${var.cluster_name}" cluster_version = "${var.k8s_version}" config_output_path = "credentials/" - subnets = "${split(",", var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.subnets))}" + subnets = "${split(",", var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.private_subnet_ids))}" vpc_id = "${var.create_vpc ? module.vpc.vpc_id : var.vpc_id}" # instance types: https://aws.amazon.com/ec2/instance-types/ diff --git a/deploy/aws/variables.tf b/deploy/aws/variables.tf index ff6f0d9e0d..75e1e819d5 100644 --- a/deploy/aws/variables.tf +++ b/deploy/aws/variables.tf @@ -8,11 +8,17 @@ variable "ingress_cidr" { default = ["0.0.0.0/0"] # Note: Please restrict your ingress to only necessary IPs. Opening to 0.0.0.0/0 can lead to security vulnerabilities. 
} +# If you have an exist VPC that you'd like to use, set this value to `false` and +# adjust `vpc_id`, `private_subnet_ids` and `public_subnet_ids` to your exist ones +# Please note that this is only for manually created VPCs, deploying multiple EKS +# clusters in one VPC is NOT supported now. variable "create_vpc" { - description = "Create a new VPC or not, if true the vpc_cidr/private_subnets/public_subnets must be set correctly, otherwise vpc_id/subnet_ids must be set correctly" + description = "Create a new VPC or not, if true the vpc_cidr/private_subnets/public_subnets must be set correctly, otherwise vpc_id/private_subnet_ids/public_subnet_ids must be set correctly" default = true } +# The networks you'd like to use within the VPC. +# This value will be ignored if `create_vpc=false` variable "vpc_cidr" { description = "vpc cidr" default = "10.0.0.0/16" @@ -30,14 +36,27 @@ variable "public_subnets" { default = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] } +# The ID of exist VPC +# This value will be ignored if `create_vpc=true` variable "vpc_id" { description = "VPC id" type = "string" default = "vpc-c679deae" } -variable "subnets" { - description = "subnet id list" +# The subnet IDs of your private and public networks, if you want +# to use the same subnets for both private and public usage, just +# set their values identical. 
+# These values will be ignored if `create_vpc=true` +variable "private_subnet_ids" { + description = "private subnet id list" + type = "list" + default = ["subnet-899e79f3", "subnet-a72d80cf", "subnet-a76d34ea"] +} + + +variable "public_subnet_ids" { + description = "public subnet id list" type = "list" default = ["subnet-899e79f3", "subnet-a72d80cf", "subnet-a76d34ea"] } From 10849a19bf744c9ba955d824d4add2a402d78db7 Mon Sep 17 00:00:00 2001 From: Allen Zhong Date: Tue, 28 May 2019 18:56:37 +0800 Subject: [PATCH 2/4] deploy/aws: make helm release name unique --- deploy/aws/data.tf | 8 ++++---- deploy/aws/main.tf | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/deploy/aws/data.tf b/deploy/aws/data.tf index 22924f5d8b..ea7fbfc547 100644 --- a/deploy/aws/data.tf +++ b/deploy/aws/data.tf @@ -29,7 +29,7 @@ data "template_file" "tidb_cluster_values" { # data "kubernetes_service" "tidb" { # depends_on = ["helm_release.tidb-cluster"] # metadata { -# name = "tidb-cluster-tidb" +# name = "tidb-cluster-${var.cluster_name}-tidb" # namespace = "tidb" # } # } @@ -37,17 +37,17 @@ data "template_file" "tidb_cluster_values" { # data "kubernetes_service" "monitor" { # depends_on = ["helm_release.tidb-cluster"] # metadata { -# name = "tidb-cluster-grafana" +# name = "tidb-cluster-${var.cluster_name}-grafana" # namespace = "tidb" # } # } data "external" "tidb_service" { depends_on = ["null_resource.wait-tidb-ready"] - program = ["bash", "-c", "kubectl --kubeconfig credentials/kubeconfig_${var.cluster_name} get svc -n tidb tidb-cluster-tidb -ojson | jq '.status.loadBalancer.ingress[0]'"] + program = ["bash", "-c", "kubectl --kubeconfig credentials/kubeconfig_${var.cluster_name} get svc -n tidb tidb-cluster-${var.cluster_name}-tidb -ojson | jq '.status.loadBalancer.ingress[0]'"] } data "external" "monitor_service" { depends_on = ["null_resource.wait-tidb-ready"] - program = ["bash", "-c", "kubectl --kubeconfig credentials/kubeconfig_${var.cluster_name} get svc 
-n tidb tidb-cluster-grafana -ojson | jq '.status.loadBalancer.ingress[0]'"] + program = ["bash", "-c", "kubectl --kubeconfig credentials/kubeconfig_${var.cluster_name} get svc -n tidb tidb-cluster-${var.cluster_name}-grafana -ojson | jq '.status.loadBalancer.ingress[0]'"] } diff --git a/deploy/aws/main.tf b/deploy/aws/main.tf index 2bb0c93e4b..634c55a37c 100644 --- a/deploy/aws/main.tf +++ b/deploy/aws/main.tf @@ -209,7 +209,7 @@ resource "helm_release" "tidb-operator" { resource "helm_release" "tidb-cluster" { depends_on = ["helm_release.tidb-operator"] - name = "tidb-cluster" + name = "tidb-cluster-${var.cluster_name}" namespace = "tidb" chart = "${path.module}/charts/tidb-cluster" values = [ @@ -226,11 +226,11 @@ until kubectl get po -n tidb -lapp.kubernetes.io/component=tidb | grep Running; echo "Wait TiDB pod running" sleep 5 done -until kubectl get svc -n tidb tidb-cluster-tidb | grep elb; do +until kubectl get svc -n tidb tidb-cluster-${var.cluster_name}-tidb | grep elb; do echo "Wait TiDB service ready" sleep 5 done -until kubectl get svc -n tidb tidb-cluster-grafana | grep elb; do +until kubectl get svc -n tidb tidb-cluster-${var.cluster_name}-grafana | grep elb; do echo "Wait monitor service ready" sleep 5 done From 5bd618483b162d3305179959219796fe288e5add Mon Sep 17 00:00:00 2001 From: Allen Zhong Date: Tue, 28 May 2019 19:04:29 +0800 Subject: [PATCH 3/4] docs/aws: add notes of not reusing VPC for exist clusters --- deploy/aws/README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/deploy/aws/README.md b/deploy/aws/README.md index 0691fb1c7e..189b796813 100644 --- a/deploy/aws/README.md +++ b/deploy/aws/README.md @@ -76,7 +76,7 @@ monitor_endpoint = http://abd299cc47af411e98aae02938da0762-1989524000.us-east-2. 
region = us-east-2 tidb_dns = abd2e3f7c7af411e98aae02938da0762-17499b76b312be02.elb.us-east-2.amazonaws.com tidb_port = 4000 -tidb_version = v3.0.0-rc.1 +tidb_version = v2.1.8 ``` > **Note:** You can use the `terraform output` command to get the output again. @@ -149,7 +149,9 @@ You can change default values in `variables.tf` (such as the cluster name and im ### Customize AWS related resources -By default, the terraform script will create a new VPC. You can use an existing VPC by setting `create_vpc` to `false` and specify your existing VPC id and subnet ids to `vpc_id` and `subnets` variables. +By default, the terraform script will create a new VPC. You can use an existing VPC by setting `create_vpc` to `false` and specify your existing VPC id and subnet ids to `vpc_id`, `private_subnet_ids` and `public_subnet_ids` variables. + +**Note:** Reusing VPC and subnets of an existing EKS cluster is not supported yet due to limitations of AWS and Terraform, so only change this option if you have to use a manually created VPC. An ec2 instance is also created by default as bastion machine to connect to the created TiDB cluster, because the TiDB service is exposed as an [Internal Elastic Load Balancer](https://aws.amazon.com/blogs/aws/internal-elastic-load-balancers/). The ec2 instance has MySQL and Sysbench pre-installed, so you can SSH into the ec2 instance and connect to TiDB using the ELB endpoint. You can disable the bastion instance creation by setting `create_bastion` to `false` if you already have an ec2 instance in the VPC. 
From a4c46620014857c669341e319bf8dc5e1346e752 Mon Sep 17 00:00:00 2001 From: Allen Zhong Date: Thu, 30 May 2019 11:35:36 +0800 Subject: [PATCH 4/4] deploy/aws: adjust comments and descriptions --- deploy/aws/README.md | 8 ++++---- deploy/aws/variables.tf | 28 ++++++++++------------------ 2 files changed, 14 insertions(+), 22 deletions(-) diff --git a/deploy/aws/README.md b/deploy/aws/README.md index 189b796813..6e55eb6385 100644 --- a/deploy/aws/README.md +++ b/deploy/aws/README.md @@ -76,7 +76,7 @@ monitor_endpoint = http://abd299cc47af411e98aae02938da0762-1989524000.us-east-2. region = us-east-2 tidb_dns = abd2e3f7c7af411e98aae02938da0762-17499b76b312be02.elb.us-east-2.amazonaws.com tidb_port = 4000 -tidb_version = v2.1.8 +tidb_version = v3.0.0-rc.1 ``` > **Note:** You can use the `terraform output` command to get the output again. @@ -127,7 +127,7 @@ For example, to upgrade the cluster to version 2.1.10, modify the `tidb_version` } ``` -> *Note*: The upgrading doesn't finish immediately. You can watch the upgrading process by `kubectl --kubeconfig credentials/kubeconfig_ get po -n tidb --watch`. +> **Note**: The upgrading doesn't finish immediately. You can watch the upgrading process by `kubectl --kubeconfig credentials/kubeconfig_ get po -n tidb --watch`. ## Scale @@ -141,7 +141,7 @@ For example, to scale out the cluster, you can modify the number of TiDB instanc } ``` -> *Note*: Currently, scaling in is NOT supported since we cannot determine which node to scale. Scaling out needs a few minutes to complete, you can watch the scaling out by `kubectl --kubeconfig credentials/kubeconfig_ get po -n tidb --watch`. +> **Note**: Currently, scaling in is NOT supported since we cannot determine which node to scale. Scaling out needs a few minutes to complete, you can watch the scaling out by `kubectl --kubeconfig credentials/kubeconfig_ get po -n tidb --watch`. 
## Customize @@ -151,7 +151,7 @@ You can change default values in `variables.tf` (such as the cluster name and im By default, the terraform script will create a new VPC. You can use an existing VPC by setting `create_vpc` to `false` and specify your existing VPC id and subnet ids to `vpc_id`, `private_subnet_ids` and `public_subnet_ids` variables. -**Note:** Reusing VPC and subnets of an existing EKS cluster is not supported yet due to limitations of AWS and Terraform, so only change this option if you have to use a manually created VPC. +> **Note:** Reusing VPC and subnets of an existing EKS cluster is not supported yet due to limitations of AWS and Terraform, so only change this option if you have to use a manually created VPC. An ec2 instance is also created by default as bastion machine to connect to the created TiDB cluster, because the TiDB service is exposed as an [Internal Elastic Load Balancer](https://aws.amazon.com/blogs/aws/internal-elastic-load-balancers/). The ec2 instance has MySQL and Sysbench pre-installed, so you can SSH into the ec2 instance and connect to TiDB using the ELB endpoint. You can disable the bastion instance creation by setting `create_bastion` to `false` if you already have an ec2 instance in the VPC. diff --git a/deploy/aws/variables.tf b/deploy/aws/variables.tf index 75e1e819d5..619d644e04 100644 --- a/deploy/aws/variables.tf +++ b/deploy/aws/variables.tf @@ -4,59 +4,51 @@ variable "region" { } variable "ingress_cidr" { - description = "IP cidr that allowed to access bastion ec2 instance" + description = "IP CIDR that is allowed to access the bastion ec2 instance" default = ["0.0.0.0/0"] # Note: Please restrict your ingress to only necessary IPs. Opening to 0.0.0.0/0 can lead to security vulnerabilities. 
} -# If you have an exist VPC that you'd like to use, set this value to `false` and -# adjust `vpc_id`, `private_subnet_ids` and `public_subnet_ids` to your exist ones # Please note that this is only for manually created VPCs, deploying multiple EKS # clusters in one VPC is NOT supported now. variable "create_vpc" { - description = "Create a new VPC or not, if true the vpc_cidr/private_subnets/public_subnets must be set correctly, otherwise vpc_id/private_subnet_ids/public_subnet_ids must be set correctly" + description = "Create a new VPC or not. If there is an existing VPC that you'd like to use, set this value to `false` and adjust `vpc_id`, `private_subnet_ids` and `public_subnet_ids` to the existing ones." default = true } -# The networks you'd like to use within the VPC. -# This value will be ignored if `create_vpc=false` variable "vpc_cidr" { - description = "vpc cidr" + description = "The network to use within the VPC. This value is ignored if `create_vpc=false`." default = "10.0.0.0/16" } variable "private_subnets" { - description = "vpc private subnets" + description = "The networks to use for private subnets. This value is ignored if `create_vpc=false`." type = "list" default = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] } variable "public_subnets" { - description = "vpc public subnets" + description = "The networks to use for public subnets. This value is ignored if `create_vpc=false`." type = "list" default = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] } -# The ID of exist VPC -# This value will be ignored if `create_vpc=true` variable "vpc_id" { - description = "VPC id" + description = "ID of the existing VPC. This value is ignored if `create_vpc=true`." type = "string" default = "vpc-c679deae" } -# The subnet IDs of your private and public networks, if you want -# to use the same subnets for both private and public usage, just -# set their values identical. 
-# These values will be ignored if `create_vpc=true` +# To use the same subnets for both private and public usage, +# just set their values identical. variable "private_subnet_ids" { - description = "private subnet id list" + description = "The subnet ID(s) of the existing private networks. This value is ignored if `create_vpc=true`." type = "list" default = ["subnet-899e79f3", "subnet-a72d80cf", "subnet-a76d34ea"] } variable "public_subnet_ids" { - description = "public subnet id list" + description = "The subnet ID(s) of the existing public networks. This value is ignored if `create_vpc=true`." type = "list" default = ["subnet-899e79f3", "subnet-a72d80cf", "subnet-a76d34ea"] }