From 05a6edacb6d96d7e8edbf0176ccacce2c594845b Mon Sep 17 00:00:00 2001 From: Jacob Lerche Date: Wed, 15 May 2019 08:09:03 -0700 Subject: [PATCH] Adds tweaks and formatting --- deploy/gcp/README.md | 4 +- deploy/gcp/data.tf | 9 +- deploy/gcp/main.tf | 164 ++++++++++-------- deploy/gcp/outputs.tf | 4 + .../templates/tidb-cluster-values.yaml.tpl | 2 +- deploy/gcp/variables.tf | 10 +- 6 files changed, 106 insertions(+), 87 deletions(-) diff --git a/deploy/gcp/README.md b/deploy/gcp/README.md index 664b34416ad..f66f6c5327a 100644 --- a/deploy/gcp/README.md +++ b/deploy/gcp/README.md @@ -45,7 +45,7 @@ gcloud services enable container.googleapis.com Now we can launch the script: ```bash -git clone https://github.com/pingcap/tidb-operator +git clone --depth=1 https://github.com/pingcap/tidb-operator cd tidb-operator/deploy/gcp terraform init terraform apply @@ -82,7 +82,7 @@ To upgrade TiDB cluster, modify `tidb_version` variable to a higher version in v ## Scale TiDB cluster -To scale TiDB cluster, modify `tikv_count` or `tidb_count` to your desired count, and then run `terraform apply`. +To scale TiDB cluster, modify `tikv_count`, `tikv_replica_count`, `tidb_count`, and `tidb_replica_count` to your desired count, and then run `terraform apply`. > *Note*: Currently, scaling in is not supported since we cannot determine which node to scale. Scaling out needs a few minutes to complete, you can watch the scaling out by `watch kubectl --kubeconfig credentials/kubeconfig_ get po -n tidb` diff --git a/deploy/gcp/data.tf b/deploy/gcp/data.tf index b7bb9bb7b7a..4a6de8d9e63 100644 --- a/deploy/gcp/data.tf +++ b/deploy/gcp/data.tf @@ -1,10 +1,11 @@ data "template_file" "tidb_cluster_values" { template = "${file("${path.module}/templates/tidb-cluster-values.yaml.tpl")}" - vars { + + vars { cluster_version = "${var.tidb_version}" - pd_replicas = "${var.pd_replica_count}" - tikv_replicas = "${var.tikv_replica_count}" - tidb_replicas = "${var.tidb_replica_count}" + pd_replicas = "${var.pd_replica_count}" + tikv_replicas = "${var.tikv_replica_count}" + tidb_replicas = "${var.tidb_replica_count}" } } diff --git a/deploy/gcp/main.tf b/deploy/gcp/main.tf index d150872839f..19bdccfc4fd 100644 --- a/deploy/gcp/main.tf +++ b/deploy/gcp/main.tf @@ -4,20 +4,20 @@ variable "GCP_PROJECT" {} provider "google" { credentials = "${file("${var.GCP_CREDENTIALS_PATH}")}" - region = "${var.GCP_REGION}" - project = "${var.GCP_PROJECT}" + region = "${var.GCP_REGION}" + project = "${var.GCP_PROJECT}" } // required for taints on node pools provider "google-beta" { credentials = "${file("${var.GCP_CREDENTIALS_PATH}")}" - region = "${var.GCP_REGION}" - project = "${var.GCP_PROJECT}" + region = "${var.GCP_REGION}" + project = "${var.GCP_PROJECT}" } locals { - credential_path = "${path.module}/credentials" - kubeconfig = "${local.credential_path}/kubeconfig_${var.cluster_name}" + credential_path = "${path.module}/credentials" + kubeconfig = "${local.credential_path}/kubeconfig_${var.cluster_name}" tidb_cluster_values_path = "${path.module}/rendered/tidb-cluster-values.yaml" } @@ -28,39 +28,41 @@ resource "null_resource" "prepare-dir" { } resource "google_compute_network" "vpc_network" { - name = "vpc-network" + name = "vpc-network" auto_create_subnetworks = false - project = "${var.GCP_PROJECT}" + project = "${var.GCP_PROJECT}" } resource "google_compute_subnetwork" "private_subnet" { ip_cidr_range = "172.31.252.0/22" - name = "private-subnet" - network = "${google_compute_network.vpc_network.self_link}" - project = "${var.GCP_PROJECT}" 
+  name          = "private-subnet"
+  network       = "${google_compute_network.vpc_network.self_link}"
+  project       = "${var.GCP_PROJECT}"
+
   secondary_ip_range {
     ip_cidr_range = "172.30.0.0/16"
-    range_name = "pods-${var.GCP_REGION}"
+    range_name    = "pods-${var.GCP_REGION}"
   }
+
   secondary_ip_range {
     ip_cidr_range = "172.31.224.0/20"
-    range_name = "services-${var.GCP_REGION}"
+    range_name    = "services-${var.GCP_REGION}"
   }
 }
 
 resource "google_compute_subnetwork" "public_subnet" {
   ip_cidr_range = "172.29.252.0/22"
-  name = "public-subnet"
-  network = "${google_compute_network.vpc_network.self_link}"
-  project = "${var.GCP_PROJECT}"
+  name          = "public-subnet"
+  network       = "${google_compute_network.vpc_network.self_link}"
+  project       = "${var.GCP_PROJECT}"
 }
 
 resource "google_container_cluster" "cluster" {
-  name = "${var.cluster_name}"
-  network = "${google_compute_network.vpc_network.self_link}"
+  name       = "${var.cluster_name}"
+  network    = "${google_compute_network.vpc_network.self_link}"
   subnetwork = "${google_compute_subnetwork.private_subnet.self_link}"
-  location = "${var.GCP_REGION}"
-  project = "${var.GCP_PROJECT}"
+  location   = "${var.GCP_REGION}"
+  project    = "${var.GCP_PROJECT}"
 
   master_auth {
     username = ""
@@ -78,173 +80,183 @@ resource "google_container_cluster" "cluster" {
   }
 
   remove_default_node_pool = true
-  initial_node_count = 1
+  initial_node_count       = 1
 
   min_master_version = "latest"
 }
 
-
 resource "google_container_node_pool" "pd_pool" {
-  provider = "google-beta"
-  project = "${var.GCP_PROJECT}"
-  cluster = "${google_container_cluster.cluster.name}"
-  location = "${google_container_cluster.cluster.location}"
-  name = "pd-pool"
+  provider           = "google-beta"
+  project            = "${var.GCP_PROJECT}"
+  cluster            = "${google_container_cluster.cluster.name}"
+  location           = "${google_container_cluster.cluster.location}"
+  name               = "pd-pool"
   initial_node_count = "${var.pd_count}"
 
   node_config {
-    machine_type = "${var.pd_instance_type}"
+    machine_type    = "${var.pd_instance_type}"
     local_ssd_count = 1
+
    taint {
       effect = "NO_SCHEDULE"
-      key = "dedicated"
-      value = "pd"
+      key    = "dedicated"
+      value  = "pd"
    }
+
     labels {
       dedicated = "pd"
    }
-    tags = ["pd"]
+
+    tags         = ["pd"]
    oauth_scopes = ["storage-ro", "logging-write", "monitoring"]
  }
-
 }
 
 resource "google_container_node_pool" "tikv_pool" {
-  provider = "google-beta"
-  project = "${var.GCP_PROJECT}"
-  cluster = "${google_container_cluster.cluster.name}"
-  location = "${google_container_cluster.cluster.location}"
-  name = "tikv-pool"
+  provider           = "google-beta"
+  project            = "${var.GCP_PROJECT}"
+  cluster            = "${google_container_cluster.cluster.name}"
+  location           = "${google_container_cluster.cluster.location}"
+  name               = "tikv-pool"
   initial_node_count = "${var.tikv_count}"
 
   node_config {
-    machine_type = "${var.tikv_instance_type}"
+    machine_type    = "${var.tikv_instance_type}"
    local_ssd_count = 1
+
    taint {
       effect = "NO_SCHEDULE"
-      key = "dedicated"
-      value = "tikv"
+      key    = "dedicated"
+      value  = "tikv"
    }
+
    labels {
      dedicated = "tikv"
    }
-    tags = ["tikv"]
-    oauth_scopes = ["storage-ro", "logging-write", "monitoring"]
+    tags         = ["tikv"]
+    oauth_scopes = ["storage-ro", "logging-write", "monitoring"]
  }
-
 }
 
 resource "google_container_node_pool" "tidb_pool" {
-  provider = "google-beta"
-  project = "${var.GCP_PROJECT}"
-  cluster = "${google_container_cluster.cluster.name}"
-  location = "${google_container_cluster.cluster.location}"
-  name = "tidb-pool"
+  provider           = "google-beta"
+  project            = "${var.GCP_PROJECT}"
+  cluster            = "${google_container_cluster.cluster.name}"
+  location           = "${google_container_cluster.cluster.location}"
+  name               = "tidb-pool"
   initial_node_count = "${var.tidb_count}"
 
   node_config {
    machine_type = "${var.tidb_instance_type}"
+
    taint {
      effect = "NO_SCHEDULE"
-      key = "dedicated"
-      value = "tidb"
+      key    = "dedicated"
+      value  = "tidb"
    }
+
    labels {
      dedicated = "tidb"
    }
-    tags = ["tidb"]
+
+    tags         = ["tidb"]
    oauth_scopes = ["storage-ro", "logging-write", "monitoring"]
  }
-
 }
 
 resource "google_container_node_pool" "monitor_pool" {
-  project = "${var.GCP_PROJECT}"
-  cluster = "${google_container_cluster.cluster.name}"
-  location = "${google_container_cluster.cluster.location}"
-  name = "monitor-pool"
+  project            = "${var.GCP_PROJECT}"
+  cluster            = "${google_container_cluster.cluster.name}"
+  location           = "${google_container_cluster.cluster.location}"
+  name               = "monitor-pool"
   initial_node_count = "1"
 
   node_config {
    machine_type = "${var.monitor_instance_type}"
-    tags = ["monitor"]
+    tags         = ["monitor"]
    oauth_scopes = ["storage-ro", "logging-write", "monitoring"]
  }
-
 }
 
 resource "google_compute_firewall" "allow_ssh_bastion" {
-  name = "allow-ssh-bastion"
+  name    = "allow-ssh-bastion"
   network = "${google_compute_network.vpc_network.self_link}"
   project = "${var.GCP_PROJECT}"
 
   allow {
    protocol = "tcp"
-    ports = ["22"]
+    ports    = ["22"]
  }
+
   source_ranges = ["0.0.0.0/0"]
-  target_tags = ["bastion"]
+  target_tags   = ["bastion"]
 }
 
 resource "google_compute_firewall" "allow_mysql_from_bastion" {
-  name = "allow-mysql-from-bastion"
+  name    = "allow-mysql-from-bastion"
   network = "${google_compute_network.vpc_network.self_link}"
   project = "${var.GCP_PROJECT}"
 
   allow {
    protocol = "tcp"
-    ports = ["4000"]
+    ports    = ["4000"]
  }
+
   source_tags = ["bastion"]
   target_tags = ["tidb"]
 }
 
 resource "google_compute_firewall" "allow_ssh_from_bastion" {
-  name = "allow-ssh-from-bastion"
+  name    = "allow-ssh-from-bastion"
   network = "${google_compute_network.vpc_network.self_link}"
   project = "${var.GCP_PROJECT}"
 
   allow {
    protocol = "tcp"
-    ports = ["22"]
+    ports    = ["22"]
  }
+
   source_tags = ["bastion"]
   target_tags = ["tidb", "tikv", "pd", "monitor"]
 }
 
 resource "google_compute_instance" "bastion" {
-  project = "${var.GCP_PROJECT}"
-  zone = "${var.GCP_REGION}-a"
+  project      = "${var.GCP_PROJECT}"
+  zone         = "${var.GCP_REGION}-a"
   machine_type = "${var.bastion_instance_type}"
-  name = "bastion"
+  name         = "bastion"
+
   "boot_disk" {
    initialize_params {
      image = "ubuntu-os-cloud/ubuntu-1804-lts"
    }
  }
+
   "network_interface" {
-    subnetwork = "${google_compute_subnetwork.public_subnet.self_link}"
-    access_config {}
+    subnetwork    = "${google_compute_subnetwork.public_subnet.self_link}"
+    access_config = {}
  }
+
   tags = ["bastion"]
-  metadata_startup_script = "sudo apt-get install -y mysql-client && curl -s https://packagecloud.io/install/repositories/akopytov/sysbench/script.rpm.sh | bash && sudo apt-get -y install sysbench"
+  metadata_startup_script = "sudo apt-get install -y mysql-client && curl -s https://packagecloud.io/install/repositories/akopytov/sysbench/script.deb.sh | bash && sudo apt-get -y install sysbench"
 }
 
 resource "null_resource" "get-credentials" {
   provisioner "local-exec" {
    command = "gcloud container clusters get-credentials ${google_container_cluster.cluster.name} --region ${var.GCP_REGION}"
+
    environment {
-      KUBECONFIG= "${local.kubeconfig}"
+      KUBECONFIG = "${local.kubeconfig}"
    }
  }
 }
 
 resource "local_file" "tidb-cluster-values" {
   depends_on = ["data.template_file.tidb_cluster_values"]
-  filename = "${local.tidb_cluster_values_path}"
-  content = "${data.template_file.tidb_cluster_values.rendered}"
+  filename   = "${local.tidb_cluster_values_path}"
+  content    = "${data.template_file.tidb_cluster_values.rendered}"
 }
 
 resource "null_resource" "setup-env" {
@@ -252,6 +264,7 @@ resource "null_resource" "setup-env" {
 
   provisioner "local-exec" {
    working_dir = "${path.module}"
+
    command = <<EOS