From a349ed1af290f01d2f0e6d2866116e25ed14d35a Mon Sep 17 00:00:00 2001
From: "W. Trevor King"
Date: Tue, 29 Jan 2019 14:24:51 -0800
Subject: [PATCH] pkg/destroy/bootstrap: Separate load-balancer target teardown

Close a small window for lost Kubernetes API traffic:

* The Terraform tear-down could remove the bootstrap machine before it
  removed the bootstrap load-balancer target, leaving the target
  pointing into empty space.
* Bootstrap teardown gave existing client connections no time to drain
  between removing the load-balancer target and removing the bootstrap
  machine.

With this commit, we:

1. Wait 30 seconds for the production control plane to come up.
2. Remove the bootstrap load-balancer targets.
3. Wait 10 seconds for requests to the bootstrap machine to drain out.
4. Remove the remaining bootstrap resources, including the bootstrap
   machine.

The 30-second calculation is provider-specific.  On AWS, it takes 30
seconds for AWS to notice that our production control-plane targets
are live (healthy_threshold * interval for our aws_lb_target_group
[1]).  This assumes the post-pod manifests are all pushed in zero
seconds, so it's overly conservative, but waiting an extra 30 seconds
isn't a large cost.  The 30-second delay doesn't really matter for
libvirt, because clients will have been banging away at the production
control plane the whole time, with those requests failing until the
control plane came up to listen.  But an extra 30-second delay is not
a big deal there either.

The 10-second delay for draining works around a Terraform plugin
limitation on AWS.  From the AWS network load-balancer docs [2]:

> Connection draining ensures that in-flight requests complete before
> existing connections are closed.  The initial state of a
> deregistering target is draining.  By default, the state of a
> deregistering target changes to unused after 300 seconds.  To change
> the amount of time that Elastic Load Balancing waits before changing
> the state to unused, update the deregistration delay value.  We
> recommend that you specify a value of at least 120 seconds to ensure
> that requests are completed.

And from [3]:

> Deregistering a target removes it from your target group, but does
> not affect the target otherwise.  The load balancer stops routing
> requests to a target as soon as it is deregistered.  The target
> enters the draining state until in-flight requests have completed.

The Terraform attachment-deletion logic is in [4], and while it fires
a deregister request, it does not wait around for draining to
complete.  I don't see any issues in the provider repository about
waiting for the unused state, but we could push something like that if
we wanted more finesse here than a 10-second cross-platform sleep (a
rough sketch of that polling is below).  For the moment, I'm just
saying "we know who our consumers are at this point, and none of them
will keep an open request going for more than 10 seconds".

The 10-second drain delay also seems sufficient for libvirt's
round-robin DNS, since clients should be able to fall back to
alternative IPs on their own.  We may be able to set shorter TTLs on
the libvirt DNS entries to firm that up, but clean transitions are
less important for dev-only libvirt clusters anyway.  And, as for the
30-second delay for the production control plane to come up, clients
have been banging away on all of these IPs throughout the whole
bootstrap process.

I'm not sure how OpenStack handles this teardown; naively grepping
through data/data/openstack didn't turn up anything that looked much
like a bootstrap load-balancer target resource.

[1]: https://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-health-checks.html
[2]: https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html#deregistration-delay
[3]: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html#registered-targets
[4]: pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_target_group_attachment.go#L80-L106
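As a postscript on the "more finesse" option above, polling for the
unused state from Go could look roughly like the sketch below.  This
is not part of this patch: the elbv2.DescribeTargetHealth call and the
TargetHealthStateEnumUnused constant are real aws-sdk-go API, but the
helper name, polling interval, and timeout handling are all
illustrative.

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/elbv2"
    )

    // waitForTargetUnused polls the target group until the
    // deregistered target has finished draining (or is no longer
    // registered at all), instead of sleeping a fixed 10 seconds.
    func waitForTargetUnused(client *elbv2.ELBV2, targetGroupARN, targetID string, timeout time.Duration) error {
        deadline := time.Now().Add(timeout)
        for {
            out, err := client.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{
                TargetGroupArn: aws.String(targetGroupARN),
                Targets:        []*elbv2.TargetDescription{{Id: aws.String(targetID)}},
            })
            if err != nil {
                return err
            }
            state := elbv2.TargetHealthStateEnumUnused
            if len(out.TargetHealthDescriptions) > 0 && out.TargetHealthDescriptions[0].TargetHealth != nil {
                state = aws.StringValue(out.TargetHealthDescriptions[0].TargetHealth.State)
            }
            // "unused" means in-flight requests have drained and the
            // instance is safe to delete.
            if state == elbv2.TargetHealthStateEnumUnused {
                return nil
            }
            if time.Now().After(deadline) {
                return fmt.Errorf("target %s still %s after %s", targetID, state, timeout)
            }
            time.Sleep(5 * time.Second)
        }
    }

    func main() {
        client := elbv2.New(session.Must(session.NewSession()))
        // Placeholder ARN and instance ID.
        if err := waitForTargetUnused(client, "arn:aws:elasticloadbalancing:...", "i-0123456789abcdef0", 2*time.Minute); err != nil {
            fmt.Println(err)
        }
    }

A proper fix would presumably hang this sort of loop off the
provider's attachment Delete, but a post-deregister wait in the
installer would cover us either way.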
---
 cmd/openshift-install/create.go        |  3 +++
 data/data/aws/main.tf                  |  4 ++--
 data/data/config.tf                    |  5 +++++
 data/data/libvirt/main.tf              |  2 +-
 data/data/libvirt/variables-libvirt.tf |  5 -----
 pkg/destroy/bootstrap/bootstrap.go     | 30 +++++++++++++-------------
 6 files changed, 26 insertions(+), 23 deletions(-)

diff --git a/cmd/openshift-install/create.go b/cmd/openshift-install/create.go
index 40a333488be..9b6d1b4928b 100644
--- a/cmd/openshift-install/create.go
+++ b/cmd/openshift-install/create.go
@@ -275,6 +275,9 @@ func destroyBootstrap(ctx context.Context, config *rest.Config, directory string
         return errors.Wrap(err, "waiting for bootstrap-success")
     }
 
+    logrus.Info("Waiting 30 seconds for the production control-plane to enter the load balancer")
+    time.Sleep(30 * time.Second)
+
     logrus.Info("Destroying the bootstrap resources...")
     return destroybootstrap.Destroy(rootOpts.dir)
 }
diff --git a/data/data/aws/main.tf b/data/data/aws/main.tf
index e5aefcd5fee..fe65f3ec0bf 100644
--- a/data/data/aws/main.tf
+++ b/data/data/aws/main.tf
@@ -18,8 +18,8 @@ module "bootstrap" {
   iam_role                 = "${var.aws_master_iam_role_name}"
   ignition                 = "${var.ignition_bootstrap}"
   subnet_id                = "${module.vpc.master_subnet_ids[0]}"
-  target_group_arns        = "${module.vpc.aws_lb_target_group_arns}"
-  target_group_arns_length = "${module.vpc.aws_lb_target_group_arns_length}"
+  target_group_arns        = "${var.bootstrap_load_balancer_targets ? module.vpc.aws_lb_target_group_arns : []}"
+  target_group_arns_length = "${var.bootstrap_load_balancer_targets ? module.vpc.aws_lb_target_group_arns_length : 0}"
   vpc_id                   = "${module.vpc.vpc_id}"
   vpc_security_group_ids   = "${list(module.vpc.master_sg_id)}"
diff --git a/data/data/config.tf b/data/data/config.tf
index 1fbaf042fc6..b1033b6e2cc 100644
--- a/data/data/config.tf
+++ b/data/data/config.tf
@@ -2,6 +2,11 @@ terraform {
   required_version = ">= 0.10.7"
 }
 
+variable "bootstrap_load_balancer_targets" {
+  default     = true
+  description = "Whether to include load-balancer targets for the bootstrap machine."
+}
+
 variable "machine_cidr" {
   type = "string"
diff --git a/data/data/libvirt/main.tf b/data/data/libvirt/main.tf
index 06bbac25313..fe59420586d 100644
--- a/data/data/libvirt/main.tf
+++ b/data/data/libvirt/main.tf
@@ -90,7 +90,7 @@ resource "libvirt_domain" "master" {
 }
 
 data "libvirt_network_dns_host_template" "bootstrap" {
-  count    = "${var.bootstrap_dns ? 1 : 0}"
+  count    = "${var.bootstrap_load_balancer_targets ? 1 : 0}"
   ip       = "${var.libvirt_bootstrap_ip}"
   hostname = "${var.cluster_name}-api"
 }
diff --git a/data/data/libvirt/variables-libvirt.tf b/data/data/libvirt/variables-libvirt.tf
index 5b3d40f1fbd..2a975e85fa7 100644
--- a/data/data/libvirt/variables-libvirt.tf
+++ b/data/data/libvirt/variables-libvirt.tf
@@ -1,8 +1,3 @@
-variable "bootstrap_dns" {
-  default     = true
-  description = "Whether to include DNS entries for the bootstrap node or not."
-}
-
 variable "libvirt_uri" {
   type        = "string"
   description = "libvirt connection URI"
diff --git a/pkg/destroy/bootstrap/bootstrap.go b/pkg/destroy/bootstrap/bootstrap.go
index 19c8229a82b..abc1e05593d 100644
--- a/pkg/destroy/bootstrap/bootstrap.go
+++ b/pkg/destroy/bootstrap/bootstrap.go
@@ -6,6 +6,7 @@ import (
     "io/ioutil"
     "os"
     "path/filepath"
+    "time"
 
     "github.com/openshift/installer/pkg/asset/cluster"
     "github.com/openshift/installer/pkg/terraform"
@@ -24,17 +25,12 @@ func Destroy(dir string) (err error) {
         return errors.New("no platform configured in metadata")
     }
 
-    copyNames := []string{terraform.StateFileName, cluster.TfVarsFileName}
-
-    if platform == "libvirt" {
-        err = ioutil.WriteFile(filepath.Join(dir, "disable-bootstrap.tfvars"), []byte(`{
-    "bootstrap_dns": false
+    err = ioutil.WriteFile(filepath.Join(dir, "disable-bootstrap-load-balancer-targets.tfvars"), []byte(`{
+    "bootstrap_load_balancer_targets": false
 }
 `), 0666)
-        if err != nil {
-            return err
-        }
-        copyNames = append(copyNames, "disable-bootstrap.tfvars")
+    if err != nil {
+        return err
     }
 
     tempDir, err := ioutil.TempDir("", "openshift-install-")
@@ -43,20 +39,24 @@ func Destroy(dir string) (err error) {
     }
     defer os.RemoveAll(tempDir)
 
-    for _, filename := range copyNames {
+    for _, filename := range []string{
+        terraform.StateFileName,
+        cluster.TfVarsFileName,
+        "disable-bootstrap-load-balancer-targets.tfvars",
+    } {
         err = copy(filepath.Join(dir, filename), filepath.Join(tempDir, filename))
         if err != nil {
             return errors.Wrapf(err, "failed to copy %s to the temporary directory", filename)
         }
     }
 
-    if platform == "libvirt" {
-        _, err = terraform.Apply(tempDir, platform, fmt.Sprintf("-var-file=%s", filepath.Join(tempDir, "disable-bootstrap.tfvars")))
-        if err != nil {
-            return errors.Wrap(err, "Terraform apply")
-        }
+    _, err = terraform.Apply(tempDir, platform, fmt.Sprintf("-var-file=%s", filepath.Join(tempDir, "disable-bootstrap-load-balancer-targets.tfvars")))
+    if err != nil {
+        return errors.Wrap(err, "Terraform apply")
     }
 
+    time.Sleep(10 * time.Second)
+
     err = terraform.Destroy(tempDir, platform, "-target=module.bootstrap")
     if err != nil {
         return errors.Wrap(err, "Terraform destroy")
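
Postscript: if the flat 10-second sleep ever proves too blunt, the
tail of Destroy could branch on platform and use a drain-wait along
the lines of the sketch in the commit message.  Hypothetical only; the
waitForTargetUnused helper and the AWS wiring (client, target-group
ARN, bootstrap instance ID) do not exist in this patch:

    // Hypothetical alternative to the fixed sleep between the
    // load-balancer-target apply and the bootstrap destroy.
    if platform == "aws" {
        if err := waitForTargetUnused(client, targetGroupARN, bootstrapInstanceID, 2*time.Minute); err != nil {
            return errors.Wrap(err, "waiting for bootstrap load-balancer targets to drain")
        }
    } else {
        time.Sleep(10 * time.Second)
    }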