From f6e572b8a891507195965a0a320d45dbe47fdf29 Mon Sep 17 00:00:00 2001
From: Tim Geoghegan
Date: Tue, 4 Jan 2022 16:13:54 -0800
Subject: [PATCH] terraform: use `kubectl_manifest`

The `kubernetes_manifest` resource in provider `hashicorp/kubernetes`
has a known issue[1] where resources created in a manifest can't depend
on other resources that don't exist yet. To work around this, we
instead use `gavinbunney/kubectl`'s `kubectl_manifest` resource, which
does not have this problem because it uses a different mechanism for
planning.

[1] https://github.com/hashicorp/terraform-provider-kubernetes/issues/1380

Resolves #1088
---
 terraform/main.tf                          | 16 ++++++++++++++---
 terraform/modules/kubernetes/kubernetes.tf | 26 ++++++++++++++++++++------
 2 files changed, 33 insertions(+), 9 deletions(-)

diff --git a/terraform/main.tf b/terraform/main.tf
index 6eaa84f2e..95787deee 100644
--- a/terraform/main.tf
+++ b/terraform/main.tf
@@ -323,6 +323,10 @@ terraform {
       source  = "hashicorp/tls"
       version = "~> 3.1.0"
     }
+    kubectl = {
+      source  = "gavinbunney/kubectl"
+      version = "~> 1.13.1"
+    }
   }
 }
 
@@ -366,9 +370,6 @@ provider "kubernetes" {
   host                   = local.kubernetes_cluster.endpoint
   cluster_ca_certificate = base64decode(local.kubernetes_cluster.certificate_authority_data)
   token                  = local.kubernetes_cluster.token
-  experiments {
-    manifest_resource = true
-  }
 }
 
 provider "helm" {
@@ -379,6 +380,15 @@
   }
 }
 
+# We use provider kubectl to manage Kubernetes manifests, as a workaround for an
+# issue in hashicorp/kubernetes:
+# https://github.com/hashicorp/terraform-provider-kubernetes/issues/1380
+provider "kubectl" {
+  host                   = local.kubernetes_cluster.endpoint
+  cluster_ca_certificate = base64decode(local.kubernetes_cluster.certificate_authority_data)
+  token                  = local.kubernetes_cluster.token
+}
+
 module "manifest_gcp" {
   source = "./modules/manifest_gcp"
   count  = var.use_aws ? 0 : 1
diff --git a/terraform/modules/kubernetes/kubernetes.tf b/terraform/modules/kubernetes/kubernetes.tf
index 0beaf61b3..f77968b03 100644
--- a/terraform/modules/kubernetes/kubernetes.tf
+++ b/terraform/modules/kubernetes/kubernetes.tf
@@ -162,6 +162,20 @@ variable "gcp_workload_identity_pool_provider" {
   type = string
 }
 
+# We repeat this declaration from main.tf to work around an issue where
+# Terraform will look for hashicorp/kubectl instead of gavinbunney/kubectl
+# https://github.com/gavinbunney/terraform-provider-kubectl/issues/39
+terraform {
+  required_version = ">= 0.14.8"
+
+  required_providers {
+    kubectl = {
+      source  = "gavinbunney/kubectl"
+      version = "~> 1.13.1"
+    }
+  }
+}
+
 locals {
   workflow_manager_iam_entity = "${var.environment}-${var.data_share_processor_name}-workflow-manager"
 }
@@ -422,9 +436,9 @@ resource "kubernetes_deployment" "intake_batch" {
   }
 }
 
-resource "kubernetes_manifest" "intake_queue_depth_metric" {
+resource "kubectl_manifest" "intake_queue_depth_metric" {
   count = var.use_aws ? 1 : 0
-  manifest = {
+  yaml_body = yamlencode({
     apiVersion = "metrics.aws/v1alpha1"
     kind       = "ExternalMetric"
     metadata = {
@@ -450,7 +464,7 @@ resource "kubernetes_manifest" "intake_queue_depth_metric" {
         }
       }]
     }
-  }
+  })
 }
 
 resource "kubernetes_horizontal_pod_autoscaler" "intake_batch_autoscaler" {
@@ -648,9 +662,9 @@ resource "kubernetes_deployment" "aggregate" {
   }
 }
 
-resource "kubernetes_manifest" "aggregate_queue_depth_metric" {
+resource "kubectl_manifest" "aggregate_queue_depth_metric" {
   count = var.use_aws ? 1 : 0
-  manifest = {
+  yaml_body = yamlencode({
     apiVersion = "metrics.aws/v1alpha1"
     kind       = "ExternalMetric"
     metadata = {
@@ -676,7 +690,7 @@ resource "kubernetes_manifest" "aggregate_queue_depth_metric" {
         }
      }]
     }
-  }
+  })
 }
 
 resource "kubernetes_horizontal_pod_autoscaler" "aggregate_autoscaler" {
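
Reviewer note: for anyone unfamiliar with gavinbunney/kubectl, the conversion
above boils down to serializing the HCL object that `kubernetes_manifest` took
directly into the YAML string that `kubectl_manifest` expects, via Terraform's
built-in yamlencode(). A minimal, self-contained sketch of the pattern follows;
the ConfigMap resource and its names are hypothetical, for illustration only,
and are not part of this patch:

# kubectl_manifest takes yaml_body (a YAML string) rather than an HCL
# object; yamlencode() converts the object into that string.
resource "kubectl_manifest" "example_configmap" {
  yaml_body = yamlencode({
    apiVersion = "v1"
    kind       = "ConfigMap"
    metadata = {
      name      = "example" # hypothetical name, for illustration only
      namespace = "default"
    }
    data = {
      greeting = "hello"
    }
  })
}

Because yaml_body is just a string, the manifest is only sent to the cluster at
apply time, so planning does not require the ExternalMetric CRD to already
exist; that is the "different mechanism for planning" the commit message refers
to, and what works around hashicorp/terraform-provider-kubernetes issue 1380.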