
[refactor]: Upgrade to support v18.X of community EKS module #91

Merged — 33 commits, merged Nov 17, 2022
Changes shown below are from 18 of the 33 commits.

Commits
da99165
initial refactor commit; wip
jrsdav Sep 7, 2022
021912a
Refactor mostly finished; more scenario testing required
jrsdav Sep 16, 2022
c36ac75
added cluster-name tag
jrsdav Sep 19, 2022
c00b996
Updated default scaling config for ngs
jrsdav Sep 21, 2022
c423d3d
Merge branch 'refactor/vnext' of https://github.com/streamnative/terr…
jrsdav Sep 21, 2022
721b58a
Added enable flag for istio
jrsdav Sep 22, 2022
b4f63b0
Added wildcard to default hosted zone id
jrsdav Sep 22, 2022
18850d3
Added default value for istio service domain
jrsdav Sep 22, 2022
d6bd912
Added velero
jrsdav Sep 26, 2022
961c11c
Changed deprecated bucket config
jrsdav Sep 26, 2022
07e1aab
Fixed velero OIDC inputs
jrsdav Sep 26, 2022
09f1ef2
Added ACL resource for velero bucket
jrsdav Sep 26, 2022
d148c70
Fixed NG arn to work with iam-authenticator at creation
jrsdav Sep 28, 2022
6cba5f8
Removed old submodules;added tiered storage;updated ng config;misc va…
jrsdav Sep 29, 2022
b66fe84
Added flag to enable istio explicitly
jrsdav Oct 4, 2022
a056476
added istio source logic for external-dns
jrsdav Oct 4, 2022
1bfbcf0
Updated velero plugin version
jrsdav Oct 6, 2022
77f90f4
Updated velero config
jrsdav Oct 6, 2022
2903ee3
Added more SG inputs;updated autoscaler perms;removed tiered storage …
jrsdav Oct 17, 2022
1faea93
removed calico inputs
jrsdav Oct 17, 2022
cc93459
removed unneeded RBAC configs
jrsdav Oct 17, 2022
74e486a
Added migration_mode flag for cleaner handling of upgrades
jrsdav Oct 19, 2022
aaf78fb
Increment parent module version; added external-dns domain filtering
jrsdav Oct 21, 2022
7a4f8fd
Removed kubernetes.io/cluster tag from all resources
jrsdav Oct 21, 2022
6f8e3f7
Add arns as outputs
Oct 24, 2022
cb042a0
Merge branch 'refactor-vnext' of github.com:streamnative/terraform-aw…
Oct 24, 2022
b5a089a
Expose cluster_certificate_authority_data to communicate with cluster
jdmaguire Oct 25, 2022
5ce7e5b
Update outputs.tf
jdmaguire Oct 25, 2022
37025c9
Added default node SG rules
jrsdav Nov 2, 2022
bb11b87
Merge branch 'refactor-vnext' of https://github.com/streamnative/terr…
jrsdav Nov 2, 2022
ae7ef66
AWS LB arn
Nov 4, 2022
206cb00
readme updates
jrsdav Nov 7, 2022
86fe9b4
Merge branch 'master' into refactor-vnext
Nov 17, 2022
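
For orientation before the per-file diffs: moving to v18.x of the community terraform-aws-modules/eks module generally means re-pinning the module version and adopting its renamed inputs (for example, node_groups became eks_managed_node_groups and subnets became subnet_ids). The sketch below only illustrates that shape; the version pin and input values are assumptions, not this PR's actual configuration.

# Illustrative sketch of a v18.x module call; values and version pin are assumed.
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 18.0"

  cluster_name    = var.cluster_name
  cluster_version = var.cluster_version
  vpc_id          = var.vpc_id
  subnet_ids      = var.private_subnet_ids # v17.x called this `subnets`

  eks_managed_node_groups = { # v17.x called this `node_groups`
    default = {
      min_size     = 1
      max_size     = 3
      desired_size = 1
    }
  }
}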
125 changes: 27 additions & 98 deletions README.md

Large diffs are not rendered by default.

44 changes: 19 additions & 25 deletions aws_load_balancer_controller.tf
@@ -90,7 +90,7 @@ data "aws_iam_policy_document" "aws_load_balancer_controller" {
statement {
actions = ["ec2:CreateTags"]
effect = "Allow"
resources = ["arn:${var.aws_partition}:ec2:*:*:security-group/*"]
resources = ["arn:${local.aws_partition}:ec2:*:*:security-group/*"]
condition {
test = "StringEquals"
variable = "ec2:CreateAction"
@@ -109,7 +109,7 @@ data "aws_iam_policy_document" "aws_load_balancer_controller" {
"ec2:DeleteTags"
]
effect = "Allow"
resources = ["arn:${var.aws_partition}:ec2:*:*:security-group/*"]
resources = ["arn:${local.aws_partition}:ec2:*:*:security-group/*"]
condition {
test = "Null"
variable = "aws:RequestTag/elbv2.k8s.aws/cluster"
@@ -169,9 +169,9 @@ data "aws_iam_policy_document" "aws_load_balancer_controller" {
]
effect = "Allow"
resources = [
"arn:${var.aws_partition}:elasticloadbalancing:*:*:targetgroup/*/*",
"arn:${var.aws_partition}:elasticloadbalancing:*:*:loadbalancer/net/*/*",
"arn:${var.aws_partition}:elasticloadbalancing:*:*:loadbalancer/app/*/*"
"arn:${local.aws_partition}:elasticloadbalancing:*:*:targetgroup/*/*",
"arn:${local.aws_partition}:elasticloadbalancing:*:*:loadbalancer/net/*/*",
"arn:${local.aws_partition}:elasticloadbalancing:*:*:loadbalancer/app/*/*"
]
condition {
test = "Null"
@@ -192,10 +192,10 @@ data "aws_iam_policy_document" "aws_load_balancer_controller" {
]
effect = "Allow"
resources = [
"arn:${var.aws_partition}:elasticloadbalancing:*:*:listener/net/*/*/*",
"arn:${var.aws_partition}:elasticloadbalancing:*:*:listener/app/*/*/*",
"arn:${var.aws_partition}:elasticloadbalancing:*:*:listener-rule/net/*/*/*",
"arn:${var.aws_partition}:elasticloadbalancing:*:*:listener-rule/app/*/*/*"
"arn:${local.aws_partition}:elasticloadbalancing:*:*:listener/net/*/*/*",
"arn:${local.aws_partition}:elasticloadbalancing:*:*:listener/app/*/*/*",
"arn:${local.aws_partition}:elasticloadbalancing:*:*:listener-rule/net/*/*/*",
"arn:${local.aws_partition}:elasticloadbalancing:*:*:listener-rule/app/*/*/*"
]
}

@@ -225,7 +225,7 @@ data "aws_iam_policy_document" "aws_load_balancer_controller" {
"elasticloadbalancing:DeregisterTargets"
]
effect = "Allow"
resources = ["arn:${var.aws_partition}:elasticloadbalancing:*:*:targetgroup/*/*"]
resources = ["arn:${local.aws_partition}:elasticloadbalancing:*:*:targetgroup/*/*"]
}

statement {
@@ -249,7 +249,7 @@ data "aws_iam_policy_document" "aws_load_balancer_controller_sts" {
effect = "Allow"
principals {
type = "Federated"
identifiers = [format("arn:%s:iam::%s:oidc-provider/%s", var.aws_partition, local.account_id, local.oidc_issuer)]
identifiers = [format("arn:%s:iam::%s:oidc-provider/%s", local.aws_partition, local.account_id, local.oidc_issuer)]
}
condition {
test = "StringLike"
@@ -260,32 +260,30 @@ }
}

resource "aws_iam_role" "aws_load_balancer_controller" {
count = var.enable_aws_load_balancer_controller ? 1 : 0
name = format("%s-lbc-role", module.eks.cluster_id)
description = format("Role used by IRSA and the KSA aws-load-balancer-controller on StreamNative Cloud EKS cluster %s", module.eks.cluster_id)
assume_role_policy = data.aws_iam_policy_document.aws_load_balancer_controller_sts.json
path = "/StreamNative/"
permissions_boundary = var.permissions_boundary_arn
tags = merge({ "Vendor" = "StreamNative" }, var.additional_tags)
tags = local.tags
}

resource "aws_iam_policy" "aws_load_balancer_controller" {
count = local.create_lb_policy ? 1 : 0
count = var.create_iam_policies ? 1 : 0
name = format("%s-AWSLoadBalancerControllerPolicy", module.eks.cluster_id)
description = "Policy that defines the permissions for the AWS Load Balancer Controller addon service running in a StreamNative Cloud EKS cluster"
path = "/StreamNative/"
policy = data.aws_iam_policy_document.aws_load_balancer_controller.json
tags = merge({ "Vendor" = "StreamNative" }, var.additional_tags)
tags = local.tags
}

resource "aws_iam_role_policy_attachment" "aws_load_balancer_controller" {
count = var.enable_aws_load_balancer_controller ? 1 : 0
policy_arn = local.lb_policy_arn != "" ? local.lb_policy_arn : aws_iam_policy.aws_load_balancer_controller[0].arn
role = aws_iam_role.aws_load_balancer_controller[0].name
policy_arn = var.create_iam_policies ? aws_iam_policy.aws_load_balancer_controller[0].arn : local.default_lb_policy_arn
role = aws_iam_role.aws_load_balancer_controller.name
}

resource "helm_release" "aws_load_balancer_controller" {
count = var.enable_aws_load_balancer_controller ? 1 : 0
count = var.enable_bootstrap ? 1 : 0
atomic = true
chart = var.aws_load_balancer_controller_helm_chart_name
cleanup_on_fail = true
@@ -299,11 +297,12 @@ resource "helm_release" "aws_load_balancer_controller" {
defaultTags = merge(var.additional_tags, {
"Vendor" = "StreamNative"
})
replicaCount = 2
serviceAccount = {
create = true
name = "aws-load-balancer-controller"
annotations = {
"eks.amazonaws.com/role-arn" = aws_iam_role.aws_load_balancer_controller[0].arn
"eks.amazonaws.com/role-arn" = aws_iam_role.aws_load_balancer_controller.arn
}
}
})]
@@ -316,11 +315,6 @@ resource "helm_release" "aws_load_balancer_controller" {
}
}

set {
name = "defaultTags.Vendor"
value = "StreamNative"
}

depends_on = [
module.eks
]
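
The hunks above replace var.aws_partition with local.aws_partition and point the tags and fallback policy ARN at local.tags and local.default_lb_policy_arn, which are defined outside this diff. A plausible sketch of those locals follows; the data sources mirror common practice, and the policy names inside the ARNs are hypothetical placeholders, not values from this PR.

# Assumed sketch of the locals referenced above; policy names are hypothetical placeholders.
data "aws_partition" "current" {}
data "aws_caller_identity" "current" {}

locals {
  aws_partition = data.aws_partition.current.partition
  account_id    = data.aws_caller_identity.current.account_id

  tags = merge({ "Vendor" = "StreamNative" }, var.additional_tags)

  # Pre-created policy ARNs attached when var.create_iam_policies is false
  default_lb_policy_arn      = format("arn:%s:iam::%s:policy/StreamNative/example-lb-policy", local.aws_partition, local.account_id)
  default_service_policy_arn = format("arn:%s:iam::%s:policy/StreamNative/example-service-policy", local.aws_partition, local.account_id)
}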
1 change: 1 addition & 0 deletions aws_node_termination_handler.tf
@@ -18,6 +18,7 @@
#

resource "helm_release" "node_termination_handler" {
count = var.enable_bootstrap ? 1 : 0
atomic = true
chart = var.node_termination_handler_helm_chart_name
cleanup_on_fail = true
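
This is the same var.enable_bootstrap gate applied to the other Helm releases in this PR: chart installation is now controlled by one flag rather than per-addon booleans, with IAM policy creation governed separately by var.create_iam_policies. A sketch of how these variables might be declared follows; the descriptions and defaults are assumptions, not copied from the module.

# Assumed declarations; the module's actual descriptions and defaults may differ.
variable "enable_bootstrap" {
  description = "Install the addon Helm charts (load balancer controller, cert-manager, cluster autoscaler, etc.) from this module."
  type        = bool
  default     = true
}

variable "create_iam_policies" {
  description = "Create the per-addon IAM policies in this module; when false, attach pre-created default policy ARNs instead."
  type        = bool
  default     = true
}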
53 changes: 0 additions & 53 deletions calico.tf

This file was deleted.

41 changes: 17 additions & 24 deletions cert_manager.tf
@@ -24,7 +24,7 @@ data "aws_iam_policy_document" "cert_manager" {
"route53:GetChange"
]
resources = [
"arn:${var.aws_partition}:route53:::change/*"
"arn:${local.aws_partition}:route53:::change/*"
]
effect = "Allow"
}
@@ -36,7 +36,7 @@ data "aws_iam_policy_document" "cert_manager" {
"route53:ListResourceRecordSets"
]
resources = [
"arn:${var.aws_partition}:route53:::hostedzone/${var.hosted_zone_id}"
"arn:${local.aws_partition}:route53:::hostedzone/${var.hosted_zone_id}"
]
effect = "Allow"
}
@@ -61,7 +61,7 @@ data "aws_iam_policy_document" "cert_manager_sts" {
effect = "Allow"
principals {
type = "Federated"
identifiers = [format("arn:%s:iam::%s:oidc-provider/%s", var.aws_partition, local.account_id, local.oidc_issuer)]
identifiers = [format("arn:%s:iam::%s:oidc-provider/%s", local.aws_partition, local.account_id, local.oidc_issuer)]
}
condition {
test = "StringLike"
@@ -72,32 +72,30 @@ }
}

resource "aws_iam_role" "cert_manager" {
count = var.enable_cert_manager ? 1 : 0
name = format("%s-cm-role", module.eks.cluster_id)
description = format("Role assumed by IRSA and the KSA cert-manager on StreamNative Cloud EKS cluster %s", module.eks.cluster_id)
assume_role_policy = data.aws_iam_policy_document.cert_manager_sts.json
path = "/StreamNative/"
permissions_boundary = var.permissions_boundary_arn
tags = merge({ "Vendor" = "StreamNative" }, var.additional_tags)
tags = local.tags
}

resource "aws_iam_policy" "cert_manager" {
count = local.create_cert_man_policy ? 1 : 0
count = var.create_iam_policies ? 1 : 0
name = format("%s-CertManagerPolicy", module.eks.cluster_id)
description = "Policy that defines the permissions for the Cert-Manager addon service running in a StreamNative Cloud EKS cluster"
path = "/StreamNative/"
policy = data.aws_iam_policy_document.cert_manager.json
tags = merge({ "Vendor" = "StreamNative" }, var.additional_tags)
tags = local.tags
}

resource "aws_iam_role_policy_attachment" "cert_manager" {
count = var.enable_cert_manager ? 1 : 0
policy_arn = local.sn_serv_policy_arn != "" ? local.sn_serv_policy_arn : aws_iam_policy.cert_manager[0].arn
role = aws_iam_role.cert_manager[0].name
policy_arn = var.create_iam_policies ? aws_iam_policy.cert_manager[0].arn : local.default_service_policy_arn
role = aws_iam_role.cert_manager.name
}

resource "helm_release" "cert_manager" {
count = var.enable_cert_manager ? 1 : 0
count = var.enable_bootstrap ? 1 : 0
atomic = true
chart = var.cert_manager_helm_chart_name
cleanup_on_fail = true
@@ -114,15 +112,14 @@ resource "helm_release" "cert_manager" {
]
serviceAccount = {
annotations = {
"eks.amazonaws.com/role-arn" = aws_iam_role.cert_manager[0].arn
"eks.amazonaws.com/role-arn" = aws_iam_role.cert_manager.arn
}
}
podSecurityContext = {
fsGroup = 65534
}
}
kubeVersion = var.cluster_version

})]

dynamic "set" {
@@ -139,23 +136,19 @@ }
}

resource "helm_release" "cert_issuer" {
count = var.enable_cert_manager ? 1 : 0
count = var.enable_bootstrap ? 1 : 0
atomic = true
chart = "${path.module}/charts/cert-issuer"
cleanup_on_fail = true
name = "cert-issuer"
namespace = kubernetes_namespace.sn_system.metadata[0].name
timeout = 300

set {
name = "supportEmail"
value = var.cert_issuer_support_email
}

set {
name = "dns01.region"
value = var.region
}
values = [yamlencode({
supportEmail = var.cert_issuer_support_email
dns01 = {
region = var.region
}
})]

depends_on = [
helm_release.cert_manager
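
The cert_issuer release above also folds its individual set blocks into a single yamlencoded values document, keeping the chart overrides in one structured object. The two forms are equivalent; one quick way to eyeball the rendered document is terraform console, as in the illustrative session below (the email and region are placeholders).

$ terraform console
> yamlencode({ supportEmail = "ops@example.com", dns01 = { region = "us-west-2" } })
<<EOT
"dns01":
  "region": "us-west-2"
"supportEmail": "ops@example.com"

EOT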
22 changes: 10 additions & 12 deletions cluster_autoscaler.tf
@@ -59,7 +59,7 @@ data "aws_iam_policy_document" "cluster_autoscaler_sts" {
effect = "Allow"
principals {
type = "Federated"
identifiers = [format("arn:%s:iam::%s:oidc-provider/%s", var.aws_partition, local.account_id, local.oidc_issuer)]
identifiers = [format("arn:%s:iam::%s:oidc-provider/%s", local.aws_partition, local.account_id, local.oidc_issuer)]
}
condition {
test = "StringLike"
@@ -70,28 +70,26 @@ }
}

resource "aws_iam_role" "cluster_autoscaler" {
count = var.enable_cluster_autoscaler ? 1 : 0
name = format("%s-ca-role", module.eks.cluster_id)
description = format("Role used by IRSA and the KSA cluster-autoscaler on StreamNative Cloud EKS cluster %s", module.eks.cluster_id)
assume_role_policy = data.aws_iam_policy_document.cluster_autoscaler_sts.json
path = "/StreamNative/"
permissions_boundary = var.permissions_boundary_arn
tags = merge({ "Vendor" = "StreamNative" }, var.additional_tags)
tags = local.tags
}

resource "aws_iam_policy" "cluster_autoscaler" {
count = local.create_ca_policy ? 1 : 0
count = var.create_iam_policies ? 1 : 0
name = format("%s-ClusterAutoscalerPolicy", module.eks.cluster_id)
description = "Policy that defines the permissions for the Cluster Autoscaler addon service running in a StreamNative Cloud EKS cluster"
path = "/StreamNative/"
policy = data.aws_iam_policy_document.cluster_autoscaler.json
tags = merge({ "Vendor" = "StreamNative" }, var.additional_tags)
tags = local.tags
}

resource "aws_iam_role_policy_attachment" "cluster_autoscaler" {
count = var.enable_cluster_autoscaler ? 1 : 0
policy_arn = local.sn_serv_policy_arn != "" ? local.sn_serv_policy_arn : aws_iam_policy.cluster_autoscaler[0].arn
role = aws_iam_role.cluster_autoscaler[0].name
policy_arn = var.create_iam_policies ? aws_iam_policy.cluster_autoscaler[0].arn : local.default_service_policy_arn
role = aws_iam_role.cluster_autoscaler.name
}

############
@@ -113,7 +111,7 @@ locals {

}
resource "helm_release" "cluster_autoscaler" {
count = var.enable_cluster_autoscaler ? 1 : 0
count = var.enable_bootstrap ? 1 : 0
atomic = true
chart = var.cluster_autoscaler_helm_chart_name
cleanup_on_fail = true
@@ -151,21 +149,21 @@ resource "helm_release" "cluster_autoscaler" {
}
]
image = {
tag = lookup(local.k8s_to_autoscaler_version, var.cluster_version, "v1.20.1") # image.tag defaults to the version corresponding to var.cluster_version's default value and must manually be updated
tag = lookup(local.k8s_to_autoscaler_version, var.cluster_version, "v1.21.1") # image.tag defaults to the version corresponding to var.cluster_version's default value and must manually be updated
Review comment (Contributor): I think we should default to 1.22 or 1.23.

}
rbac = {
create = true
pspEnabled = true
serviceAccount = {
annotations = {
"eks.amazonaws.com/role-arn" = aws_iam_role.cluster_autoscaler[0].arn
"eks.amazonaws.com/role-arn" = aws_iam_role.cluster_autoscaler.arn
},
create = true
name = "cluster-autoscaler"
automountServiceAccountToken = true
}
}
replicaCount = "1"
replicaCount = "2"
resources = {
limits = {
cpu = "200m"
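
The image.tag lookup and the review comment above refer to local.k8s_to_autoscaler_version, whose contents sit outside the hunks shown. Its likely shape is a simple map from the cluster's Kubernetes minor version to a cluster-autoscaler release; the entries below are a sketch based on upstream cluster-autoscaler release lines, not values copied from this PR.

locals {
  # Assumed shape; the module's exact patch versions may differ.
  k8s_to_autoscaler_version = {
    "1.19" = "v1.19.2"
    "1.20" = "v1.20.1"
    "1.21" = "v1.21.1"
    "1.22" = "v1.22.3" # per the review comment, a newer fallback such as 1.22/1.23 may be preferable
    "1.23" = "v1.23.1"
  }
}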