Use AWS API for EKS authentication and authorization (#206)
Showing 29 changed files with 2,419 additions and 1,270 deletions.
@@ -1,157 +1,130 @@
# The EKS service does not provide a cluster-level API parameter or resource to automatically configure the underlying Kubernetes cluster
# to allow worker nodes to join the cluster via AWS IAM role authentication.

# NOTE: To automatically apply the Kubernetes configuration to the cluster (which allows the worker nodes to join the cluster),
# the requirements outlined here must be met:
# https://learn.hashicorp.com/terraform/aws/eks-intro#preparation
# https://learn.hashicorp.com/terraform/aws/eks-intro#configuring-kubectl-for-eks
# https://learn.hashicorp.com/terraform/aws/eks-intro#required-kubernetes-configuration-to-join-worker-nodes

# Additional links
# https://learn.hashicorp.com/terraform/aws/eks-intro
# https://itnext.io/how-does-client-authentication-work-on-amazon-eks-c4f2b90d943b
# https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html
# https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html
# https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html
# https://docs.aws.amazon.com/en_pv/eks/latest/userguide/create-kubeconfig.html
# https://itnext.io/kubernetes-authorization-via-open-policy-agent-a9455d9d5ceb
# http://marcinkaszynski.com/2018/07/12/eks-auth.html
# https://cloud.google.com/kubernetes-engine/docs/concepts/configmap
# http://yaml-multiline.info
# https://github.com/terraform-providers/terraform-provider-kubernetes/issues/216
# https://www.terraform.io/docs/cloud/run/install-software.html
# https://stackoverflow.com/questions/26123740/is-it-possible-to-install-aws-cli-package-without-root-permission
# https://stackoverflow.com/questions/58232731/kubectl-missing-form-terraform-cloud
# https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html
# https://docs.aws.amazon.com/cli/latest/userguide/install-cliv1.html

locals {
  yaml_quote = var.aws_auth_yaml_strip_quotes ? "" : "\""

  need_kubernetes_provider = local.enabled && var.apply_config_map_aws_auth

  kubeconfig_path_enabled = local.need_kubernetes_provider && var.kubeconfig_path_enabled
  kube_exec_auth_enabled  = local.kubeconfig_path_enabled ? false : local.need_kubernetes_provider && var.kube_exec_auth_enabled
  kube_data_auth_enabled  = local.kube_exec_auth_enabled ? false : local.need_kubernetes_provider && var.kube_data_auth_enabled

  exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? ["--profile", var.kube_exec_auth_aws_profile] : []
  exec_role    = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? ["--role-arn", var.kube_exec_auth_role_arn] : []

  cluster_endpoint_data     = join("", aws_eks_cluster.default[*].endpoint) # use `join` instead of `one` to keep the value a string
  cluster_auth_map_endpoint = var.apply_config_map_aws_auth ? local.cluster_endpoint_data : var.dummy_kubeapi_server

  certificate_authority_data_list          = coalescelist(aws_eks_cluster.default[*].certificate_authority, [[{ data : "" }]])
  certificate_authority_data_list_internal = local.certificate_authority_data_list[0]
  certificate_authority_data_map           = local.certificate_authority_data_list_internal[0]
  certificate_authority_data               = local.certificate_authority_data_map["data"]

  # Add worker nodes role ARNs (could be from many un-managed worker groups) to the ConfigMap
  # Note that we don't need to do this for managed Node Groups since EKS adds their roles to the ConfigMap automatically
  map_worker_roles = [
    for role_arn in var.workers_role_arns : {
      rolearn  = role_arn
      username = "system:node:{{EC2PrivateDNSName}}"
      groups = [
        "system:bootstrappers",
        "system:nodes"
      ]
    }
  ]
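
  # Illustrative only (not part of this commit): each entry above is rendered into the
  # aws-auth ConfigMap's mapRoles YAML, which (after the optional quote stripping controlled
  # by var.aws_auth_yaml_strip_quotes) looks roughly like the following, with a hypothetical role ARN:
  #
  #   - groups:
  #       - system:bootstrappers
  #       - system:nodes
  #     rolearn: arn:aws:iam::111111111111:role/example-workers
  #     username: system:node:{{EC2PrivateDNSName}}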
}
  # Extract the cluster certificate for use in OIDC configuration
  certificate_authority_data = try(aws_eks_cluster.default[0].certificate_authority[0]["data"], "")

  eks_policy_short_abbreviation_map = {
    # List available policies with `aws eks list-access-policies --output table`

    Admin        = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSAdminPolicy"
    ClusterAdmin = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
    Edit         = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSEditPolicy"
    View         = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
    # Add new policies here
  }

  eks_policy_abbreviation_map = merge({ for k, v in local.eks_policy_short_abbreviation_map : format("AmazonEKS%sPolicy", k) => v },
    local.eks_policy_short_abbreviation_map)
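
  # Illustrative only (not part of this commit): the merge above means abbreviation lookups
  # accept both the short and the long form, e.g.
  #   try(local.eks_policy_abbreviation_map["Admin"], "Admin")
  #   try(local.eks_policy_abbreviation_map["AmazonEKSAdminPolicy"], "AmazonEKSAdminPolicy")
  # both resolve to "arn:aws:eks::aws:cluster-access-policy/AmazonEKSAdminPolicy", while keys
  # not present in the map (such as full policy ARNs) fall through `try()` unchanged.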

resource "null_resource" "wait_for_cluster" {
  count = local.enabled && var.apply_config_map_aws_auth ? 1 : 0
  depends_on = [
    aws_eks_cluster.default,
    aws_security_group_rule.custom_ingress_rules,
    aws_security_group_rule.managed_ingress_security_groups,
    aws_security_group_rule.managed_ingress_cidr_blocks,
  ]

  provisioner "local-exec" {
    command     = var.wait_for_cluster_command
    interpreter = var.local_exec_interpreter
    environment = {
      ENDPOINT = local.cluster_endpoint_data
    }

  # Expand abbreviated access policies to full ARNs
  access_entry_expanded_map = { for k, v in var.access_entry_map : k => merge({
    # Expand abbreviated policies to full ARNs
    access_policy_associations = { for kk, vv in v.access_policy_associations : try(local.eks_policy_abbreviation_map[kk], kk) => vv }
    # Copy over all other fields
  }, { for kk, vv in v : kk => vv if kk != "access_policy_associations" })
  }

  # Replace membership in "system:masters" group with association to "ClusterAdmin" policy
  access_entry_map = { for k, v in local.access_entry_expanded_map : k => merge({
    # Remove "system:masters" group from standard users
    kubernetes_groups = [for group in v.kubernetes_groups : group if group != "system:masters" || v.type != "STANDARD"]
    access_policy_associations = merge(
      # copy all existing associations
      v.access_policy_associations,
      # add "ClusterAdmin" policy if the user was in "system:masters" group and is a standard user
      contains(v.kubernetes_groups, "system:masters") && v.type == "STANDARD" ? {
        "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" = {
          access_scope = {
            type       = "cluster"
            namespaces = null
          }
        }
      } : {}
    )
    # Copy over all other fields
  }, { for kk, vv in v : kk => vv if kk != "kubernetes_groups" && kk != "access_policy_associations" })
  }
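
  # Illustrative only (not part of this commit): for example, a STANDARD entry whose
  # kubernetes_groups contains "system:masters" ends up with that group removed and an
  # association to AmazonEKSClusterAdminPolicy scoped to the whole cluster added instead.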

  eks_access_policy_association_product_map = merge(flatten([
    for k, v in local.access_entry_map : [for kk, vv in v.access_policy_associations : { format("%s-%s", k, kk) = {
      principal_arn = k
      policy_arn    = kk
    }
    }]
  ])...)
}

# The preferred way to keep track of entries is by key, but we also support list,
# because keys need to be known at plan time, but list values do not.
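
# Illustrative only, not part of this commit: a minimal sketch (with hypothetical principal
# ARNs) of the two input styles described above, based on the attribute names used in this
# file. `access_entry_map` is keyed by principal ARN and its keys must be known at plan time;
# `access_entries` is a plain list whose values may be unknown until apply.
locals {
  example_access_entry_map = {
    "arn:aws:iam::111111111111:role/admins" = {
      type              = "STANDARD"
      kubernetes_groups = []
      access_policy_associations = {
        # Abbreviations like "ClusterAdmin" are expanded via local.eks_policy_abbreviation_map
        ClusterAdmin = {
          access_scope = {
            type       = "cluster"
            namespaces = null
          }
        }
      }
    }
  }

  example_access_entries = [
    {
      principal_arn     = "arn:aws:iam::111111111111:role/developers"
      kubernetes_groups = ["developers"]
    }
  ]
}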
resource "aws_eks_access_entry" "map" { | ||
for_each = local.enabled ? local.access_entry_map : {} | ||
|
||
cluster_name = local.eks_cluster_id | ||
principal_arn = each.key | ||
kubernetes_groups = each.value.kubernetes_groups | ||
type = each.value.type | ||
|
||
# Get an authentication token to communicate with the EKS cluster.
# By default (before other roles are added to the Auth ConfigMap), you can authenticate to the EKS cluster only by assuming the role that created the cluster.
# `aws_eks_cluster_auth` uses IAM credentials from the AWS provider to generate a temporary token.
# If the AWS provider assumes an IAM role, `aws_eks_cluster_auth` will use the same IAM role to get the auth token.
# https://www.terraform.io/docs/providers/aws/d/eks_cluster_auth.html
#
# You can set `kube_exec_auth_enabled` to use a different IAM Role or AWS config profile to fetch the auth token
#
data "aws_eks_cluster_auth" "eks" {
  count = local.kube_data_auth_enabled ? 1 : 0
  name  = one(aws_eks_cluster.default[*].id)
  tags  = module.this.tags
}

resource "aws_eks_access_policy_association" "map" {
  for_each = local.enabled ? local.eks_access_policy_association_product_map : {}

  cluster_name  = local.eks_cluster_id
  principal_arn = each.value.principal_arn
  policy_arn    = each.value.policy_arn

provider "kubernetes" { | ||
# Without a dummy API server configured, the provider will throw an error and prevent a "plan" from succeeding | ||
# in situations where Terraform does not provide it with the cluster endpoint before triggering an API call. | ||
# Since those situations are limited to ones where we do not care about the failure, such as fetching the | ||
# ConfigMap before the cluster has been created or in preparation for deleting it, and the worst that will | ||
# happen is that the aws-auth ConfigMap will be unnecessarily updated, it is just better to ignore the error | ||
# so we can proceed with the task of creating or destroying the cluster. | ||
# | ||
# If this solution bothers you, you can disable it by setting var.dummy_kubeapi_server = null | ||
host = local.cluster_auth_map_endpoint | ||
cluster_ca_certificate = local.enabled && !local.kubeconfig_path_enabled ? base64decode(local.certificate_authority_data) : null | ||
token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null | ||
# The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster | ||
# in KUBECONFIG is some other cluster, this will cause problems, so we override it always. | ||
config_path = local.kubeconfig_path_enabled ? var.kubeconfig_path : "" | ||
config_context = var.kubeconfig_context | ||

  dynamic "exec" {
    for_each = local.kube_exec_auth_enabled && length(local.cluster_endpoint_data) > 0 ? ["exec"] : []
    content {
      api_version = "client.authentication.k8s.io/v1beta1"
      command     = "aws"
      args        = concat(local.exec_profile, ["eks", "get-token", "--cluster-name", try(aws_eks_cluster.default[0].id, "deleted")], local.exec_role)
    }
  access_scope {
    type       = local.access_entry_map[each.value.principal_arn].access_policy_associations[each.value.policy_arn].access_scope.type
    namespaces = local.access_entry_map[each.value.principal_arn].access_policy_associations[each.value.policy_arn].access_scope.namespaces
  }
}

resource "kubernetes_config_map" "aws_auth_ignore_changes" { | ||
count = local.enabled && var.apply_config_map_aws_auth && var.kubernetes_config_map_ignore_role_changes ? 1 : 0 | ||
depends_on = [null_resource.wait_for_cluster] | ||
# We could combine all the list access entries into a single resource, | ||
# but separating them by category minimizes the ripple effect of changes | ||
# due to adding and removing items from the list. | ||
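
# Illustrative only, not part of this commit: a minimal sketch (with hypothetical role ARNs)
# of the list-style inputs consumed by the per-category resources below. Node entries are
# grouped by type, and list-style policy associations may use policy abbreviations.
locals {
  example_access_entries_for_nodes = {
    EC2_LINUX   = ["arn:aws:iam::111111111111:role/eks-linux-nodes"]
    EC2_WINDOWS = ["arn:aws:iam::111111111111:role/eks-windows-nodes"]
  }

  example_access_policy_associations = [
    {
      principal_arn = "arn:aws:iam::111111111111:role/developers"
      # Abbreviations such as "Edit" are expanded via local.eks_policy_abbreviation_map
      policy_arn = "Edit"
      access_scope = {
        type       = "namespace"
        namespaces = ["dev"]
      }
    }
  ]
}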
resource "aws_eks_access_entry" "standard" { | ||
count = local.enabled ? length(var.access_entries) : 0 | ||
|
||
metadata { | ||
name = "aws-auth" | ||
namespace = "kube-system" | ||
} | ||
cluster_name = local.eks_cluster_id | ||
principal_arn = var.access_entries[count.index].principal_arn | ||
kubernetes_groups = var.access_entries[count.index].kubernetes_groups | ||
type = "STANDARD" | ||
|
||
data = { | ||
mapRoles = yamlencode(distinct(concat(local.map_worker_roles, var.map_additional_iam_roles))) | ||
mapUsers = yamlencode(var.map_additional_iam_users) | ||
mapAccounts = yamlencode(var.map_additional_aws_accounts) | ||
} | ||
tags = module.this.tags | ||
} | ||
|
||
lifecycle { | ||
ignore_changes = [data["mapRoles"]] | ||
} | ||
resource "aws_eks_access_entry" "linux" { | ||
count = local.enabled ? length(lookup(var.access_entries_for_nodes, "EC2_LINUX", [])) : 0 | ||
|
||
cluster_name = local.eks_cluster_id | ||
principal_arn = var.access_entries_for_nodes.EC2_LINUX[count.index] | ||
type = "EC2_LINUX" | ||
|
||
tags = module.this.tags | ||
} | ||
|
||
resource "kubernetes_config_map" "aws_auth" { | ||
count = local.enabled && var.apply_config_map_aws_auth && var.kubernetes_config_map_ignore_role_changes == false ? 1 : 0 | ||
depends_on = [null_resource.wait_for_cluster] | ||
resource "aws_eks_access_entry" "windows" { | ||
count = local.enabled ? length(lookup(var.access_entries_for_nodes, "EC2_WINDOWS", [])) : 0 | ||
|
||
metadata { | ||
name = "aws-auth" | ||
namespace = "kube-system" | ||
} | ||
cluster_name = local.eks_cluster_id | ||
principal_arn = var.access_entries_for_nodes.EC2_WINDOWS[count.index] | ||
type = "EC2_WINDOWS" | ||
|
||
tags = module.this.tags | ||
} | ||

resource "aws_eks_access_policy_association" "list" {
  count = local.enabled ? length(var.access_policy_associations) : 0

  cluster_name  = local.eks_cluster_id
  principal_arn = var.access_policy_associations[count.index].principal_arn
  policy_arn = try(local.eks_policy_abbreviation_map[var.access_policy_associations[count.index].policy_arn],
    var.access_policy_associations[count.index].policy_arn)

  data = {
    mapRoles    = replace(yamlencode(distinct(concat(local.map_worker_roles, var.map_additional_iam_roles))), "\"", local.yaml_quote)
    mapUsers    = replace(yamlencode(var.map_additional_iam_users), "\"", local.yaml_quote)
    mapAccounts = replace(yamlencode(var.map_additional_aws_accounts), "\"", local.yaml_quote)
  access_scope {
    type       = var.access_policy_associations[count.index].access_scope.type
    namespaces = var.access_policy_associations[count.index].access_scope.namespaces
  }
}