Use AWS API for EKS authentication and authorization (#206)
Nuru authored Mar 11, 2024
1 parent 117a675 commit ff27afa
Showing 29 changed files with 2,419 additions and 1,270 deletions.
18 changes: 15 additions & 3 deletions .github/renovate.json
@@ -1,6 +1,6 @@
{
"extends": [
"config:base",
"config:recommended",
":preserveSemverRanges",
":rebaseStalePrs"
],
@@ -9,6 +9,18 @@
"dependencyDashboardAutoclose": true,
"enabledManagers": ["terraform"],
"terraform": {
"ignorePaths": ["**/context.tf"]
}
"ignorePaths": ["**/context.tf", "**/examples/obsolete*/**"]
},
"timezone": "America/New_York",
"packageRules": [
{
"matchFileNames": [
"/*.tf",
"examples/complete/*.tf"
],
"groupName": "all",
"schedule": ["every 4 weeks on wednesday at 04:00 am"],
"groupSlug": "monthly"
}
]
}
2 changes: 1 addition & 1 deletion LICENSE
@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2018-2023 Cloud Posse, LLC
Copyright 2018-2024 Cloud Posse, LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
372 changes: 149 additions & 223 deletions README.md

Large diffs are not rendered by default.

248 changes: 106 additions & 142 deletions README.yaml

Large diffs are not rendered by default.

243 changes: 108 additions & 135 deletions auth.tf
@@ -1,157 +1,130 @@
# The EKS service does not provide a cluster-level API parameter or resource to automatically configure the underlying Kubernetes cluster
# to allow worker nodes to join the cluster via AWS IAM role authentication.

# NOTE: To automatically apply the Kubernetes configuration to the cluster (which allows the worker nodes to join the cluster),
# the requirements outlined here must be met:
# https://learn.hashicorp.com/terraform/aws/eks-intro#preparation
# https://learn.hashicorp.com/terraform/aws/eks-intro#configuring-kubectl-for-eks
# https://learn.hashicorp.com/terraform/aws/eks-intro#required-kubernetes-configuration-to-join-worker-nodes

# Additional links
# https://learn.hashicorp.com/terraform/aws/eks-intro
# https://itnext.io/how-does-client-authentication-work-on-amazon-eks-c4f2b90d943b
# https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html
# https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html
# https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html
# https://docs.aws.amazon.com/en_pv/eks/latest/userguide/create-kubeconfig.html
# https://itnext.io/kubernetes-authorization-via-open-policy-agent-a9455d9d5ceb
# http://marcinkaszynski.com/2018/07/12/eks-auth.html
# https://cloud.google.com/kubernetes-engine/docs/concepts/configmap
# http://yaml-multiline.info
# https://github.com/terraform-providers/terraform-provider-kubernetes/issues/216
# https://www.terraform.io/docs/cloud/run/install-software.html
# https://stackoverflow.com/questions/26123740/is-it-possible-to-install-aws-cli-package-without-root-permission
# https://stackoverflow.com/questions/58232731/kubectl-missing-form-terraform-cloud
# https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html
# https://docs.aws.amazon.com/cli/latest/userguide/install-cliv1.html


locals {
yaml_quote = var.aws_auth_yaml_strip_quotes ? "" : "\""

need_kubernetes_provider = local.enabled && var.apply_config_map_aws_auth

kubeconfig_path_enabled = local.need_kubernetes_provider && var.kubeconfig_path_enabled
kube_exec_auth_enabled = local.kubeconfig_path_enabled ? false : local.need_kubernetes_provider && var.kube_exec_auth_enabled
kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : local.need_kubernetes_provider && var.kube_data_auth_enabled

exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? ["--profile", var.kube_exec_auth_aws_profile] : []
exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? ["--role-arn", var.kube_exec_auth_role_arn] : []

cluster_endpoint_data = join("", aws_eks_cluster.default[*].endpoint) # use `join` instead of `one` to keep the value a string
cluster_auth_map_endpoint = var.apply_config_map_aws_auth ? local.cluster_endpoint_data : var.dummy_kubeapi_server

certificate_authority_data_list = coalescelist(aws_eks_cluster.default[*].certificate_authority, [[{ data : "" }]])
certificate_authority_data_list_internal = local.certificate_authority_data_list[0]
certificate_authority_data_map = local.certificate_authority_data_list_internal[0]
certificate_authority_data = local.certificate_authority_data_map["data"]

# Add worker nodes role ARNs (could be from many un-managed worker groups) to the ConfigMap
# Note that we don't need to do this for managed Node Groups since EKS adds their roles to the ConfigMap automatically
map_worker_roles = [
for role_arn in var.workers_role_arns : {
rolearn = role_arn
username = "system:node:{{EC2PrivateDNSName}}"
groups = [
"system:bootstrappers",
"system:nodes"
]
}
]
}
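For illustration, a minimal standalone sketch of what a single entry in `var.workers_role_arns` contributes to the `mapRoles` section of the legacy aws-auth ConfigMap (the account ID and role name below are hypothetical):

locals {
  # Hypothetical unmanaged worker role ARN
  example_worker_roles = [
    {
      rolearn  = "arn:aws:iam::111122223333:role/example-eks-workers"
      username = "system:node:{{EC2PrivateDNSName}}"
      groups   = ["system:bootstrappers", "system:nodes"]
    }
  ]

  # yamlencode(local.example_worker_roles) renders YAML along these lines
  # (key order and quoting may differ):
  #   - groups:
  #       - system:bootstrappers
  #       - system:nodes
  #     rolearn: arn:aws:iam::111122223333:role/example-eks-workers
  #     username: system:node:{{EC2PrivateDNSName}}
  example_map_roles_yaml = yamlencode(local.example_worker_roles)
}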
# Extract the cluster certificate for use in OIDC configuration
certificate_authority_data = try(aws_eks_cluster.default[0].certificate_authority[0]["data"], "")

eks_policy_short_abbreviation_map = {
# List available policies with `aws eks list-access-policies --output table`

Admin = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSAdminPolicy"
ClusterAdmin = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
Edit = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSEditPolicy"
View = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
# Add new policies here
}

eks_policy_abbreviation_map = merge({ for k, v in local.eks_policy_short_abbreviation_map : format("AmazonEKS%sPolicy", k) => v },
local.eks_policy_short_abbreviation_map)
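A standalone sketch of how the abbreviation lookup behaves (the map is trimmed to one policy and the local names are illustrative):

locals {
  abbrev_sketch = {
    ClusterAdmin = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
  }

  # A recognized abbreviation is expanded to the full policy ARN ...
  expanded_sketch = try(local.abbrev_sketch["ClusterAdmin"], "ClusterAdmin")

  # ... while a value that is not in the map (for example, a full ARN supplied
  # verbatim) falls through `try()` unchanged.
  passthrough_sketch = try(
    local.abbrev_sketch["arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"],
    "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
  )
}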

resource "null_resource" "wait_for_cluster" {
count = local.enabled && var.apply_config_map_aws_auth ? 1 : 0
depends_on = [
aws_eks_cluster.default,
aws_security_group_rule.custom_ingress_rules,
aws_security_group_rule.managed_ingress_security_groups,
aws_security_group_rule.managed_ingress_cidr_blocks,
]

provisioner "local-exec" {
command = var.wait_for_cluster_command
interpreter = var.local_exec_interpreter
environment = {
ENDPOINT = local.cluster_endpoint_data
}

# Expand abbreviated access policies to full ARNs
access_entry_expanded_map = { for k, v in var.access_entry_map : k => merge({
# Expand abbreviated policies to full ARNs
access_policy_associations = { for kk, vv in v.access_policy_associations : try(local.eks_policy_abbreviation_map[kk], kk) => vv }
# Copy over all other fields
}, { for kk, vv in v : kk => vv if kk != "access_policy_associations" })
}

# Replace membership in "system:masters" group with association to "ClusterAdmin" policy
access_entry_map = { for k, v in local.access_entry_expanded_map : k => merge({
# Remove "system:masters" group from standard users
kubernetes_groups = [for group in v.kubernetes_groups : group if group != "system:masters" || v.type != "STANDARD"]
access_policy_associations = merge(
# copy all existing associations
v.access_policy_associations,
# add "ClusterAdmin" policy if the user was in "system:masters" group and is a standard user
contains(v.kubernetes_groups, "system:masters") && v.type == "STANDARD" ? {
"arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" = {
access_scope = {
type = "cluster"
namespaces = null
}
}
} : {}
)
# Copy over all other fields
}, { for kk, vv in v : kk => vv if kk != "kubernetes_groups" && kk != "access_policy_associations" })
}
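A sketch of the before-and-after shape for one STANDARD principal under this rewrite (the role ARN and group name are hypothetical):

locals {
  # Hypothetical caller input: one entry of var.access_entry_map
  input_entry_sketch = {
    "arn:aws:iam::111122223333:role/ops-admin" = {
      type                       = "STANDARD"
      kubernetes_groups          = ["system:masters", "build-admins"]
      access_policy_associations = {}
    }
  }

  # After the rewrite, the corresponding entry in local.access_entry_map is roughly:
  #   kubernetes_groups = ["build-admins"]   # "system:masters" dropped
  #   access_policy_associations = {
  #     "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" = {
  #       access_scope = { type = "cluster", namespaces = null }
  #     }
  #   }
}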

eks_access_policy_association_product_map = merge(flatten([
for k, v in local.access_entry_map : [for kk, vv in v.access_policy_associations : { format("%s-%s", k, kk) = {
principal_arn = k
policy_arn = kk
}
}]
])...)
}
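A standalone sketch of the flatten-and-merge pattern used here, showing how one principal with two policy associations becomes two flat entries, one per association resource (the principal ARN is hypothetical):

locals {
  product_input_sketch = {
    "arn:aws:iam::111122223333:role/dev" = {
      access_policy_associations = {
        "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy" = {}
        "arn:aws:eks::aws:cluster-access-policy/AmazonEKSEditPolicy" = {}
      }
    }
  }

  # One flat entry per (principal, policy) pair, keyed "<principal>-<policy>",
  # so each pair can drive its own aws_eks_access_policy_association resource.
  product_sketch = merge(flatten([
    for k, v in local.product_input_sketch : [
      for kk, vv in v.access_policy_associations : {
        format("%s-%s", k, kk) = { principal_arn = k, policy_arn = kk }
      }
    ]
  ])...)
  # => a map with two entries for the single principal above
}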

# The preferred way to keep track of entries is by key, but we also support list,
# because keys need to be known at plan time, but list values do not.
resource "aws_eks_access_entry" "map" {
for_each = local.enabled ? local.access_entry_map : {}

cluster_name = local.eks_cluster_id
principal_arn = each.key
kubernetes_groups = each.value.kubernetes_groups
type = each.value.type

# Get an authentication token to communicate with the EKS cluster.
# By default (before other roles are added to the Auth ConfigMap), you can authenticate to EKS cluster only by assuming the role that created the cluster.
# `aws_eks_cluster_auth` uses IAM credentials from the AWS provider to generate a temporary token.
# If the AWS provider assumes an IAM role, `aws_eks_cluster_auth` will use the same IAM role to get the auth token.
# https://www.terraform.io/docs/providers/aws/d/eks_cluster_auth.html
#
# You can set `kube_exec_auth_enabled` to use a different IAM Role or AWS config profile to fetch the auth token
#
data "aws_eks_cluster_auth" "eks" {
count = local.kube_data_auth_enabled ? 1 : 0
name = one(aws_eks_cluster.default[*].id)
tags = module.this.tags
}

resource "aws_eks_access_policy_association" "map" {
for_each = local.enabled ? local.eks_access_policy_association_product_map : {}

cluster_name = local.eks_cluster_id
principal_arn = each.value.principal_arn
policy_arn = each.value.policy_arn

provider "kubernetes" {
# Without a dummy API server configured, the provider will throw an error and prevent a "plan" from succeeding
# in situations where Terraform does not provide it with the cluster endpoint before triggering an API call.
# Since those situations are limited to ones where we do not care about the failure, such as fetching the
# ConfigMap before the cluster has been created or in preparation for deleting it, and the worst that will
# happen is that the aws-auth ConfigMap will be unnecessarily updated, it is just better to ignore the error
# so we can proceed with the task of creating or destroying the cluster.
#
# If this solution bothers you, you can disable it by setting var.dummy_kubeapi_server = null
host = local.cluster_auth_map_endpoint
cluster_ca_certificate = local.enabled && !local.kubeconfig_path_enabled ? base64decode(local.certificate_authority_data) : null
token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null
# The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster
# in KUBECONFIG is some other cluster, this will cause problems, so we override it always.
config_path = local.kubeconfig_path_enabled ? var.kubeconfig_path : ""
config_context = var.kubeconfig_context

dynamic "exec" {
for_each = local.kube_exec_auth_enabled && length(local.cluster_endpoint_data) > 0 ? ["exec"] : []
content {
api_version = "client.authentication.k8s.io/v1beta1"
command = "aws"
args = concat(local.exec_profile, ["eks", "get-token", "--cluster-name", try(aws_eks_cluster.default[0].id, "deleted")], local.exec_role)
}
access_scope {
type = local.access_entry_map[each.value.principal_arn].access_policy_associations[each.value.policy_arn].access_scope.type
namespaces = local.access_entry_map[each.value.principal_arn].access_policy_associations[each.value.policy_arn].access_scope.namespaces
}
}

resource "kubernetes_config_map" "aws_auth_ignore_changes" {
count = local.enabled && var.apply_config_map_aws_auth && var.kubernetes_config_map_ignore_role_changes ? 1 : 0
depends_on = [null_resource.wait_for_cluster]
# We could combine all the list access entries into a single resource,
# but separating them by category minimizes the ripple effect of changes
# due to adding and removing items from the list.
resource "aws_eks_access_entry" "standard" {
count = local.enabled ? length(var.access_entries) : 0

metadata {
name = "aws-auth"
namespace = "kube-system"
}
cluster_name = local.eks_cluster_id
principal_arn = var.access_entries[count.index].principal_arn
kubernetes_groups = var.access_entries[count.index].kubernetes_groups
type = "STANDARD"

data = {
mapRoles = yamlencode(distinct(concat(local.map_worker_roles, var.map_additional_iam_roles)))
mapUsers = yamlencode(var.map_additional_iam_users)
mapAccounts = yamlencode(var.map_additional_aws_accounts)
}
tags = module.this.tags
}
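An illustrative tfvars-style value for the list input consumed above (the role ARN and group are hypothetical, and the variable may accept additional optional fields):

access_entries = [
  {
    principal_arn     = "arn:aws:iam::111122223333:role/ci-deployer"
    kubernetes_groups = ["deployers"]
  }
]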

lifecycle {
ignore_changes = [data["mapRoles"]]
}
resource "aws_eks_access_entry" "linux" {
count = local.enabled ? length(lookup(var.access_entries_for_nodes, "EC2_LINUX", [])) : 0

cluster_name = local.eks_cluster_id
principal_arn = var.access_entries_for_nodes.EC2_LINUX[count.index]
type = "EC2_LINUX"

tags = module.this.tags
}

resource "kubernetes_config_map" "aws_auth" {
count = local.enabled && var.apply_config_map_aws_auth && var.kubernetes_config_map_ignore_role_changes == false ? 1 : 0
depends_on = [null_resource.wait_for_cluster]
resource "aws_eks_access_entry" "windows" {
count = local.enabled ? length(lookup(var.access_entries_for_nodes, "EC2_WINDOWS", [])) : 0

metadata {
name = "aws-auth"
namespace = "kube-system"
}
cluster_name = local.eks_cluster_id
principal_arn = var.access_entries_for_nodes.EC2_WINDOWS[count.index]
type = "EC2_WINDOWS"

tags = module.this.tags
}
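Likewise, an illustrative value for the node-role input used by the two resources above, grouped by access entry type (role ARNs are hypothetical):

access_entries_for_nodes = {
  EC2_LINUX   = ["arn:aws:iam::111122223333:role/self-managed-linux-workers"]
  EC2_WINDOWS = ["arn:aws:iam::111122223333:role/self-managed-windows-workers"]
}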

resource "aws_eks_access_policy_association" "list" {
count = local.enabled ? length(var.access_policy_associations) : 0

cluster_name = local.eks_cluster_id
principal_arn = var.access_policy_associations[count.index].principal_arn
policy_arn = try(local.eks_policy_abbreviation_map[var.access_policy_associations[count.index].policy_arn],
var.access_policy_associations[count.index].policy_arn)

data = {
mapRoles = replace(yamlencode(distinct(concat(local.map_worker_roles, var.map_additional_iam_roles))), "\"", local.yaml_quote)
mapUsers = replace(yamlencode(var.map_additional_iam_users), "\"", local.yaml_quote)
mapAccounts = replace(yamlencode(var.map_additional_aws_accounts), "\"", local.yaml_quote)
access_scope {
type = var.access_policy_associations[count.index].access_scope.type
namespaces = var.access_policy_associations[count.index].access_scope.namespaces
}
}
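An illustrative list-style policy association, showing that `policy_arn` accepts either an abbreviation or a full policy ARN (the principal ARN and namespace are hypothetical):

access_policy_associations = [
  {
    principal_arn = "arn:aws:iam::111122223333:role/ci-deployer"
    policy_arn    = "View" # expanded to arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy
    access_scope = {
      type       = "namespace"
      namespaces = ["ci"]
    }
  }
]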
4 changes: 3 additions & 1 deletion docs/migration-v1-v2.md
@@ -10,7 +10,9 @@ by the user, to ensure the nodes and control plane can communicate.
Before version 2, this module, by default, created an additional Security Group. Prior to version `0.19.0` of this module, that additional Security Group was the only one exposed by
this module (because EKS at the time did not create the managed Security Group for the cluster), and it was intended that all worker nodes (managed and unmanaged) be placed in this
additional Security Group. With version `0.19.0`, this module exposed the managed Security Group created by the EKS cluster, in which all managed node groups are placed by default. We now
recommend placing non-managed node groups in the EKS-created Security Group as well by using the `allowed_security_group_ids` variable, and not create an additional Security Group.
recommend placing non-managed node groups in the EKS-created Security Group
as well, by using the `eks_cluster_managed_security_group_id` output to
associate the node groups with it, rather than creating an additional Security Group.

See https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html for more details.
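A minimal sketch of that recommendation, assuming this module is instantiated as `module.eks_cluster` and the self-managed workers are launched from a launch template (resource and module instance names are illustrative):

# Place self-managed workers in the EKS-managed Security Group instead of
# creating an additional one.
resource "aws_launch_template" "workers" {
  name_prefix            = "example-workers-"
  vpc_security_group_ids = [module.eks_cluster.eks_cluster_managed_security_group_id]
}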
