Skip to content

Commit

Permalink
Adding a base EKS deployment and overlay (#298)
Browse files Browse the repository at this point in the history
  • Loading branch information
wild-endeavor authored Apr 28, 2020
1 parent 6f8e5aa commit 2c49806
Show file tree
Hide file tree
Showing 46 changed files with 2,189 additions and 2 deletions.
942 changes: 942 additions & 0 deletions deployment/eks/flyte_generated.yaml

Large diffs are not rendered by default.

25 changes: 25 additions & 0 deletions eks/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@

Steps:
- Make sure kubectl and aws-cli are installed and working
- Make sure terraform is installed and working and references aws-cli permissions
- Run Terraform files. (There seems to be a race condition in one of the IAM role creation steps - you may need to run it twice.)
- Copy or update the kubectl config file and switch to that context.
- Create the webhook
- Create ECR repo for the webhook
- Build the image and push
- Run the make cluster-up command with the right image
- Create the example 2048 game by following the ALB ingress controller walkthrough (https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html). Keep in mind that even after an address shows up in the ingress, it may take a while to provision.
- Delete the game
- Create the separate `datacatalog` database in the RDS instance (presumably the datacatalog service needs its own database — verify against the datacatalog overlay configuration).
- Follow the [Installation portion](https://github.com/aws/amazon-eks-pod-identity-webhook/blob/95808cffe6d801822dae122f2f2c87a258d70bb8/README.md#installation) of the webhook readme. You will need to make sure to use your own AWS account number, and will also need to build your own image and upload it to your ECR, which will probably require you to create that repository in your ECR.
- Go through all the overlays in the `kustomize/overlays/eks` folder and make sure all the service accounts and RDS addresses reference yours. (Do a grep for `111222333456` and `456123e6ivib`).
- Install Flyte with `kubectl apply -f deployment/eks/flyte_generated.yaml`

This is the webhook used to inject IAM role credentials into pods.
https://github.com/aws/amazon-eks-pod-identity-webhook

This is how you get pods to use the proper roles. (This is the KIAM replacement.)
https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html
The implementation of these steps is done for you in the `alb-ingress` submodule.


161 changes: 161 additions & 0 deletions eks/tf/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,161 @@
# Require Terraform 0.12+, the first release with the HCL2 syntax
# (first-class expressions, `var.x` references) used throughout these files.
terraform {
  required_version = ">= 0.12.0"
}

# AWS provider: credentials come from the "default" shared-credentials
# profile; the region is a user-supplied input variable.
provider "aws" {
  profile = "default"
  region  = var.region
}

# Create the Flyte RDS instance inside a user-supplied (pre-existing) VPC,
# separate from the VPC the EKS module creates below.
module "flyte_rds" {
  source = "./modules/flyte-rds"

  rds_vpc = var.rds_vpc
}

# Create the EKS cluster; the module provisions its own VPC, which is later
# peered with the RDS VPC so pods can reach the database.
module "flyte_eks" {
  source = "./modules/flyte-eks"

  eks_cluster_name = var.eks_cluster_name
}

# Look up both VPCs so their CIDR blocks can be used for routing and
# security-group rules below.
data "aws_vpc" "rds_vpc" {
  id = var.rds_vpc
}

data "aws_vpc" "eks_vpc" {
  id = module.flyte_eks.eks_vpc_id
}

# Look up the RDS instance created by the flyte-rds module, so its security
# group can be opened to the EKS VPC below.
data "aws_db_instance" "flyte_rds" {
  db_instance_identifier = module.flyte_rds.admin_rds_instance_id
}

# Peer the EKS VPC (requester) with the RDS VPC (accepter) so pods can reach
# the database. auto_accept works here because both VPCs are in the same
# account and region; DNS resolution is enabled in both directions so the
# RDS endpoint hostname resolves to a private IP from inside EKS.
resource "aws_vpc_peering_connection" "eks_to_main_peering" {
  peer_vpc_id = var.rds_vpc
  vpc_id      = module.flyte_eks.eks_vpc_id
  auto_accept = true

  tags = {
    Name = "VPC peering connection between Flyte RDS and EKS"
  }

  accepter {
    allow_remote_vpc_dns_resolution = true
  }

  requester {
    allow_remote_vpc_dns_resolution = true
  }
}

# Find the EKS VPC's public route table by its Name tag.
# NOTE(review): this relies on the flyte-eks module tagging its VPC's public
# route table "<cluster>-vpc-public" — confirm if that module changes.
data "aws_route_table" "eks_public_route_table" {
  vpc_id = module.flyte_eks.eks_vpc_id
  filter {
    name   = "tag:Name"
    values = ["${var.eks_cluster_name}-vpc-public"]
  }
}

# Route traffic destined for the RDS VPC's CIDR block through the peering
# connection, from the EKS public route table.
resource "aws_route" "route_rds_cidr" {
  route_table_id            = data.aws_route_table.eks_public_route_table.id
  destination_cidr_block    = data.aws_vpc.rds_vpc.cidr_block
  vpc_peering_connection_id = aws_vpc_peering_connection.eks_to_main_peering.id
}

# Open the RDS instance's security group to inbound traffic from the entire
# EKS VPC CIDR on port 5432, so pods can connect over the peering connection.
# NOTE(review): 5432 assumes the RDS engine is PostgreSQL — confirm against
# the flyte-rds module.
resource "aws_security_group_rule" "allow_eks_to_rds" {
  type              = "ingress"
  from_port         = 5432
  to_port           = 5432
  protocol          = "tcp"
  cidr_blocks       = [data.aws_vpc.eks_vpc.cidr_block]
  security_group_id = data.aws_db_instance.flyte_rds.vpc_security_groups[0]
}

# The following implements the instructions set forth by:
# https://github.com/aws/amazon-eks-pod-identity-webhook/blob/95808cffe6d801822dae122f2f2c87a258d70bb8/README.md
# This is a webhook that will allow pods to assume arbitrarily constrained roles via their service account.
# TODO: This should be moved into a separate module probably but will require further refactoring as the assume role
# policy used is also used further below in the ALB ingress module.

# Register the EKS cluster's public OIDC issuer with IAM, enabling IAM Roles
# for Service Accounts (IRSA): pods federate into IAM via the issuer.
resource "aws_iam_openid_connect_provider" "eks_oidc_connection" {
  client_id_list = ["sts.amazonaws.com"]
  # NOTE(review): hard-coded root-CA thumbprint for the OIDC issuer's TLS
  # chain; this breaks silently if AWS rotates the certificate chain —
  # confirm it is still current before applying.
  thumbprint_list = ["9e99a48a9960b14926bb7f3b02e22da2b0ab7280"]
  url             = module.flyte_eks.eks_oidc_issuer
}

locals {
  # Extract the issuer host/path ("oidc.eks.<region>.amazonaws.com/id/<id>")
  # from the OIDC provider ARN; it becomes the condition-key prefix in the
  # trust policy below. The dots in "oidc.eks" are escaped so the pattern
  # only matches that literal prefix (an unescaped "." matches any character).
  issuer_parsed = regex("^arn.*(?P<trailing>oidc\\.eks.*)", aws_iam_openid_connect_provider.eks_oidc_connection.arn)
}

# Trust policy allowing pods to assume IAM roles via the OIDC provider
# (sts:AssumeRoleWithWebIdentity), i.e. the IRSA mechanism.
# NOTE(review): the "system:serviceaccount:*:*" wildcard lets ANY service
# account in ANY namespace assume roles that use this trust document —
# consider scoping this down per role.
data "aws_iam_policy_document" "let_pods_assume_roles" {
  statement {
    actions = ["sts:AssumeRoleWithWebIdentity"]

    principals {
      type        = "Federated"
      identifiers = [aws_iam_openid_connect_provider.eks_oidc_connection.arn]
    }

    condition {
      test     = "StringLike"
      variable = "${local.issuer_parsed.trailing}:sub"

      values = [
        "system:serviceaccount:*:*",
      ]
    }
  }
}

# IAM role assumed (via IRSA) by the Flyte components' own service accounts.
resource "aws_iam_role" "flyte_operator" {
  name               = "flyte-operator"
  assume_role_policy = data.aws_iam_policy_document.let_pods_assume_roles.json
}


# Policy document granting every S3 action on every resource.
# TODO: Scope this down later — both the action list and the resource list
# are maximally broad.
data "aws_iam_policy_document" "all_s3_access" {
  statement {
    actions = [
      "s3:*",
    ]

    resources = [
      "*",
    ]
  }
}

# Materialize the S3 policy document above as a managed IAM policy.
resource "aws_iam_policy" "flyte_operator_s3_access" {
  name   = "flyte_operator_s3_access"
  path   = "/"
  policy = data.aws_iam_policy_document.all_s3_access.json
}

# Attach the S3 access policy to the flyte-operator role.
resource "aws_iam_role_policy_attachment" "flyte_operator_s3_attach" {
  role       = aws_iam_role.flyte_operator.name
  policy_arn = aws_iam_policy.flyte_operator_s3_access.arn
}

# Install the AWS ALB ingress controller into the cluster (IAM role + policy
# + Kubernetes RBAC + deployment). Reuses the IRSA trust policy defined above
# for the controller's IAM role.
module "alb_ingress" {
  source = "./modules/alb-ingress"

  region                    = var.region
  eks_cluster_name          = var.eks_cluster_name
  cluster_id                = module.flyte_eks.cluster_id
  eks_vpc_id                = module.flyte_eks.eks_vpc_id
  assume_role_policy_string = data.aws_iam_policy_document.let_pods_assume_roles.json
}

183 changes: 183 additions & 0 deletions eks/tf/modules/alb-ingress/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,183 @@
# As required by https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html
# Fetch the IAM policy JSON published with the ALB ingress controller,
# pinned to the v1.1.4 tag to match the container image deployed below.
# NOTE(review): requires network access at plan/apply time — confirm that is
# acceptable for your CI environment.
data "http" "alb_ingress_policy" {
  url = "https://raw.githubusercontent.com/kubernetes-sigs/aws-alb-ingress-controller/v1.1.4/docs/examples/iam-policy.json"

  request_headers = {
    Accept = "application/json"
  }
}

# Create the IAM policy for the ALB ingress controller from the fetched JSON.
resource "aws_iam_policy" "k8s_alb_ingress_controller" {
  name   = "ALBIngressControllerIAMPolicy"
  path   = "/"
  policy = data.http.alb_ingress_policy.body
}

# IAM role for the ALB ingress controller's service account; the IRSA trust
# policy is passed in from the root module.
resource "aws_iam_role" "eks_alb_ingress_controller" {
  name               = "eks-alb-ingress-controller"
  assume_role_policy = var.assume_role_policy_string
}

# Attach the ALB ingress policy to the ingress controller role.
resource "aws_iam_role_policy_attachment" "eks_alb_attachment" {
  role       = aws_iam_role.eks_alb_ingress_controller.name
  policy_arn = aws_iam_policy.k8s_alb_ingress_controller.arn
}

# Look up the cluster endpoint/CA and a short-lived auth token so the
# kubernetes provider below can connect without a kubeconfig file.
data "aws_eks_cluster" "cluster" {
  name = var.cluster_id
}

data "aws_eks_cluster_auth" "cluster" {
  name = var.cluster_id
}

# Kubernetes provider configured directly from the EKS data sources above;
# load_config_file = false ensures the local kubeconfig is never consulted.
provider "kubernetes" {
  host                   = data.aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
  token                  = data.aws_eks_cluster_auth.cluster.token
  load_config_file       = false
  version                = "~> 1.9"
}

# ClusterRole for the ALB ingress controller: write access to ingress-related
# resources and read-only access to the cluster objects it watches.
resource "kubernetes_cluster_role" "alb_ingress_controller" {
  metadata {
    name = "alb-ingress-controller"

    labels = {
      "app.kubernetes.io/name" : "alb-ingress-controller"
    }
  }

  # Write access to the resources the controller manages and reports status on.
  rule {
    api_groups = [
      "",
      "extensions",
    ]
    resources = [
      "configmaps",
      "endpoints",
      "events",
      "ingresses",
      "ingresses/status",
      "services",
    ]
    verbs = [
      "create",
      "get",
      "list",
      "update",
      "watch",
      "patch",
    ]
  }

  # Read-only access to cluster objects the controller only observes.
  rule {
    api_groups = [
      "",
      "extensions",
    ]
    resources = [
      "nodes",
      "pods",
      "secrets",
      "services",
      "namespaces",
    ]
    verbs = [
      "get",
      "list",
      "watch",
    ]
  }
}

# Service account for the controller; the eks.amazonaws.com/role-arn
# annotation is what the pod identity webhook uses to inject the IAM role
# credentials (IRSA) into pods running under this account.
resource "kubernetes_service_account" "alb_ingress_controller" {
  metadata {
    name      = "alb-ingress-controller"
    namespace = "kube-system"

    labels = {
      "app.kubernetes.io/name" = "alb-ingress-controller"
    }

    annotations = {
      "eks.amazonaws.com/role-arn" = aws_iam_role.eks_alb_ingress_controller.arn
    }
  }
}

# Bind the ClusterRole above to the controller's service account.
resource "kubernetes_cluster_role_binding" "alb_ingress_controller" {
  metadata {
    name = "alb-ingress-controller"

    labels = {
      "app.kubernetes.io/name" = "alb-ingress-controller"
    }
  }

  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = kubernetes_cluster_role.alb_ingress_controller.metadata[0].name
  }

  subject {
    kind      = "ServiceAccount"
    name      = kubernetes_service_account.alb_ingress_controller.metadata[0].name
    namespace = "kube-system"
  }
}

# Deploy the ALB ingress controller (v1.1.4, matching the IAM policy fetched
# above) into kube-system, running under the IRSA-annotated service account.
resource "kubernetes_deployment" "alb_ingress_controller" {
  metadata {
    name      = "alb-ingress-controller"
    namespace = "kube-system"

    labels = {
      "app.kubernetes.io/name" = "alb-ingress-controller"
    }
  }

  spec {
    selector {
      match_labels = {
        "app.kubernetes.io/name" = "alb-ingress-controller"
      }
    }

    template {
      metadata {
        name      = "alb-ingress-controller"
        namespace = "kube-system"

        labels = {
          "app.kubernetes.io/name" = "alb-ingress-controller"
        }
      }

      spec {
        container {
          name  = "alb-ingress-controller"
          image = "docker.io/amazon/aws-alb-ingress-controller:v1.1.4"
          args = [
            "--ingress-class=alb",
            "--cluster-name=${var.eks_cluster_name}",
            "--aws-vpc-id=${var.eks_vpc_id}",
            "--aws-region=${var.region}",
            "--feature-gates=waf=false",
          ]
        }

        # Token must be mounted so the webhook can exchange it for IAM creds.
        service_account_name            = kubernetes_service_account.alb_ingress_controller.metadata[0].name
        automount_service_account_token = true

        # NOTE(review): "beta.kubernetes.io/os" is the legacy label; newer
        # clusters use "kubernetes.io/os" — confirm against the target
        # Kubernetes version.
        node_selector = {
          "beta.kubernetes.io/os" = "linux"
        }
      }
    }
  }
}


Empty file.
20 changes: 20 additions & 0 deletions eks/tf/modules/alb-ingress/variables.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Inputs for the alb-ingress module.

variable "region" {
  type        = string
  description = "AWS region of the EKS cluster; passed to the controller via --aws-region."
}

variable "eks_cluster_name" {
  type        = string
  description = "Name of the EKS cluster; passed to the controller via --cluster-name."
}

variable "eks_vpc_id" {
  type        = string
  description = "ID of the EKS cluster's VPC; passed to the controller via --aws-vpc-id."
}

variable "cluster_id" {
  type        = string
  description = "EKS cluster identifier used to look up the endpoint, CA, and auth token."
}

variable "assume_role_policy_string" {
  type        = string
  description = "JSON IRSA trust policy applied to the ingress controller's IAM role."
}

Loading

0 comments on commit 2c49806

Please sign in to comment.