Skip to content

Commit

Permalink
Merge pull request #5 from clouddrove/fix/terraform
Browse files Browse the repository at this point in the history
Fix/terraform
  • Loading branch information
clouddrove-ci authored Jul 5, 2023
2 parents 5814832 + 732dcb3 commit f96e375
Show file tree
Hide file tree
Showing 30 changed files with 1,059 additions and 66 deletions.
16 changes: 16 additions & 0 deletions _examples/complete/locals.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@

locals {
  # Base name and deployment region for this example stack.
  name   = "tf-helm-addons"
  region = "us-east-1"

  # Network layout: one /16 VPC spread over the first three available AZs.
  vpc_cidr = "10.0.0.0/16"
  azs      = slice(data.aws_availability_zones.available.names, 0, 3)

  # Kubernetes control-plane version consumed by the EKS module.
  cluster_version = "1.26"

  # Common tags applied to every resource that accepts them.
  tags = {
    Example    = local.name
    GithubRepo = "terraform-aws-vpc"
    GithubOrg  = "clouddrove"
  }
}
227 changes: 196 additions & 31 deletions _examples/complete/main.tf
Original file line number Diff line number Diff line change
@@ -1,62 +1,102 @@
# ------------------------------------------------------------------------------
# Resources
# ------------------------------------------------------------------------------

module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "4.0.1"
version = "5.0.0"

name = "${local.name}-vpc"
cidr = local.vpc_cidr

azs = local.azs
private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k * 8 + 1)]
public_subnets = [for k, v in local.azs_public : cidrsubnet(local.vpc_cidr, 8, k * 8 + 2)]
database_subnets = [for k, v in local.azs_database : cidrsubnet(local.vpc_cidr, 8, k * 8 + 3)]
private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)]
public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 4)]
database_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 8)]

enable_nat_gateway = true
single_nat_gateway = true
create_database_subnet_group = false

public_subnet_tags = {
"kubernetes.io/cluster/${local.name}-eks-cluster" = "shared"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/elb" = "1"
}

private_subnet_tags = {
"kubernetes.io/cluster/${local.name}-eks-cluster" = "shared"
"kubernetes.io/role/internal-elb" = 1
"kubernetes.io/role/internal-elb" = 1
}

tags = local.tags
}

################################################################################
# VPC Supporting Resources
################################################################################

data "aws_security_group" "default" {
name = "default"
vpc_id = module.vpc.vpc_id
}

###############################################################################
# AWS EKS
###############################################################################
data "aws_eks_cluster" "eks_cluster" {
# this makes downstream resources wait for data plane to be ready
name = module.eks.cluster_name
depends_on = [
module.eks.cluster_id
]
}

module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "19.13.1"
version = "19.15.3"

cluster_name = "${local.name}-eks-cluster"
cluster_version = local.cluster_version
cluster_endpoint_public_access = true

cluster_ip_family = "ipv4"

cluster_name = "${local.name}-eks-cluster"
cluster_version = "1.26"
cluster_endpoint_public_access = var.cluster_endpoint_public_access
cluster_endpoint_private_access = var.cluster_endpoint_private_access
iam_role_use_name_prefix = var.iam_role_use_name_prefix
# Set this to true if AmazonEKS_CNI_IPv6_Policy policy is not available
create_cni_ipv6_iam_policy = false

# EKS Addons
cluster_addons = {
coredns = {}
kube-proxy = {}
vpc-cni = {}
vpc-cni = {
most_recent = true
before_compute = true
configuration_values = jsonencode({
env = {
ENABLE_PREFIX_DELEGATION = "true"
WARM_PREFIX_TARGET = "1"
}
})
}
}

vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets

eks_managed_node_groups = {
# manage_aws_auth_configmap = true
# create_aws_auth_configmap = true

eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64"
instance_types = ["t3.medium"]
disk_size = 20
iam_role_attach_cni_policy = true
use_custom_launch_template = false
iam_role_additional_policies = {
policy_arn = aws_iam_policy.node_additional.arn
AWSLoadBalancerControllerIAMPolicy = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/AWSLoadBalancerControllerIAMPolicy"
}
tags = {
"kubernetes.io/cluster/${module.eks.cluster_name}" = "shared"
}
}

eks_managed_node_groups = {
critical = {
name = "critical"
instance_types = ["t3.medium"]
Expand All @@ -65,6 +105,7 @@ module "eks" {
min_size = 1
max_size = 2
desired_size = 1

}

application = {
Expand All @@ -73,23 +114,147 @@ module "eks" {
use_name_prefix = false
capacity_type = "SPOT"
min_size = 0
max_size = 0
max_size = 1
desired_size = 0
}

}
tags = local.tags
}

################################################################################
# EKS Supporting Resources
################################################################################
data "aws_caller_identity" "current" {}
data "aws_availability_zones" "available" {}

module "vpc_cni_irsa" {
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
version = "~> 5.0"

role_name_prefix = "VPC-CNI-IRSA"
attach_vpc_cni_policy = true
vpc_cni_enable_ipv6 = true

oidc_providers = {
main = {
provider_arn = module.eks.oidc_provider_arn
namespace_service_accounts = ["kube-system:aws-node"]
}
}

tags = local.tags
}

resource "aws_iam_policy" "node_additional" {
name = "${local.name}-additional"
description = "Example usage of node additional policy"

policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = [
"ec2:Describe*",
"autoscaling:Describe*",
"eks:Describe*",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"elasticloadbalancing:DescribeLoadBalancers"
]
Effect = "Allow"
Resource = "*"
},
]
})

tags = local.tags
}

data "aws_ami" "eks_default" {
most_recent = true
owners = ["amazon"]

filter {
name = "name"
values = ["amazon-eks-node-${local.cluster_version}-v*"]
}
}

data "aws_ami" "eks_default_arm" {
most_recent = true
owners = ["amazon"]

filter {
name = "name"
values = ["amazon-eks-arm64-node-${local.cluster_version}-v*"]
}
}

resource "local_file" "kubeconfig" {
depends_on = [
module.eks.cluster_id
]
content = <<EOF
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: ${module.eks.cluster_certificate_authority_data}
server: ${module.eks.cluster_endpoint}
name: ${module.eks.cluster_arn}
contexts:
- context:
cluster: ${module.eks.cluster_arn}
user: ${module.eks.cluster_arn}
name: ${module.eks.cluster_arn}
current-context: ${module.eks.cluster_arn}
kind: Config
preferences: {}
users:
- name: ${module.eks.cluster_arn}
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
args:
- --region
- ${local.region}
- eks
- get-token
- --cluster-name
- ${module.eks.cluster_name}
command: aws
EOF
filename = "${path.cwd}/config/kubeconfig"
}

resource "null_resource" "kubectl" {
depends_on = [module.eks]
provisioner "local-exec" {
command = "export KUBE_CONFIG_PATH=${path.cwd}/config/kubeconfig && aws eks update-kubeconfig --name ${module.eks.cluster_name} --region ${local.region}"
}
}

# resource "null_resource" "kubectl" {
# depends_on = [ module.eks ]
# provisioner "local-exec" {
# command = "aws sts assume-role --role-arn arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/terraform-helm-eks-addon} --role-session-name AWSCLI-Session"
# }
# }

module "addons" {
source = "../../"
source = "../../addons"
#version = "0.0.1"
name = local.name
environment = local.environment
eks_cluster_name = module.eks.cluster_name
vpc_id = module.vpc.vpc_id
kms_key_arn = ""
worker_iam_role_name = module.eks.worker_iam_role_name
kms_policy_arn = module.eks.kms_policy_arn # eks module will create kms_policy_arn
# Addons
metrics_server_enabled = false
}

depends_on = [null_resource.kubectl]
# depends_on = [ module.eks.cluster_id ]

eks_cluster_id = module.eks.cluster_id
eks_cluster_name = module.eks.cluster_name

enable_metrics_server = true
enable_cluster_autoscaler = true
enable_aws_load_balancer_controller = true

}
9 changes: 9 additions & 0 deletions _examples/complete/outputs.tf
Original file line number Diff line number Diff line change
@@ -1,3 +1,12 @@
# ------------------------------------------------------------------------------
# Outputs
# ------------------------------------------------------------------------------
# Filesystem path of this module — useful when debugging path resolution.
output "module_path" {
  value = path.module
}

# Working directory Terraform was invoked from.
output "cwd" {
  value = path.cwd
}

# Conventional kubeconfig location; presumably what local kubectl tooling
# reads — confirm against how the kubeconfig is actually written.
output "KUBE_CONFIG_PATH" {
  value = "~/.kube/config"
}
15 changes: 15 additions & 0 deletions _examples/complete/providers.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# AWS provider pinned to the example's region.
provider "aws" {
  region = local.region
}

# Kubernetes provider authenticated directly against the EKS control plane.
provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  # NOTE(review): config_path/config_context are redundant when host, CA and
  # exec credentials are supplied explicitly; kept for backward compatibility —
  # confirm before removing.
  config_context = module.eks.cluster_arn
  config_path    = "${path.cwd}/config/kubeconfig"
  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    # Pass --region explicitly so the token is requested against the cluster's
    # region rather than whatever AWS_REGION happens to be in the environment.
    args    = ["eks", "get-token", "--cluster-name", module.eks.cluster_name, "--region", local.region]
    command = "aws"
  }
}
22 changes: 22 additions & 0 deletions _examples/complete/variables.tf
Original file line number Diff line number Diff line change
@@ -1,3 +1,25 @@
# ------------------------------------------------------------------------------
# Variables
# ------------------------------------------------------------------------------

# AWS region the example stack is deployed into.
variable "region" {
  type        = string
  default     = "us-east-1"
  description = "Region Code"
}

variable "cluster_endpoint_public_access" {
  type        = bool
  default     = true
  description = "Whether the EKS cluster API endpoint is reachable from the public internet."
}

variable "cluster_endpoint_private_access" {
  type        = bool
  default     = true
  description = "Whether the EKS cluster API endpoint is reachable from within the VPC."
}

variable "iam_role_use_name_prefix" {
  # FIX: the terraform-aws-modules/eks input `iam_role_use_name_prefix` is a
  # bool; the previous `string` default ("terraform-helm-eks-addons") cannot be
  # converted to bool and fails at plan time.
  type        = bool
  default     = true
  description = "Whether the cluster IAM role name is generated with a name prefix."
}

4 changes: 4 additions & 0 deletions _examples/complete/versions.tf
Original file line number Diff line number Diff line change
Expand Up @@ -13,5 +13,9 @@ terraform {
source = "hashicorp/helm"
version = ">= 2.6"
}
kubectl = {
source = "gavinbunney/kubectl"
version = ">= 1.7.0"
}
}
}
Loading

0 comments on commit f96e375

Please sign in to comment.