From 394313ee931e38bc20fc3dcb1cd2fd1b6f66822e Mon Sep 17 00:00:00 2001
From: Vincent Lesierse
Date: Mon, 23 Dec 2019 17:54:21 +0100
Subject: [PATCH] feat(eks): helm chart support (#5390)

* Added HelmRelease construct
* feat(eks): Add HelmRelease construct
* Fix some linting problems
* Remove trailing whitespace
* Add the possibility to specify the chart version
* Changes after code review
* Add shell=True to command execution
* Execute helm command in /tmp
* Write a correct values.yaml
* Add resources to integration tests
* Change require to import
* Lazy add HelmChartHandler
* Add integration tests for Helm
* Added convenience addChart to Cluster
* Fix integration test.
* Change addChart method to use options pattern
* Added @default and truncate default chart name
* Added the Helm entry to the README.md

Co-authored-by: Elad Ben-Israel
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
---
 packages/@aws-cdk/aws-eks/README.md           |   39 +-
 packages/@aws-cdk/aws-eks/lib/cluster.ts      |   17 +-
 packages/@aws-cdk/aws-eks/lib/helm-chart.ts   |  123 ++
 .../@aws-cdk/aws-eks/lib/helm-chart/index.py  |  136 ++
 packages/@aws-cdk/aws-eks/lib/index.ts        |    1 +
 .../@aws-cdk/aws-eks/lib/kubectl-layer.ts     |    2 +-
 .../test/integ.eks-helm.lit.expected.json     | 1336 +++++++++++++++++
 .../aws-eks/test/integ.eks-helm.lit.ts        |   54 +
 .../@aws-cdk/aws-eks/test/test.cluster.ts     |    1 +
 .../@aws-cdk/aws-eks/test/test.helm-chart.ts  |   55 +
 packages/@aws-cdk/aws-eks/test/util.ts        |    8 +
 11 files changed, 1769 insertions(+), 3 deletions(-)
 create mode 100644 packages/@aws-cdk/aws-eks/lib/helm-chart.ts
 create mode 100644 packages/@aws-cdk/aws-eks/lib/helm-chart/index.py
 create mode 100644 packages/@aws-cdk/aws-eks/test/integ.eks-helm.lit.expected.json
 create mode 100644 packages/@aws-cdk/aws-eks/test/integ.eks-helm.lit.ts
 create mode 100644 packages/@aws-cdk/aws-eks/test/test.helm-chart.ts

diff --git a/packages/@aws-cdk/aws-eks/README.md b/packages/@aws-cdk/aws-eks/README.md
index 0321d68a8d057..c4c7d3caa1f97 100644
--- a/packages/@aws-cdk/aws-eks/README.md
+++ b/packages/@aws-cdk/aws-eks/README.md
@@ -381,7 +381,44 @@ When kubectl is disabled, you should be aware of the following:
    edit the [aws-auth ConfigMap](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)
    when you add capacity in order to map the IAM instance role to RBAC to allow nodes to join the cluster.
 3. Any `eks.Cluster` APIs that depend on programmatic kubectl support will fail
-   with an error: `cluster.addResource`, `cluster.awsAuth`, `props.mastersRole`.
+   with an error: `cluster.addResource`, `cluster.addChart`, `cluster.awsAuth`, `props.mastersRole`.
+
+### Helm Charts
+
+The `HelmChart` construct or `cluster.addChart` method can be used
+to add Kubernetes resources to this cluster using Helm.
+
+The following example will install the [NGINX Ingress Controller](https://kubernetes.github.io/ingress-nginx/)
+to your cluster using Helm.
+
+```ts
+// option 1: use a construct
+new HelmChart(this, 'NginxIngress', {
+  cluster,
+  chart: 'nginx-ingress',
+  repository: 'https://helm.nginx.com/stable',
+  namespace: 'kube-system'
+});
+
+// or, option 2: use `addChart`
+cluster.addChart('NginxIngress', {
+  chart: 'nginx-ingress',
+  repository: 'https://helm.nginx.com/stable',
+  namespace: 'kube-system'
+});
+```
+
+Helm charts will be installed and updated using `helm upgrade --install`.
+This means that if a chart is added to CDK with the same release name as an existing
+release, the release in the cluster will be upgraded rather than installed again.
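+
+You can also pin the chart version, set the release name explicitly and pass values to
+the chart. The following is only a sketch: the `version` number and the `values` keys
+shown here are specific to the nginx-ingress chart and are used purely as an
+illustration.
+
+```ts
+cluster.addChart('NginxIngressPinned', {
+  chart: 'nginx-ingress',
+  repository: 'https://helm.nginx.com/stable',
+  namespace: 'kube-system',
+  release: 'nginx-ingress',
+  version: '0.3.2',   // example version, pick the one you actually need
+  values: {
+    controller: {
+      replicaCount: 2 // chart-specific value, shown for illustration only
+    }
+  }
+});
+```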
+
+Helm charts are implemented as CloudFormation resources in CDK.
+This means that if the chart is deleted from your code (or the stack is
+deleted), the next `cdk deploy` will issue a `helm uninstall` command and the
+Helm chart will be deleted.
+
+When there is no `release` defined, a release name will be derived from the chart's
+`node.uniqueId`, lowercased and truncated to its last 63 characters.
 
 ### Roadmap
 
diff --git a/packages/@aws-cdk/aws-eks/lib/cluster.ts b/packages/@aws-cdk/aws-eks/lib/cluster.ts
index 14753bd7fdc77..97d11787ad45c 100644
--- a/packages/@aws-cdk/aws-eks/lib/cluster.ts
+++ b/packages/@aws-cdk/aws-eks/lib/cluster.ts
@@ -8,6 +8,7 @@ import * as path from 'path';
 import { AwsAuth } from './aws-auth';
 import { ClusterResource } from './cluster-resource';
 import { CfnCluster, CfnClusterProps } from './eks.generated';
+import { HelmChart, HelmChartOptions } from './helm-chart';
 import { KubernetesResource } from './k8s-resource';
 import { KubectlLayer } from './kubectl-layer';
 import { spotInterruptHandler } from './spot-interrupt-handler';
@@ -309,8 +310,10 @@ export class Cluster extends Resource implements ICluster {
    * automatically added by Amazon EKS to the `system:masters` RBAC group of the
    * cluster. Use `addMastersRole` or `props.mastersRole` to define additional
    * IAM roles as administrators.
+   *
+   * @internal
    */
-  private readonly _defaultMastersRole?: iam.IRole;
+  public readonly _defaultMastersRole?: iam.IRole;
 
   /**
    * Manages the aws-auth config map.
@@ -579,6 +582,18 @@ export class Cluster extends Resource implements ICluster {
     return new KubernetesResource(this, `manifest-${id}`, { cluster: this, manifest });
   }
 
+  /**
+   * Defines a Helm chart in this cluster.
+   *
+   * @param id logical id of this chart.
+   * @param options options of this chart.
+   * @returns a `HelmChart` object
+   * @throws If `kubectlEnabled` is `false`
+   */
+  public addChart(id: string, options: HelmChartOptions) {
+    return new HelmChart(this, `chart-${id}`, { cluster: this, ...options });
+  }
+
   private createKubernetesResourceHandler() {
     if (!this.kubectlEnabled) {
       return undefined;
diff --git a/packages/@aws-cdk/aws-eks/lib/helm-chart.ts b/packages/@aws-cdk/aws-eks/lib/helm-chart.ts
new file mode 100644
index 0000000000000..051b13774a3cf
--- /dev/null
+++ b/packages/@aws-cdk/aws-eks/lib/helm-chart.ts
@@ -0,0 +1,123 @@
+import { CustomResource, CustomResourceProvider } from '@aws-cdk/aws-cloudformation';
+import * as lambda from '@aws-cdk/aws-lambda';
+import { Construct, Duration, Stack } from '@aws-cdk/core';
+import * as path from 'path';
+import { Cluster } from './cluster';
+import { KubectlLayer } from './kubectl-layer';
+
+/**
+ * Helm Chart options.
+ */
+export interface HelmChartOptions {
+  /**
+   * The name of the chart.
+   */
+  readonly chart: string;
+
+  /**
+   * The name of the release.
+   * @default - If no release name is given, it will use the last 63 characters of the node's unique id.
+   */
+  readonly release?: string;
+
+  /**
+   * The chart version to install.
+   * @default - If this is not specified, the latest version is installed.
+   */
+  readonly version?: string;
+
+  /**
+   * The repository which contains the chart. For example: https://kubernetes-charts.storage.googleapis.com/
+   * @default - No repository will be used, which means that the chart needs to be an absolute URL.
+   */
+  readonly repository?: string;
+
+  /**
+   * The Kubernetes namespace scope of the requests.
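+   * For example, passing `namespace: 'kube-system'` will install the chart into the `kube-system` namespace.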
+   * @default default
+   */
+  readonly namespace?: string;
+
+  /**
+   * The values to be used by the chart.
+   * @default - No values are provided to the chart.
+   */
+  readonly values?: {[key: string]: any};
+}
+
+/**
+ * Helm Chart properties.
+ */
+export interface HelmChartProps extends HelmChartOptions {
+  /**
+   * The EKS cluster to apply this configuration to.
+   *
+   * [disable-awslint:ref-via-interface]
+   */
+  readonly cluster: Cluster;
+}
+
+/**
+ * Represents a helm chart within the Kubernetes system.
+ *
+ * Applies/deletes the resources using `helm` in sync with the resource.
+ */
+export class HelmChart extends Construct {
+  /**
+   * The CloudFormation resource type.
+   */
+  public static readonly RESOURCE_TYPE = 'Custom::AWSCDK-EKS-HelmChart';
+
+  constructor(scope: Construct, id: string, props: HelmChartProps) {
+    super(scope, id);
+
+    const stack = Stack.of(this);
+
+    // we maintain a single Helm chart custom resource handler for each cluster
+    const handler = this.getOrCreateHelmChartHandler(props.cluster);
+    if (!handler) {
+      throw new Error(`Cannot define a Helm chart on a cluster with kubectl disabled`);
+    }
+
+    new CustomResource(this, 'Resource', {
+      provider: CustomResourceProvider.lambda(handler),
+      resourceType: HelmChart.RESOURCE_TYPE,
+      properties: {
+        Release: props.release || this.node.uniqueId.slice(-63).toLowerCase(), // Helm has a 63 character limit for the name
+        Chart: props.chart,
+        Version: props.version,
+        Values: (props.values ? stack.toJsonString(props.values) : undefined),
+        Namespace: props.namespace || 'default',
+        Repository: props.repository
+      }
+    });
+  }
+
+  private getOrCreateHelmChartHandler(cluster: Cluster): lambda.IFunction | undefined {
+    if (!cluster.kubectlEnabled) {
+      return undefined;
+    }
+
+    let handler = cluster.node.tryFindChild('HelmChartHandler') as lambda.IFunction;
+    if (!handler) {
+      handler = new lambda.Function(cluster, 'HelmChartHandler', {
+        code: lambda.Code.fromAsset(path.join(__dirname, 'helm-chart')),
+        runtime: lambda.Runtime.PYTHON_3_7,
+        handler: 'index.handler',
+        timeout: Duration.minutes(15),
+        layers: [ KubectlLayer.getOrCreate(this, { version: "2.0.0-beta1" }) ],
+        memorySize: 256,
+        environment: {
+          CLUSTER_NAME: cluster.clusterName,
+        },
+
+        // NOTE: we must use the default IAM role that's mapped to "system:masters"
+        // as the execution role of this custom resource handler. This is the only
+        // way to be able to interact with the cluster after it's been created.
+ role: cluster._defaultMastersRole, + }); + } + return handler; + } +} diff --git a/packages/@aws-cdk/aws-eks/lib/helm-chart/index.py b/packages/@aws-cdk/aws-eks/lib/helm-chart/index.py new file mode 100644 index 0000000000000..0b311f61e0fcd --- /dev/null +++ b/packages/@aws-cdk/aws-eks/lib/helm-chart/index.py @@ -0,0 +1,136 @@ +import subprocess +import os +import json +import logging +import boto3 +from uuid import uuid4 +from botocore.vendored import requests + +logger = logging.getLogger() +logger.setLevel(logging.INFO) + +# these are coming from the kubectl layer +os.environ['PATH'] = '/opt/helm:/opt/awscli:' + os.environ['PATH'] + +outdir = os.environ.get('TEST_OUTDIR', '/tmp') +kubeconfig = os.path.join(outdir, 'kubeconfig') + +CFN_SUCCESS = "SUCCESS" +CFN_FAILED = "FAILED" + +def handler(event, context): + + def cfn_error(message=None): + logger.error("| cfn_error: %s" % message) + cfn_send(event, context, CFN_FAILED, reason=message) + + try: + logger.info(json.dumps(event)) + + request_type = event['RequestType'] + props = event['ResourceProperties'] + physical_id = event.get('PhysicalResourceId', None) + release = props['Release'] + chart = props['Chart'] + version = props.get('Version', None) + namespace = props.get('Namespace', None) + repository = props.get('Repository', None) + values_text = props.get('Values', None) + + cluster_name = os.environ.get('CLUSTER_NAME', None) + if cluster_name is None: + cfn_error("CLUSTER_NAME is missing in environment") + return + + subprocess.check_call([ 'aws', 'eks', 'update-kubeconfig', + '--name', cluster_name, + '--kubeconfig', kubeconfig + ]) + + # Write out the values to a file and include them with the install and upgrade + values_file = None + if not request_type == "Delete" and not values_text is None: + values = json.loads(values_text) + values_file = os.path.join(outdir, 'values.yaml') + with open(values_file, "w") as f: + f.write(json.dumps(values, indent=2)) + + if request_type == 'Create' or request_type == 'Update': + helm('upgrade', release, chart, repository, values_file, namespace, version) + elif request_type == "Delete": + try: + helm('uninstall', release, namespace=namespace) + except Exception as e: + logger.info("delete error: %s" % e) + + # if we are creating a new resource, allocate a physical id for it + # otherwise, we expect physical id to be relayed by cloudformation + if request_type == 'Create': + physical_id = "%s/%s" % (cluster_name, str(uuid4())) + else: + if not physical_id: + cfn_error("invalid request: request type is '%s' but 'PhysicalResourceId' is not defined" % request_type) + return + + cfn_send(event, context, CFN_SUCCESS, physicalResourceId=physical_id) + return + + except KeyError as e: + cfn_error("invalid request. 
Missing '%s'" % str(e)) + except Exception as e: + logger.exception(e) + cfn_error(str(e)) + +def helm(verb, release, chart = None, repo = None, file = None, namespace = None, version = None): + import subprocess + try: + cmnd = ['helm', verb, release] + if not chart is None: + cmnd.append(chart) + if verb == 'upgrade': + cmnd.append('--install') + if not repo is None: + cmnd.extend(['--repo', repo]) + if not file is None: + cmnd.extend(['--values', file]) + if not version is None: + cmnd.extend(['--version', version]) + if not namespace is None: + cmnd.extend(['--namespace', namespace]) + cmnd.extend(['--kubeconfig', kubeconfig]) + output = subprocess.check_output(cmnd, stderr=subprocess.STDOUT, cwd=outdir) + logger.info(output) + except subprocess.CalledProcessError as exc: + raise Exception(exc.output) + +#--------------------------------------------------------------------------------------------------- +# sends a response to cloudformation +def cfn_send(event, context, responseStatus, responseData={}, physicalResourceId=None, noEcho=False, reason=None): + + responseUrl = event['ResponseURL'] + logger.info(responseUrl) + + responseBody = {} + responseBody['Status'] = responseStatus + responseBody['Reason'] = reason or ('See the details in CloudWatch Log Stream: ' + context.log_stream_name) + responseBody['PhysicalResourceId'] = physicalResourceId or context.log_stream_name + responseBody['StackId'] = event['StackId'] + responseBody['RequestId'] = event['RequestId'] + responseBody['LogicalResourceId'] = event['LogicalResourceId'] + responseBody['NoEcho'] = noEcho + responseBody['Data'] = responseData + + body = json.dumps(responseBody) + logger.info("| response body:\n" + body) + + headers = { + 'content-type' : '', + 'content-length' : str(len(body)) + } + + try: + response = requests.put(responseUrl, data=body, headers=headers) + logger.info("| status code: " + response.reason) + except Exception as e: + logger.error("| unable to send response to CloudFormation") + logger.exception(e) diff --git a/packages/@aws-cdk/aws-eks/lib/index.ts b/packages/@aws-cdk/aws-eks/lib/index.ts index f894490485013..166d5bd35e5fc 100644 --- a/packages/@aws-cdk/aws-eks/lib/index.ts +++ b/packages/@aws-cdk/aws-eks/lib/index.ts @@ -1,6 +1,7 @@ export * from './cluster'; export * from './aws-auth-mapping'; export * from './k8s-resource'; +export * from './helm-chart'; export * from './aws-auth'; // AWS::EKS CloudFormation Resources: diff --git a/packages/@aws-cdk/aws-eks/lib/kubectl-layer.ts b/packages/@aws-cdk/aws-eks/lib/kubectl-layer.ts index 7bbb379aed3bd..211f6d8b36abd 100644 --- a/packages/@aws-cdk/aws-eks/lib/kubectl-layer.ts +++ b/packages/@aws-cdk/aws-eks/lib/kubectl-layer.ts @@ -26,7 +26,7 @@ export class KubectlLayer extends Construct implements lambda.ILayerVersion { */ public static getOrCreate(scope: Construct, props: KubectlLayerProps = {}): KubectlLayer { const stack = Stack.of(scope); - const id = 'kubectl-layer-8C2542BC-BF2B-4DFE-B765-E181FD30A9A0'; + const id = 'kubectl-layer-' + (props.version ? 
props.version : "8C2542BC-BF2B-4DFE-B765-E181FD30A9A0"); const exists = stack.node.tryFindChild(id) as KubectlLayer; if (exists) { return exists; diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-helm.lit.expected.json b/packages/@aws-cdk/aws-eks/test/integ.eks-helm.lit.expected.json new file mode 100644 index 0000000000000..10172624f8f2e --- /dev/null +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-helm.lit.expected.json @@ -0,0 +1,1336 @@ +[ + { + "Resources": { + "vpcA2121C38": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + "EnableDnsHostnames": true, + "EnableDnsSupport": true, + "InstanceTenancy": "default", + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc" + } + ] + } + }, + "vpcPublicSubnet1Subnet2E65531E": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/18", + "VpcId": { + "Ref": "vpcA2121C38" + }, + "AvailabilityZone": "test-region-1a", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet1" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + }, + { + "Key": "kubernetes.io/role/elb", + "Value": "1" + } + ] + } + }, + "vpcPublicSubnet1RouteTable48A2DF9B": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "vpcA2121C38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet1" + }, + { + "Key": "kubernetes.io/role/elb", + "Value": "1" + } + ] + } + }, + "vpcPublicSubnet1RouteTableAssociation5D3F4579": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "vpcPublicSubnet1RouteTable48A2DF9B" + }, + "SubnetId": { + "Ref": "vpcPublicSubnet1Subnet2E65531E" + } + } + }, + "vpcPublicSubnet1DefaultRoute10708846": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "vpcPublicSubnet1RouteTable48A2DF9B" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "vpcIGWE57CBDCA" + } + }, + "DependsOn": [ + "vpcVPCGW7984C166" + ] + }, + "vpcPublicSubnet1EIPDA49DCBE": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc", + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet1" + }, + { + "Key": "kubernetes.io/role/elb", + "Value": "1" + } + ] + } + }, + "vpcPublicSubnet1NATGateway9C16659E": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": { + "Fn::GetAtt": [ + "vpcPublicSubnet1EIPDA49DCBE", + "AllocationId" + ] + }, + "SubnetId": { + "Ref": "vpcPublicSubnet1Subnet2E65531E" + }, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet1" + }, + { + "Key": "kubernetes.io/role/elb", + "Value": "1" + } + ] + } + }, + "vpcPublicSubnet2Subnet009B674F": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.64.0/18", + "VpcId": { + "Ref": "vpcA2121C38" + }, + "AvailabilityZone": "test-region-1b", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet2" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + }, + { + "Key": "kubernetes.io/role/elb", + "Value": "1" + } + ] + } + }, + "vpcPublicSubnet2RouteTableEB40D4CB": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "vpcA2121C38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet2" + }, + { + "Key": "kubernetes.io/role/elb", + "Value": "1" + } + ] + } + }, + 
"vpcPublicSubnet2RouteTableAssociation21F81B59": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "vpcPublicSubnet2RouteTableEB40D4CB" + }, + "SubnetId": { + "Ref": "vpcPublicSubnet2Subnet009B674F" + } + } + }, + "vpcPublicSubnet2DefaultRouteA1EC0F60": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "vpcPublicSubnet2RouteTableEB40D4CB" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "vpcIGWE57CBDCA" + } + }, + "DependsOn": [ + "vpcVPCGW7984C166" + ] + }, + "vpcPublicSubnet2EIP9B3743B1": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc", + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet2" + }, + { + "Key": "kubernetes.io/role/elb", + "Value": "1" + } + ] + } + }, + "vpcPublicSubnet2NATGateway9B8AE11A": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": { + "Fn::GetAtt": [ + "vpcPublicSubnet2EIP9B3743B1", + "AllocationId" + ] + }, + "SubnetId": { + "Ref": "vpcPublicSubnet2Subnet009B674F" + }, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet2" + }, + { + "Key": "kubernetes.io/role/elb", + "Value": "1" + } + ] + } + }, + "vpcPrivateSubnet1Subnet934893E8": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.128.0/18", + "VpcId": { + "Ref": "vpcA2121C38" + }, + "AvailabilityZone": "test-region-1a", + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PrivateSubnet1" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "vpcPrivateSubnet1RouteTableB41A48CC": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "vpcA2121C38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PrivateSubnet1" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "vpcPrivateSubnet1RouteTableAssociation67945127": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "vpcPrivateSubnet1RouteTableB41A48CC" + }, + "SubnetId": { + "Ref": "vpcPrivateSubnet1Subnet934893E8" + } + } + }, + "vpcPrivateSubnet1DefaultRoute1AA8E2E5": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "vpcPrivateSubnet1RouteTableB41A48CC" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "vpcPublicSubnet1NATGateway9C16659E" + } + } + }, + "vpcPrivateSubnet2Subnet7031C2BA": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.192.0/18", + "VpcId": { + "Ref": "vpcA2121C38" + }, + "AvailabilityZone": "test-region-1b", + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PrivateSubnet2" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "vpcPrivateSubnet2RouteTable7280F23E": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "vpcA2121C38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PrivateSubnet2" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "vpcPrivateSubnet2RouteTableAssociation007E94D3": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": 
"vpcPrivateSubnet2RouteTable7280F23E" + }, + "SubnetId": { + "Ref": "vpcPrivateSubnet2Subnet7031C2BA" + } + } + }, + "vpcPrivateSubnet2DefaultRouteB0E07F99": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "vpcPrivateSubnet2RouteTable7280F23E" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "vpcPublicSubnet2NATGateway9B8AE11A" + } + } + }, + "vpcIGWE57CBDCA": { + "Type": "AWS::EC2::InternetGateway", + "Properties": { + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc" + } + ] + } + }, + "vpcVPCGW7984C166": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "VpcId": { + "Ref": "vpcA2121C38" + }, + "InternetGatewayId": { + "Ref": "vpcIGWE57CBDCA" + } + } + } + }, + "Outputs": { + "ExportsOutputRefvpcA2121C384D1B3CDE": { + "Value": { + "Ref": "vpcA2121C38" + }, + "Export": { + "Name": "k8s-vpc:ExportsOutputRefvpcA2121C384D1B3CDE" + } + }, + "ExportsOutputRefvpcPublicSubnet1Subnet2E65531ECCB85041": { + "Value": { + "Ref": "vpcPublicSubnet1Subnet2E65531E" + }, + "Export": { + "Name": "k8s-vpc:ExportsOutputRefvpcPublicSubnet1Subnet2E65531ECCB85041" + } + }, + "ExportsOutputRefvpcPublicSubnet2Subnet009B674FB900C242": { + "Value": { + "Ref": "vpcPublicSubnet2Subnet009B674F" + }, + "Export": { + "Name": "k8s-vpc:ExportsOutputRefvpcPublicSubnet2Subnet009B674FB900C242" + } + }, + "ExportsOutputRefvpcPrivateSubnet1Subnet934893E8236E2271": { + "Value": { + "Ref": "vpcPrivateSubnet1Subnet934893E8" + }, + "Export": { + "Name": "k8s-vpc:ExportsOutputRefvpcPrivateSubnet1Subnet934893E8236E2271" + } + }, + "ExportsOutputRefvpcPrivateSubnet2Subnet7031C2BA60DCB1EE": { + "Value": { + "Ref": "vpcPrivateSubnet2Subnet7031C2BA" + }, + "Export": { + "Name": "k8s-vpc:ExportsOutputRefvpcPrivateSubnet2Subnet7031C2BA60DCB1EE" + } + } + } + }, + { + "Transform": "AWS::Serverless-2016-10-31", + "Resources": { + "cluster22ClusterRole5FC933B4": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "eks.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKSClusterPolicy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKSServicePolicy" + ] + ] + } + ] + } + }, + "cluster22ControlPlaneSecurityGroup2648B9CD": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "EKS Control Plane Security Group", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "VpcId": { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcA2121C384D1B3CDE" + } + } + }, + "cluster22ControlPlaneSecurityGroupfromk8sclustercluster22NodesInstanceSecurityGroupF903AE86443C3EDA943": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "tcp", + "Description": "from k8sclustercluster22NodesInstanceSecurityGroupF903AE86:443", + "FromPort": 443, + "GroupId": { + "Fn::GetAtt": [ + "cluster22ControlPlaneSecurityGroup2648B9CD", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "cluster22NodesInstanceSecurityGroup4A3CDC24", + "GroupId" + ] + }, + "ToPort": 443 + } + }, + "cluster22ResourceHandlerServiceRoleC2E4F327": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + 
{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + } + }, + "cluster22ResourceHandlerServiceRoleDefaultPolicy1D33C3AC": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "eks:CreateCluster", + "eks:DescribeCluster", + "eks:DeleteCluster", + "eks:UpdateClusterVersion" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "cluster22ClusterRole5FC933B4", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "cluster22ResourceHandlerServiceRoleDefaultPolicy1D33C3AC", + "Roles": [ + { + "Ref": "cluster22ResourceHandlerServiceRoleC2E4F327" + } + ] + } + }, + "cluster22ResourceHandler6227579A": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParametersea4957b16062595851e7d293ee45835db05c5693669a729cc02944b6ad19a204S3Bucket371D99F8" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersea4957b16062595851e7d293ee45835db05c5693669a729cc02944b6ad19a204S3VersionKeyFDCB25DD" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersea4957b16062595851e7d293ee45835db05c5693669a729cc02944b6ad19a204S3VersionKeyFDCB25DD" + } + ] + } + ] + } + ] + ] + } + }, + "Handler": "index.handler", + "Role": { + "Fn::GetAtt": [ + "cluster22ResourceHandlerServiceRoleC2E4F327", + "Arn" + ] + }, + "Runtime": "python3.7", + "Layers": [ + { + "Fn::GetAtt": [ + "kubectllayer8C2542BCBF2B4DFEB765E181FD30A9A0617C4ADA", + "Outputs.LayerVersionArn" + ] + } + ], + "MemorySize": 512, + "Timeout": 900 + }, + "DependsOn": [ + "cluster22ResourceHandlerServiceRoleDefaultPolicy1D33C3AC", + "cluster22ResourceHandlerServiceRoleC2E4F327" + ] + }, + "cluster227BD1CB20": { + "Type": "Custom::AWSCDK-EKS-Cluster", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "cluster22ResourceHandler6227579A", + "Arn" + ] + }, + "Config": { + "roleArn": { + "Fn::GetAtt": [ + "cluster22ClusterRole5FC933B4", + "Arn" + ] + }, + "resourcesVpcConfig": { + "securityGroupIds": [ + { + "Fn::GetAtt": [ + "cluster22ControlPlaneSecurityGroup2648B9CD", + "GroupId" + ] + } + ], + "subnetIds": [ + { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcPublicSubnet1Subnet2E65531ECCB85041" + }, + { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcPublicSubnet2Subnet009B674FB900C242" + }, + { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcPrivateSubnet1Subnet934893E8236E2271" + }, + { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcPrivateSubnet2Subnet7031C2BA60DCB1EE" + } + ] + } + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "cluster22KubernetesResourceHandler599F07E6": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParameters640847533c8a00b3133aeb128edcac41fb7b60349c9e18764fcf7ea4af14d444S3Bucket919126CB" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters640847533c8a00b3133aeb128edcac41fb7b60349c9e18764fcf7ea4af14d444S3VersionKey529BEF54" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + 
"Fn::Split": [ + "||", + { + "Ref": "AssetParameters640847533c8a00b3133aeb128edcac41fb7b60349c9e18764fcf7ea4af14d444S3VersionKey529BEF54" + } + ] + } + ] + } + ] + ] + } + }, + "Handler": "index.handler", + "Role": { + "Fn::GetAtt": [ + "cluster22ResourceHandlerServiceRoleC2E4F327", + "Arn" + ] + }, + "Runtime": "python3.7", + "Environment": { + "Variables": { + "CLUSTER_NAME": { + "Ref": "cluster227BD1CB20" + } + } + }, + "Layers": [ + { + "Fn::GetAtt": [ + "kubectllayer8C2542BCBF2B4DFEB765E181FD30A9A0617C4ADA", + "Outputs.LayerVersionArn" + ] + } + ], + "MemorySize": 256, + "Timeout": 900 + }, + "DependsOn": [ + "cluster22ResourceHandlerServiceRoleDefaultPolicy1D33C3AC", + "cluster22ResourceHandlerServiceRoleC2E4F327" + ] + }, + "cluster22AwsAuthmanifest4685C84D": { + "Type": "Custom::AWSCDK-EKS-KubernetesResource", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "cluster22KubernetesResourceHandler599F07E6", + "Arn" + ] + }, + "Manifest": { + "Fn::Join": [ + "", + [ + "[{\"apiVersion\":\"v1\",\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"aws-auth\",\"namespace\":\"kube-system\"},\"data\":{\"mapRoles\":\"[{\\\"rolearn\\\":\\\"", + { + "Fn::GetAtt": [ + "AdminRole38563C57", + "Arn" + ] + }, + "\\\",\\\"groups\\\":[\\\"system:masters\\\"]},{\\\"rolearn\\\":\\\"", + { + "Fn::GetAtt": [ + "cluster22NodesInstanceRole51CD052F", + "Arn" + ] + }, + "\\\",\\\"username\\\":\\\"system:node:{{EC2PrivateDNSName}}\\\",\\\"groups\\\":[\\\"system:bootstrappers\\\",\\\"system:nodes\\\"]}]\",\"mapUsers\":\"[]\",\"mapAccounts\":\"[]\"}}]" + ] + ] + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "cluster22NodesInstanceSecurityGroup4A3CDC24": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "k8s-cluster/cluster22/Nodes/InstanceSecurityGroup", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "Tags": [ + { + "Key": "Name", + "Value": "k8s-cluster/cluster22/Nodes" + }, + { + "Key": { + "Fn::Join": [ + "", + [ + "kubernetes.io/cluster/", + { + "Ref": "cluster227BD1CB20" + } + ] + ] + }, + "Value": "owned" + } + ], + "VpcId": { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcA2121C384D1B3CDE" + } + } + }, + "cluster22NodesInstanceSecurityGroupfromk8sclustercluster22NodesInstanceSecurityGroupF903AE86ALLTRAFFIC774C7781": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "-1", + "Description": "from k8sclustercluster22NodesInstanceSecurityGroupF903AE86:ALL TRAFFIC", + "GroupId": { + "Fn::GetAtt": [ + "cluster22NodesInstanceSecurityGroup4A3CDC24", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "cluster22NodesInstanceSecurityGroup4A3CDC24", + "GroupId" + ] + } + } + }, + "cluster22NodesInstanceSecurityGroupfromk8sclustercluster22ControlPlaneSecurityGroup3B5F21B44434A6E344D": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "tcp", + "Description": "from k8sclustercluster22ControlPlaneSecurityGroup3B5F21B4:443", + "FromPort": 443, + "GroupId": { + "Fn::GetAtt": [ + "cluster22NodesInstanceSecurityGroup4A3CDC24", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "cluster22ControlPlaneSecurityGroup2648B9CD", + "GroupId" + ] + }, + "ToPort": 443 + } + }, + "cluster22NodesInstanceSecurityGroupfromk8sclustercluster22ControlPlaneSecurityGroup3B5F21B41025655355658FCAA": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "tcp", + 
"Description": "from k8sclustercluster22ControlPlaneSecurityGroup3B5F21B4:1025-65535", + "FromPort": 1025, + "GroupId": { + "Fn::GetAtt": [ + "cluster22NodesInstanceSecurityGroup4A3CDC24", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "cluster22ControlPlaneSecurityGroup2648B9CD", + "GroupId" + ] + }, + "ToPort": 65535 + } + }, + "cluster22NodesInstanceRole51CD052F": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "ec2.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKSWorkerNodePolicy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKS_CNI_Policy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + ] + ] + } + ], + "Tags": [ + { + "Key": "Name", + "Value": "k8s-cluster/cluster22/Nodes" + }, + { + "Key": { + "Fn::Join": [ + "", + [ + "kubernetes.io/cluster/", + { + "Ref": "cluster227BD1CB20" + } + ] + ] + }, + "Value": "owned" + } + ] + } + }, + "cluster22NodesInstanceProfile3D4963ED": { + "Type": "AWS::IAM::InstanceProfile", + "Properties": { + "Roles": [ + { + "Ref": "cluster22NodesInstanceRole51CD052F" + } + ] + } + }, + "cluster22NodesLaunchConfig184BF3BA": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": { + "ImageId": { + "Ref": "SsmParameterValueawsserviceeksoptimizedami114amazonlinux2recommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter" + }, + "InstanceType": "t2.medium", + "IamInstanceProfile": { + "Ref": "cluster22NodesInstanceProfile3D4963ED" + }, + "SecurityGroups": [ + { + "Fn::GetAtt": [ + "cluster22NodesInstanceSecurityGroup4A3CDC24", + "GroupId" + ] + } + ], + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "#!/bin/bash\nset -o xtrace\n/etc/eks/bootstrap.sh ", + { + "Ref": "cluster227BD1CB20" + }, + " --kubelet-extra-args \"--node-labels lifecycle=OnDemand\" --use-max-pods true\n/opt/aws/bin/cfn-signal --exit-code $? 
--stack k8s-cluster --resource cluster22NodesASGC0A97398 --region test-region" + ] + ] + } + } + }, + "DependsOn": [ + "cluster22NodesInstanceRole51CD052F" + ] + }, + "cluster22NodesASGC0A97398": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "MaxSize": "3", + "MinSize": "1", + "DesiredCapacity": "3", + "LaunchConfigurationName": { + "Ref": "cluster22NodesLaunchConfig184BF3BA" + }, + "Tags": [ + { + "Key": "Name", + "PropagateAtLaunch": true, + "Value": "k8s-cluster/cluster22/Nodes" + }, + { + "Key": { + "Fn::Join": [ + "", + [ + "kubernetes.io/cluster/", + { + "Ref": "cluster227BD1CB20" + } + ] + ] + }, + "PropagateAtLaunch": true, + "Value": "owned" + } + ], + "VPCZoneIdentifier": [ + { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcPrivateSubnet1Subnet934893E8236E2271" + }, + { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcPrivateSubnet2Subnet7031C2BA60DCB1EE" + } + ] + }, + "UpdatePolicy": { + "AutoScalingRollingUpdate": { + "WaitOnResourceSignals": false, + "PauseTime": "PT0S", + "SuspendProcesses": [ + "HealthCheck", + "ReplaceUnhealthy", + "AZRebalance", + "AlarmNotification", + "ScheduledActions" + ] + }, + "AutoScalingScheduledAction": { + "IgnoreUnmodifiedGroupSizeProperties": true + } + } + }, + "cluster22chartdashboard616811AB": { + "Type": "Custom::AWSCDK-EKS-HelmChart", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "cluster22HelmChartHandler0BAF302E", + "Arn" + ] + }, + "Release": "k8sclustercluster22chartdashboard3844c297", + "Chart": "kubernetes-dashboard", + "Namespace": "default", + "Repository": "https://kubernetes-charts.storage.googleapis.com" + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "cluster22HelmChartHandler0BAF302E": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParameters8e2989bd32b411eba804b201a0f3984c984893c7fe6daa0b572fdd59c63e3653S3BucketD01BFA78" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters8e2989bd32b411eba804b201a0f3984c984893c7fe6daa0b572fdd59c63e3653S3VersionKeyD67E9179" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters8e2989bd32b411eba804b201a0f3984c984893c7fe6daa0b572fdd59c63e3653S3VersionKeyD67E9179" + } + ] + } + ] + } + ] + ] + } + }, + "Handler": "index.handler", + "Role": { + "Fn::GetAtt": [ + "cluster22ResourceHandlerServiceRoleC2E4F327", + "Arn" + ] + }, + "Runtime": "python3.7", + "Environment": { + "Variables": { + "CLUSTER_NAME": { + "Ref": "cluster227BD1CB20" + } + } + }, + "Layers": [ + { + "Fn::GetAtt": [ + "kubectllayer200beta1B9303363", + "Outputs.LayerVersionArn" + ] + } + ], + "MemorySize": 256, + "Timeout": 900 + }, + "DependsOn": [ + "cluster22ResourceHandlerServiceRoleDefaultPolicy1D33C3AC", + "cluster22ResourceHandlerServiceRoleC2E4F327" + ] + }, + "cluster22chartnginxingress90C2D506": { + "Type": "Custom::AWSCDK-EKS-HelmChart", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "cluster22HelmChartHandler0BAF302E", + "Arn" + ] + }, + "Release": "k8sclustercluster22chartnginxingress8b03389e", + "Chart": "nginx-ingress", + "Namespace": "kube-system", + "Repository": "https://helm.nginx.com/stable" + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "kubectllayer8C2542BCBF2B4DFEB765E181FD30A9A0617C4ADA": { + "Type": "AWS::Serverless::Application", + "Properties": { + "Location": { + "ApplicationId": 
"arn:aws:serverlessrepo:us-east-1:903779448426:applications/lambda-layer-kubectl", + "SemanticVersion": "1.13.7" + }, + "Parameters": { + "LayerName": "kubectl-bedb92f2e70f45155fba70d3425dd148" + } + } + }, + "AdminRole38563C57": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "AWS": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::12345678:root" + ] + ] + } + } + } + ], + "Version": "2012-10-17" + } + } + }, + "kubectllayer200beta1B9303363": { + "Type": "AWS::Serverless::Application", + "Properties": { + "Location": { + "ApplicationId": "arn:aws:serverlessrepo:us-east-1:903779448426:applications/lambda-layer-kubectl", + "SemanticVersion": "2.0.0-beta1" + }, + "Parameters": { + "LayerName": "kubectl-aa3d1881d348da39094e6b1ce165f580" + } + } + } + }, + "Outputs": { + "cluster22ConfigCommand96B20279": { + "Value": { + "Fn::Join": [ + "", + [ + "aws eks update-kubeconfig --name ", + { + "Ref": "cluster227BD1CB20" + }, + " --region test-region" + ] + ] + } + }, + "cluster22GetTokenCommand99DB9B02": { + "Value": { + "Fn::Join": [ + "", + [ + "aws eks get-token --cluster-name ", + { + "Ref": "cluster227BD1CB20" + }, + " --region test-region" + ] + ] + } + } + }, + "Parameters": { + "AssetParametersea4957b16062595851e7d293ee45835db05c5693669a729cc02944b6ad19a204S3Bucket371D99F8": { + "Type": "String", + "Description": "S3 bucket for asset \"ea4957b16062595851e7d293ee45835db05c5693669a729cc02944b6ad19a204\"" + }, + "AssetParametersea4957b16062595851e7d293ee45835db05c5693669a729cc02944b6ad19a204S3VersionKeyFDCB25DD": { + "Type": "String", + "Description": "S3 key for asset version \"ea4957b16062595851e7d293ee45835db05c5693669a729cc02944b6ad19a204\"" + }, + "AssetParametersea4957b16062595851e7d293ee45835db05c5693669a729cc02944b6ad19a204ArtifactHashB80B497F": { + "Type": "String", + "Description": "Artifact hash for asset \"ea4957b16062595851e7d293ee45835db05c5693669a729cc02944b6ad19a204\"" + }, + "AssetParameters640847533c8a00b3133aeb128edcac41fb7b60349c9e18764fcf7ea4af14d444S3Bucket919126CB": { + "Type": "String", + "Description": "S3 bucket for asset \"640847533c8a00b3133aeb128edcac41fb7b60349c9e18764fcf7ea4af14d444\"" + }, + "AssetParameters640847533c8a00b3133aeb128edcac41fb7b60349c9e18764fcf7ea4af14d444S3VersionKey529BEF54": { + "Type": "String", + "Description": "S3 key for asset version \"640847533c8a00b3133aeb128edcac41fb7b60349c9e18764fcf7ea4af14d444\"" + }, + "AssetParameters640847533c8a00b3133aeb128edcac41fb7b60349c9e18764fcf7ea4af14d444ArtifactHash606C8127": { + "Type": "String", + "Description": "Artifact hash for asset \"640847533c8a00b3133aeb128edcac41fb7b60349c9e18764fcf7ea4af14d444\"" + }, + "AssetParameters8e2989bd32b411eba804b201a0f3984c984893c7fe6daa0b572fdd59c63e3653S3BucketD01BFA78": { + "Type": "String", + "Description": "S3 bucket for asset \"8e2989bd32b411eba804b201a0f3984c984893c7fe6daa0b572fdd59c63e3653\"" + }, + "AssetParameters8e2989bd32b411eba804b201a0f3984c984893c7fe6daa0b572fdd59c63e3653S3VersionKeyD67E9179": { + "Type": "String", + "Description": "S3 key for asset version \"8e2989bd32b411eba804b201a0f3984c984893c7fe6daa0b572fdd59c63e3653\"" + }, + "AssetParameters8e2989bd32b411eba804b201a0f3984c984893c7fe6daa0b572fdd59c63e3653ArtifactHash77099D9F": { + "Type": "String", + "Description": "Artifact hash for asset \"8e2989bd32b411eba804b201a0f3984c984893c7fe6daa0b572fdd59c63e3653\"" + }, + 
"SsmParameterValueawsserviceeksoptimizedami114amazonlinux2recommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter": { + "Type": "AWS::SSM::Parameter::Value", + "Default": "/aws/service/eks/optimized-ami/1.14/amazon-linux-2/recommended/image_id" + } + } + } +] \ No newline at end of file diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-helm.lit.ts b/packages/@aws-cdk/aws-eks/test/integ.eks-helm.lit.ts new file mode 100644 index 0000000000000..35113435a740b --- /dev/null +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-helm.lit.ts @@ -0,0 +1,54 @@ +/// !cdk-integ * + +import * as ec2 from '@aws-cdk/aws-ec2'; +import * as iam from '@aws-cdk/aws-iam'; +import { App, Construct } from '@aws-cdk/core'; +import { Cluster } from '../lib'; +import { TestStack } from './util'; + +class VpcStack extends TestStack { + public readonly vpc: ec2.Vpc; + + constructor(scope: Construct, id: string) { + super(scope, id); + this.vpc = new ec2.Vpc(this, 'vpc', { maxAzs: 2 }); + } +} + +class ClusterStack extends TestStack { + public readonly cluster: Cluster; + + constructor(scope: Construct, id: string, props: { vpc: ec2.Vpc }) { + super(scope, id); + + /// !show + // define the cluster. kubectl is enabled by default. + this.cluster = new Cluster(this, 'cluster22', { + vpc: props.vpc, + defaultCapacity: 0, + }); + + // define an IAM role assumable by anyone in the account and map it to the k8s + // `system:masters` group this is required if you want to be able to issue + // manual `kubectl` commands against the cluster. + const mastersRole = new iam.Role(this, 'AdminRole', { assumedBy: new iam.AccountRootPrincipal() }); + this.cluster.awsAuth.addMastersRole(mastersRole); + + // add some capacity to the cluster. The IAM instance role will + // automatically be mapped via aws-auth to allow nodes to join the cluster. + this.cluster.addCapacity('Nodes', { + instanceType: new ec2.InstanceType('t2.medium'), + desiredCapacity: 3, + }); + + // add two Helm charts to the cluster. 
This will be the Kubernetes dashboard and the Nginx Ingress Controller + this.cluster.addChart('dashboard', { chart: 'kubernetes-dashboard', repository: 'https://kubernetes-charts.storage.googleapis.com' }); + this.cluster.addChart('nginx-ingress', { chart: 'nginx-ingress', repository: 'https://helm.nginx.com/stable', namespace: 'kube-system' }); + /// !hide + } +} + +const app = new App(); +const vpcStack = new VpcStack(app, 'k8s-vpc'); +new ClusterStack(app, 'k8s-cluster', { vpc: vpcStack.vpc }); +app.synth(); diff --git a/packages/@aws-cdk/aws-eks/test/test.cluster.ts b/packages/@aws-cdk/aws-eks/test/test.cluster.ts index 305058068f7f3..63934c1319046 100644 --- a/packages/@aws-cdk/aws-eks/test/test.cluster.ts +++ b/packages/@aws-cdk/aws-eks/test/test.cluster.ts @@ -203,6 +203,7 @@ export = { test.throws(() => cluster.addResource('foo', {}), /Cannot define a KubernetesManifest resource on a cluster with kubectl disabled/); test.throws(() => cluster.addCapacity('boo', { instanceType: new ec2.InstanceType('r5d.24xlarge'), mapRole: true }), /Cannot map instance IAM role to RBAC if kubectl is disabled for the cluster/); + test.throws(() => new eks.HelmChart(stack, 'MyChart', { cluster, chart: 'chart' }), /Cannot define a Helm chart on a cluster with kubectl disabled/); test.done(); }, diff --git a/packages/@aws-cdk/aws-eks/test/test.helm-chart.ts b/packages/@aws-cdk/aws-eks/test/test.helm-chart.ts new file mode 100644 index 0000000000000..8d36452d2be07 --- /dev/null +++ b/packages/@aws-cdk/aws-eks/test/test.helm-chart.ts @@ -0,0 +1,55 @@ +import { expect, haveResource } from '@aws-cdk/assert'; +import { Test } from 'nodeunit'; +import * as eks from '../lib'; +import { testFixtureCluster } from './util'; + +// tslint:disable:max-line-length + +export = { + 'add Helm chart': { + 'should have default namespace'(test: Test) { + // GIVEN + const { stack, cluster } = testFixtureCluster(); + + // WHEN + new eks.HelmChart(stack, 'MyChart', { cluster, chart: 'chart' }); + + // THEN + expect(stack).to(haveResource(eks.HelmChart.RESOURCE_TYPE, { Namespace: 'default' })); + test.done(); + }, + 'should have a lowercase default release name'(test: Test) { + // GIVEN + const { stack, cluster } = testFixtureCluster(); + + // WHEN + new eks.HelmChart(stack, 'MyChart', { cluster, chart: 'chart' }); + + // THEN + expect(stack).to(haveResource(eks.HelmChart.RESOURCE_TYPE, { Release: 'stackmychartff398361' })); + test.done(); + }, + 'should trim the last 63 of the default release name'(test: Test) { + // GIVEN + const { stack, cluster } = testFixtureCluster(); + + // WHEN + new eks.HelmChart(stack, 'MyChartNameWhichISMostProbablyLongerThenSixtyThreeCharacters', { cluster, chart: 'chart' }); + + // THEN + expect(stack).to(haveResource(eks.HelmChart.RESOURCE_TYPE, { Release: 'rtnamewhichismostprobablylongerthensixtythreecharactersb800614d' })); + test.done(); + }, + 'with values'(test: Test) { + // GIVEN + const { stack, cluster } = testFixtureCluster(); + + // WHEN + new eks.HelmChart(stack, 'MyChart', { cluster, chart: 'chart', values: { foo: 123 } }); + + // THEN + expect(stack).to(haveResource(eks.HelmChart.RESOURCE_TYPE, { Values: '{\"foo\":123}' })); + test.done(); + } + } +}; diff --git a/packages/@aws-cdk/aws-eks/test/util.ts b/packages/@aws-cdk/aws-eks/test/util.ts index a96d1d720ea84..6406e746d0a3e 100644 --- a/packages/@aws-cdk/aws-eks/test/util.ts +++ b/packages/@aws-cdk/aws-eks/test/util.ts @@ -1,5 +1,6 @@ import * as ec2 from '@aws-cdk/aws-ec2'; import { App, Construct, Stack } from '@aws-cdk/core'; 
+import { Cluster } from '../lib'; export function testFixture() { const { stack, app } = testFixtureNoVpc(); @@ -14,6 +15,13 @@ export function testFixtureNoVpc() { return { stack, app }; } +export function testFixtureCluster() { + const { stack, app } = testFixtureNoVpc(); + const cluster = new Cluster(stack, 'Cluster'); + + return { stack, app, cluster }; +} + // we must specify an explicit environment because we have an AMI map that is // keyed from the target region. const env = {