diff --git a/examples/examples_nodejs_test.go b/examples/examples_nodejs_test.go
index 3d68b4bb6..bd269d69f 100644
--- a/examples/examples_nodejs_test.go
+++ b/examples/examples_nodejs_test.go
@@ -1043,3 +1043,21 @@ func TestAccNetworkPolicies(t *testing.T) {
 
 	programTestWithExtraOptions(t, &test, nil)
 }
+
+func TestAccPodSecurityGroups(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+	test := getJSBaseOptions(t).
+		With(integration.ProgramTestOptions{
+			Dir: path.Join(getCwd(t), "pod-security-groups"),
+			ExtraRuntimeValidation: func(t *testing.T, info integration.RuntimeValidationStackInfo) {
+				utils.RunEKSSmokeTest(t,
+					info.Deployment.Resources,
+					info.Outputs["kubeconfig"],
+				)
+			},
+		})
+
+	programTestWithExtraOptions(t, &test, nil)
+}
diff --git a/examples/pod-security-groups/Pulumi.yaml b/examples/pod-security-groups/Pulumi.yaml
new file mode 100644
index 000000000..9a39f26c7
--- /dev/null
+++ b/examples/pod-security-groups/Pulumi.yaml
@@ -0,0 +1,3 @@
+name: pod-security-groups
+description: EKS cluster with pod security groups
+runtime: nodejs
diff --git a/examples/pod-security-groups/README.md b/examples/pod-security-groups/README.md
new file mode 100755
index 000000000..f67f4db6f
--- /dev/null
+++ b/examples/pod-security-groups/README.md
@@ -0,0 +1,52 @@
+# Pod Security Groups
+
+This example demonstrates how to set up Security Groups for Pods in an Amazon EKS cluster using Pulumi.
+
+## Topology
+
+```mermaid
+graph TD
+    caller[caller Job]
+    wrongCaller[wrong-caller Job]
+    nginx[nginx]
+    coredns[CoreDNS]
+    caller -- Allow --> nginx
+    caller -- Allow --> coredns
+    wrongCaller -. Deny .-> nginx
+    wrongCaller -- Allow --> coredns
+    nginx -- Allow --> coredns
+```
+
+## Components
+
+1. **caller Job**: A Kubernetes Job that is allowed to communicate with both the nginx pod and CoreDNS.
+2. **wrong-caller Job**: A Kubernetes Job that is allowed to communicate with CoreDNS but denied access to the nginx pod.
+3. **nginx**: A web server pod that can communicate with CoreDNS.
+4. **CoreDNS**: The cluster's DNS service, accessible by all pods.
+
+## Security Group Rules
+
+This example implements the following Security Group rules (see the example policy below for how they are attached to pods):
+
+1. Allow the `caller` Job to access both nginx and CoreDNS.
+2. Deny the `wrong-caller` Job access to nginx, while allowing access to CoreDNS.
+3. Allow nginx to access CoreDNS.
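+
+## Example SecurityGroupPolicy
+
+Pods are attached to their Security Group through the `SecurityGroupPolicy` custom resource (`vpcresources.k8s.aws/v1beta1`), which this program creates in `application.ts` and `index.ts`. As a rough sketch with a placeholder group ID, the policy for nginx looks like this:
+
+```yaml
+apiVersion: vpcresources.k8s.aws/v1beta1
+kind: SecurityGroupPolicy
+metadata:
+  name: nginx
+  namespace: nginx
+spec:
+  podSelector:
+    matchLabels:
+      app: nginx
+  securityGroups:
+    groupIds:
+      - sg-0123456789abcdef0 # placeholder; the real ID comes from the SecurityGroup created in index.ts
+```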
diff --git a/examples/pod-security-groups/application.ts b/examples/pod-security-groups/application.ts
new file mode 100644
index 000000000..0a677108e
--- /dev/null
+++ b/examples/pod-security-groups/application.ts
@@ -0,0 +1,55 @@
+import * as pulumi from "@pulumi/pulumi";
+import * as k8s from "@pulumi/kubernetes";
+
+export function createApplication(name: string, sgId: pulumi.Output<string>, provider: k8s.Provider): k8s.core.v1.Service {
+    const ns = new k8s.core.v1.Namespace(name, {
+        metadata: { name: name },
+    }, { provider: provider });
+
+    const sgp = new k8s.apiextensions.CustomResource(name, {
+        apiVersion: "vpcresources.k8s.aws/v1beta1",
+        kind: "SecurityGroupPolicy",
+        metadata: {
+            name: name,
+            namespace: ns.metadata.name
+        },
+        spec: {
+            podSelector: { matchLabels: { app: name } },
+            securityGroups: {
+                groupIds: [sgId],
+            }
+        },
+    }, { provider: provider });
+
+    const deployment = new k8s.apps.v1.Deployment(name, {
+        metadata: {
+            name: name,
+            namespace: ns.metadata.name
+        },
+        spec: {
+            selector: { matchLabels: { app: name } },
+            replicas: 1,
+            template: {
+                metadata: { labels: { app: name } },
+                spec: {
+                    containers: [{
+                        name: name,
+                        image: "nginx",
+                        ports: [{ containerPort: 80 }],
+                    }],
+                },
+            },
+        },
+    }, { provider: provider });
+
+    return new k8s.core.v1.Service(name, {
+        metadata: {
+            name: name,
+            namespace: ns.metadata.name
+        },
+        spec: {
+            selector: { app: name },
+            ports: [{ port: 80, targetPort: 80 }],
+        },
+    }, { provider: provider, dependsOn: [deployment, sgp] });
+}
diff --git a/examples/pod-security-groups/iam.ts b/examples/pod-security-groups/iam.ts
new file mode 100644
index 000000000..214f5bfc0
--- /dev/null
+++ b/examples/pod-security-groups/iam.ts
@@ -0,0 +1,28 @@
+import * as aws from "@pulumi/aws";
+import * as pulumi from "@pulumi/pulumi";
+
+const managedPolicyArns: string[] = [
+    "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
+    "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
+    "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
+    "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
+];
+
+// Creates a role and attaches the EKS worker node IAM managed policies.
+export function createRole(name: string): aws.iam.Role {
+    const role = new aws.iam.Role(name, {
+        assumeRolePolicy: aws.iam.assumeRolePolicyForPrincipal({
+            Service: "ec2.amazonaws.com",
+        }),
+    });
+
+    let counter = 0;
+    for (const policy of managedPolicyArns) {
+        // Create RolePolicyAttachment without returning it.
+        const rpa = new aws.iam.RolePolicyAttachment(`${name}-policy-${counter++}`,
+            { policyArn: policy, role: role },
+        );
+    }
+
+    return role;
+}
diff --git a/examples/pod-security-groups/index.ts b/examples/pod-security-groups/index.ts
new file mode 100644
index 000000000..d937deb26
--- /dev/null
+++ b/examples/pod-security-groups/index.ts
@@ -0,0 +1,201 @@
+import * as pulumi from "@pulumi/pulumi";
+import * as aws from "@pulumi/aws";
+import * as awsx from "@pulumi/awsx";
+import * as eks from "@pulumi/eks";
+import * as iam from "./iam";
+import * as k8s from "@pulumi/kubernetes";
+import * as app from "./application";
+
+// IAM role for the node group.
+const role = iam.createRole("pod-security-policies");
+
+// Create a new VPC.
+const eksVpc = new awsx.ec2.Vpc("pod-security-policies", {
+    enableDnsHostnames: true,
+    cidrBlock: "10.0.0.0/16",
+});
+
+// Create an EKS cluster.
+const cluster = new eks.Cluster("pod-security-policies", {
+    skipDefaultNodeGroup: true,
+    vpcId: eksVpc.vpcId,
+    authenticationMode: eks.AuthenticationMode.Api,
+    // Public subnets will be used for load balancers
+    publicSubnetIds: eksVpc.publicSubnetIds,
+    // Private subnets will be used for cluster nodes
+    privateSubnetIds: eksVpc.privateSubnetIds,
+    vpcCniOptions: {
+        // required for security groups for pods
+        enablePodEni: true,
+        // enables using liveness or readiness probes with security groups for pods
+        disableTcpEarlyDemux: true,
+        configurationValues: {
+            env: {
+                // all inbound/outbound traffic from pod with security group will be enforced by security group rules
+                POD_SECURITY_GROUP_ENFORCING_MODE: "strict",
+            }
+        }
+    },
+});
+
+// Allows the cluster to manage ENIs, required for security groups for pods
+new aws.iam.RolePolicyAttachment("eks-vpc-cni-policy", {
+    policyArn: "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController",
+    role: cluster.core.clusterIamRole.name,
+});
+
+// Export the cluster's kubeconfig.
+export const kubeconfig = cluster.kubeconfig;
+
+const ng = eks.createManagedNodeGroup("pod-security-policies", {
+    scalingConfig: {
+        minSize: 1,
+        maxSize: 2,
+        desiredSize: 1,
+    },
+    // Pod Security Groups require instances that support ENI trunking. See: https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html
+    instanceTypes: ["c6i.large", "c7i.large"],
+    cluster: cluster,
+    nodeRole: role,
+});
+
+const kube = cluster.provider;
+
+// Create a SecurityGroup for the nginx application
+const nginxSg = new aws.ec2.SecurityGroup("nginx", {
+    vpcId: eksVpc.vpcId,
+});
+
+// Allow all traffic between the cluster and the nginx SecurityGroup
+configureClusterAccess("nginx", cluster, nginxSg);
+
+// Create the nginx application (Deployment, Service, SecurityGroupPolicy)
+const nginx = app.createApplication("nginx", nginxSg.id, kube);
+
+/*
+ * Verify that the SecurityGroupPolicy is working as expected.
+ */
+
+// Create a SecurityGroup for the caller job
+const callerSg = new aws.ec2.SecurityGroup("caller", {
+    vpcId: eksVpc.vpcId,
+});
+
+// Allow all traffic between the cluster and the caller SecurityGroup
+configureClusterAccess("caller", cluster, callerSg);
+
+// Allow the caller job to access the nginx service
+new aws.vpc.SecurityGroupIngressRule("caller-to-nginx", {
+    securityGroupId: nginxSg.id,
+    ipProtocol: "tcp",
+    fromPort: 80,
+    toPort: 80,
+    referencedSecurityGroupId: callerSg.id,
+});
+
+// Assign the caller SecurityGroup to the caller job
+const callerSgp = new k8s.apiextensions.CustomResource("caller-sgp", {
+    apiVersion: "vpcresources.k8s.aws/v1beta1",
+    kind: "SecurityGroupPolicy",
+    metadata: {
+        name: "caller-sgp",
+    },
+    spec: {
+        podSelector: { matchLabels: { app: "caller" } },
+        securityGroups: {
+            groupIds: [callerSg.id],
+        }
+    },
+}, { provider: kube });
+
+// Create a job that is allowed to curl the nginx service. The job will fail if it can't reach the service.
+new k8s.batch.v1.Job("caller", { + spec: { + template: { + metadata: { + name: "caller", + labels: { + app: "caller", + }, + }, + spec: { + containers: [{ + name: "caller", + image: "curlimages/curl", + command: ["curl", "--silent", "--show-error", "--fail", pulumi.interpolate`${nginx.metadata.name}.${nginx.metadata.namespace}:80`], + }], + restartPolicy: "Never", + }, + }, + backoffLimit: 3, + }, +}, { provider: kube, dependsOn: [nginx, callerSgp] }); + +// Create a SecurityGroup for the wrongCaller job +const wrongCallerSg = new aws.ec2.SecurityGroup("wrong-caller", { + vpcId: eksVpc.vpcId, +}); + +// Allow all traffic between the cluster and the caller SecurityGroup +configureClusterAccess("wrong-caller", cluster, wrongCallerSg); + +// Assign the caller SecurityGroup to the wrongCaller job +const wrongCallerSgp = new k8s.apiextensions.CustomResource("wrong-caller-sgp", { + apiVersion: "vpcresources.k8s.aws/v1beta1", + kind: "SecurityGroupPolicy", + metadata: { + name: "wrong-caller-sgp", + }, + spec: { + podSelector: { matchLabels: { app: "wrong-caller" } }, + securityGroups: { + groupIds: [wrongCallerSg.id], + } + }, +}, { provider: kube }); + +// Create a job that is not allowed to curl the nginx service. The job will fail if it can reach the service. +new k8s.batch.v1.Job("wrong-caller", { + spec: { + template: { + metadata: { + name: "wrong-caller", + labels: { + app: "wrong-caller", + }, + }, + spec: { + containers: [{ + name: "caller", + image: "curlimages/curl", + command: [ + "sh", "-c", + pulumi.interpolate`curl --silent --show-error --fail ${nginx.metadata.name}.${nginx.metadata.namespace}:80 && exit 1 || exit 0`, + ], + }], + restartPolicy: "Never", + }, + }, + backoffLimit: 3, + }, +}, { provider: kube, dependsOn: [nginx, callerSgp] }); + +function configureClusterAccess(name: string, cluster: eks.Cluster, sg: aws.ec2.SecurityGroup) { + new aws.vpc.SecurityGroupIngressRule(`${name}-cluster-to-sg`, { + securityGroupId: sg.id, + ipProtocol: "-1", + referencedSecurityGroupId: cluster.core.cluster.vpcConfig.clusterSecurityGroupId, + }); + + new aws.vpc.SecurityGroupIngressRule(`${name}-sg-to-cluster`, { + securityGroupId: cluster.core.cluster.vpcConfig.clusterSecurityGroupId, + ipProtocol: "-1", + referencedSecurityGroupId: sg.id, + }); + + new aws.vpc.SecurityGroupEgressRule(`${name}-all`, { + securityGroupId: sg.id, + ipProtocol: "-1", + cidrIpv4: "0.0.0.0/0", + }); +} diff --git a/examples/pod-security-groups/package.json b/examples/pod-security-groups/package.json new file mode 100644 index 000000000..0b55dad53 --- /dev/null +++ b/examples/pod-security-groups/package.json @@ -0,0 +1,13 @@ +{ + "name": "pod-security-groups", + "devDependencies": { + "@types/node": "latest", + "typescript": "^4.0.0" + }, + "dependencies": { + "@pulumi/awsx": "^2.0.0", + "@pulumi/aws": "^6.50.1", + "@pulumi/eks": "latest", + "@pulumi/pulumi": "^3.0.0" + } +} diff --git a/examples/pod-security-groups/tsconfig.json b/examples/pod-security-groups/tsconfig.json new file mode 100644 index 000000000..0c3a20355 --- /dev/null +++ b/examples/pod-security-groups/tsconfig.json @@ -0,0 +1,26 @@ +{ + "compilerOptions": { + "outDir": "bin", + "target": "es6", + "lib": [ + "es6" + ], + "module": "commonjs", + "moduleResolution": "node", + "declaration": true, + "sourceMap": true, + "stripInternal": true, + "experimentalDecorators": true, + "pretty": true, + "noFallthroughCasesInSwitch": true, + "noImplicitAny": true, + "noImplicitReturns": true, + "forceConsistentCasingInFileNames": true, + 
"strictNullChecks": true + }, + "files": [ + "index.ts", + "iam.ts", + "application.ts" + ] +}