-
Notifications
You must be signed in to change notification settings - Fork 82
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add example for AWS feature: Security Groups for Pods #1429
Changes from 2 commits
f713efe
19656ca
3168135
1456af6
d270041
8b75313
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,3 @@ | ||
name: pod-security-groups
description: EKS cluster with pod security groups
runtime: nodejs
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,3 @@ | ||
# EKS Security Groups for Pods

Demonstrates how to configure a Pulumi EKS cluster to use Security Groups for Pods, including a verification job that confirms allowed traffic succeeds and disallowed traffic is blocked.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,56 @@ | ||
|
||
import * as pulumi from "@pulumi/pulumi"; | ||
import * as k8s from "@pulumi/kubernetes"; | ||
|
||
export function createApplication(name: string, sgId: pulumi.Output<string>, provider: k8s.Provider): k8s.core.v1.Service { | ||
const ns = new k8s.core.v1.Namespace(name, { | ||
metadata: { name: name }, | ||
}, { provider: provider }); | ||
|
||
const sgp = new k8s.apiextensions.CustomResource(name, { | ||
apiVersion: "vpcresources.k8s.aws/v1beta1", | ||
kind: "SecurityGroupPolicy", | ||
metadata: { | ||
name: name, | ||
namespace: ns.metadata.name | ||
}, | ||
spec: { | ||
podSelector: { matchLabels: { app: name } }, | ||
securityGroups: { | ||
groupIds: [sgId], | ||
} | ||
}, | ||
}, { provider: provider }); | ||
|
||
const deployment = new k8s.apps.v1.Deployment(name, { | ||
metadata: { | ||
name: name, | ||
namespace: ns.metadata.name | ||
}, | ||
spec: { | ||
selector: { matchLabels: { app: name } }, | ||
replicas: 1, | ||
template: { | ||
metadata: { labels: { app: name } }, | ||
spec: { | ||
containers: [{ | ||
name: name, | ||
image: "nginx", | ||
ports: [{ containerPort: 80 }], | ||
}], | ||
}, | ||
}, | ||
}, | ||
}, { provider: provider }); | ||
|
||
return new k8s.core.v1.Service(name, { | ||
metadata: { | ||
name: name, | ||
namespace: ns.metadata.name | ||
}, | ||
spec: { | ||
selector: { app: name }, | ||
ports: [{ port: 80, targetPort: 80 }], | ||
}, | ||
}, { provider: provider, dependsOn: [deployment, sgp] }); | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,28 @@ | ||
import * as aws from "@pulumi/aws"; | ||
import * as pulumi from "@pulumi/pulumi"; | ||
|
||
const managedPolicyArns: string[] = [ | ||
"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", | ||
"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", | ||
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", | ||
"arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore", | ||
]; | ||
|
||
// Creates a role and attches the EKS worker node IAM managed policies | ||
export function createRole(name: string): aws.iam.Role { | ||
const role = new aws.iam.Role(name, { | ||
assumeRolePolicy: aws.iam.assumeRolePolicyForPrincipal({ | ||
Service: "ec2.amazonaws.com", | ||
}), | ||
}); | ||
|
||
let counter = 0; | ||
for (const policy of managedPolicyArns) { | ||
t0yv0 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
// Create RolePolicyAttachment without returning it. | ||
const rpa = new aws.iam.RolePolicyAttachment(`${name}-policy-${counter++}`, | ||
{ policyArn: policy, role: role }, | ||
); | ||
} | ||
|
||
return role; | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,196 @@ | ||
import * as pulumi from "@pulumi/pulumi"; | ||
import * as aws from "@pulumi/aws"; | ||
import * as awsx from "@pulumi/awsx"; | ||
import * as eks from "@pulumi/eks"; | ||
import * as iam from "./iam"; | ||
import * as k8s from "@pulumi/kubernetes"; | ||
import * as app from "./application"; | ||
|
||
// IAM roles for the node groups. | ||
const role = iam.createRole("pod-security-policies"); | ||
|
||
// Create a new VPC | ||
const eksVpc = new awsx.ec2.Vpc("pod-security-policies", { | ||
enableDnsHostnames: true, | ||
cidrBlock: "10.0.0.0/16", | ||
}); | ||
|
||
// Create an EKS cluster. | ||
const cluster = new eks.Cluster("pod-security-policies", { | ||
skipDefaultNodeGroup: true, | ||
vpcId: eksVpc.vpcId, | ||
authenticationMode: eks.AuthenticationMode.Api, | ||
// Public subnets will be used for load balancers | ||
publicSubnetIds: eksVpc.publicSubnetIds, | ||
// Private subnets will be used for cluster nodes | ||
privateSubnetIds: eksVpc.privateSubnetIds, | ||
vpcCniOptions: { | ||
// required for security groups for pods | ||
enablePodEni: true, | ||
// enables using liveness or readiness probes with security groups for pods | ||
disableTcpEarlyDemux: true, | ||
configurationValues: { | ||
env: { | ||
// all inbound/outbound traffic from pod with security group will be enforced by security group rules | ||
POD_SECURITY_GROUP_ENFORCING_MODE: "strict", | ||
} | ||
} | ||
}, | ||
}); | ||
|
||
// Allows the cluster to manage ENIs, required for security groups for pods | ||
new aws.iam.RolePolicyAttachment("eks-vpc-cni-policy", { | ||
policyArn: "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController", | ||
role: cluster.core.clusterIamRole.name, | ||
}); | ||
|
||
// Export the cluster's kubeconfig. | ||
export const kubeconfig = cluster.kubeconfig; | ||
|
||
const ng = eks.createManagedNodeGroup("pod-security-policies", { | ||
scalingConfig: { | ||
minSize: 1, | ||
maxSize: 2, | ||
desiredSize: 1, | ||
}, | ||
// Pod Security Groups require instances that support ENI trunking. See: https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html | ||
instanceTypes: ["c6i.large", "c7i.large"], | ||
cluster: cluster, | ||
nodeRole: role, | ||
}); | ||
|
||
const kube = cluster.provider; | ||
|
||
// Create a SecurityGroup for the nginx application | ||
const nginxSg = new aws.ec2.SecurityGroup("nginx", { | ||
vpcId: eksVpc.vpcId, | ||
}); | ||
// Allow all traffic between the cluster to the nginx SecurityGroup | ||
configureClusterAccess("nginx", cluster, nginxSg); | ||
// Create the nginx application (Deployment, Service, SecurityGroupPolicy) | ||
const nginx = app.createApplication("nginx", nginxSg.id, kube); | ||
|
||
/* | ||
* Verify that the SecurityGroupPolicy is working as expected. | ||
*/ | ||
|
||
// Create a SecurityGroup for the caller job | ||
const callerSg = new aws.ec2.SecurityGroup("caller", { | ||
vpcId: eksVpc.vpcId, | ||
}); | ||
// Allow all traffic between the cluster and the caller SecurityGroup | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Consider consistent spacing. My eye not trained on TypeScript style but should this have an empty line before //? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Uh, that was by accident. Somehow format on save was turned off |
||
configureClusterAccess("caller", cluster, callerSg); | ||
// Allow the caller job to access the nginx service | ||
new aws.vpc.SecurityGroupIngressRule("caller-to-nginx", { | ||
securityGroupId: nginxSg.id, | ||
ipProtocol: "tcp", | ||
fromPort: 80, | ||
toPort: 80, | ||
referencedSecurityGroupId: callerSg.id, | ||
}); | ||
|
||
// Assign the caller SecurityGroup to the caller job | ||
const callerSgp = new k8s.apiextensions.CustomResource("caller-sgp", { | ||
apiVersion: "vpcresources.k8s.aws/v1beta1", | ||
kind: "SecurityGroupPolicy", | ||
metadata: { | ||
name: "caller-sgp", | ||
}, | ||
spec: { | ||
podSelector: { matchLabels: { app: "caller" } }, | ||
securityGroups: { | ||
groupIds: [callerSg.id], | ||
} | ||
}, | ||
}, { provider: kube }); | ||
|
||
// Create a job that is allowed to curl the nginx service. The job will fail if it can't reach the service. | ||
new k8s.batch.v1.Job("caller", { | ||
spec: { | ||
template: { | ||
metadata: { | ||
name: "caller", | ||
labels: { | ||
app: "caller", | ||
}, | ||
}, | ||
spec: { | ||
containers: [{ | ||
name: "caller", | ||
image: "curlimages/curl", | ||
command: ["curl", "--silent", "--show-error", "--fail", pulumi.interpolate`${nginx.metadata.name}.${nginx.metadata.namespace}:80`], | ||
}], | ||
restartPolicy: "Never", | ||
}, | ||
}, | ||
backoffLimit: 3, | ||
}, | ||
}, { provider: kube, dependsOn: [nginx, callerSgp] }); | ||
|
||
// Create a SecurityGroup for the wrongCaller job | ||
const wrongCallerSg = new aws.ec2.SecurityGroup("wrong-caller", { | ||
vpcId: eksVpc.vpcId, | ||
}); | ||
// Allow all traffic between the cluster and the caller SecurityGroup | ||
configureClusterAccess("wrong-caller", cluster, wrongCallerSg); | ||
|
||
// Assign the caller SecurityGroup to the wrongCaller job | ||
const wrongCallerSgp = new k8s.apiextensions.CustomResource("wrong-caller-sgp", { | ||
apiVersion: "vpcresources.k8s.aws/v1beta1", | ||
kind: "SecurityGroupPolicy", | ||
metadata: { | ||
name: "wrong-caller-sgp", | ||
}, | ||
spec: { | ||
podSelector: { matchLabels: { app: "wrong-caller" } }, | ||
securityGroups: { | ||
groupIds: [wrongCallerSg.id], | ||
} | ||
}, | ||
}, { provider: kube }); | ||
|
||
// Create a job that is not allowed to curl the nginx service. The job will fail if it can reach the service. | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This feels like a counter-example to test something instead of a best-practice example of how to GTD, or am I misunderstanding? Does it belong under ./examples? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'd say showing how traffic can be restricted is an important part of a firewall example. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Hmm, yeah I concede the point. A few comments to that end then perhaps? Maybe in the readme? I still get confused by mixing tests and examples in one. |
||
new k8s.batch.v1.Job("wrong-caller", { | ||
spec: { | ||
template: { | ||
metadata: { | ||
name: "wrong-caller", | ||
labels: { | ||
app: "wrong-caller", | ||
}, | ||
}, | ||
spec: { | ||
containers: [{ | ||
name: "caller", | ||
image: "curlimages/curl", | ||
command: [ | ||
"sh", "-c", | ||
pulumi.interpolate`curl --silent --show-error --fail ${nginx.metadata.name}.${nginx.metadata.namespace}:80 && exit 1 || exit 0`, | ||
], | ||
}], | ||
restartPolicy: "Never", | ||
}, | ||
}, | ||
backoffLimit: 3, | ||
}, | ||
}, { provider: kube, dependsOn: [nginx, callerSgp] }); | ||
|
||
function configureClusterAccess(name: string, cluster: eks.Cluster, sg: aws.ec2.SecurityGroup) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Should this live in a helper file? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It's an important part of this example (allowing the pods to reach things like coredns). Putting it into another file would hide that aspect IMO There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Hmm ok. I'm surprised it works while written at the bottom of the file but I guess that's how Node works. |
||
new aws.vpc.SecurityGroupIngressRule(`${name}-cluster-to-sg`, { | ||
securityGroupId: sg.id, | ||
ipProtocol: "-1", | ||
referencedSecurityGroupId: cluster.core.cluster.vpcConfig.clusterSecurityGroupId, | ||
}); | ||
|
||
new aws.vpc.SecurityGroupIngressRule(`${name}-sg-to-cluster`, { | ||
securityGroupId: cluster.core.cluster.vpcConfig.clusterSecurityGroupId, | ||
ipProtocol: "-1", | ||
referencedSecurityGroupId: sg.id, | ||
}); | ||
|
||
new aws.vpc.SecurityGroupEgressRule(`${name}-all`, { | ||
securityGroupId: sg.id, | ||
ipProtocol: "-1", | ||
cidrIpv4: "0.0.0.0/0", | ||
}); | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,13 @@ | ||
{
    "name": "pod-security-groups",
    "devDependencies": {
        "@types/node": "latest",
        "typescript": "^4.0.0"
    },
    "dependencies": {
        "@pulumi/awsx": "^2.0.0",
        "@pulumi/aws": "^6.50.1",
        "@pulumi/eks": "latest",
        "@pulumi/pulumi": "^3.0.0"
    }
}
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,26 @@ | ||
{
    "compilerOptions": {
        "outDir": "bin",
        "target": "es6",
        "lib": [
            "es6"
        ],
        "module": "commonjs",
        "moduleResolution": "node",
        "declaration": true,
        "sourceMap": true,
        "stripInternal": true,
        "experimentalDecorators": true,
        "pretty": true,
        "noFallthroughCasesInSwitch": true,
        "noImplicitAny": true,
        "noImplicitReturns": true,
        "forceConsistentCasingInFileNames": true,
        "strictNullChecks": true
    },
    "files": [
        "index.ts",
        "iam.ts",
        "application.ts"
    ]
}
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
It could be cute to include a mermaid diagram that shows what talks to what in this example, but it may be a lot of work. Wonder if AI can do a quick pass at it.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Good idea! I added one