FEATURE: EKS Protection (#61)
* codebuild source

* lambda source

* templates

* documentation

* vpcid

* architecture diagram

* extend orgid function

* handle delete

* remove pptx

* lint python

* resolve comments

* pylint

* add eks test

* default values

* move codebuild package

* fix checkov comment

* codebuild package path

* fix subs

* orgid function

* eb account conditional

* data var

* missing comma

* Update entrypoint.sh

* checkov skip

* bugfix ssm action

* lambda descriptions

* minor version

---------

Co-authored-by: Kishore Vinjam <kkvinjam@gmail.com>
ryanjpayne and kkvinjam authored Oct 23, 2024
1 parent 2f3640f commit 1573fce
Showing 39 changed files with 2,568 additions and 87 deletions.
4 changes: 4 additions & 0 deletions .project_automation/functional_tests/entrypoint.sh
@@ -41,6 +41,10 @@ run_test() {
# Run taskcat e2e test
run_test "cw-test"

run_test "cw-test-sra"

run_test "cw-eks-test"

run_test "cw-test-ct"

run_test "cw-test-ssm"
14 changes: 14 additions & 0 deletions .taskcat.yml
@@ -18,6 +18,20 @@ tests:
    regions:
      - us-east-1
    template: templates/crowdstrike_init_stack.yaml
  cw-eks-test:
    parameters:
      FalconClientID: $[taskcat_ssm_/crowdstrike/falcon_client_id]
      FalconSecret: $[taskcat_ssm_/crowdstrike/falcon_secret]
      DockerAPIToken: $[taskcat_ssm_/crowdstrike/falcon_docker_api_token]
      FalconCID: $[taskcat_ssm_/crowdstrike/falcon_cid]
      EventBusAccount: $[taskcat_ssm_/crowdstrike/eventbus_account]
      SourceS3BucketName: $[taskcat_autobucket]
      S3BucketRegion: $[taskcat_current_region]
      ProvisionOU: $[taskcat_ssm_/crowdstrike/provision-ou]
      ExcludeRegions: $[taskcat_ssm_/crowdstrike/exclude_regions]
    regions:
      - us-east-1
    template: templates/crowdstrike_init_stack.yaml
  cw-test-trail:
    parameters:
      FalconClientID: $[taskcat_ssm_/crowdstrike/falcon_client_id]
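For local verification, the new cw-eks-test entry runs the same way as the existing tests. A minimal sketch, assuming taskcat is installed and the /crowdstrike/* SSM parameters referenced above already exist in the target account:

  # Run the tests defined in .taskcat.yml (including cw-eks-test) from the repository root
  pip3 install taskcat
  taskcat test run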
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
v1.0.0
v1.1.0
34 changes: 34 additions & 0 deletions codebuild/source/buildspec.yml
@@ -0,0 +1,34 @@
version: 0.2

phases:
  install:
    on-failure: ABORT
    commands:
      - echo "Installing Prerequisites"
      - apt-get -qq update
      - apt-get -qq install -y python3
      - apt-get -qq install -y python3-pip
      - pip3 install boto3 --quiet
      - pip3 install botocore --quiet
      - curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.28.5/2024-01-04/bin/linux/amd64/kubectl
      - chmod +x ./kubectl
      - mkdir -p $HOME/bin && cp ./kubectl $HOME/bin/kubectl && export PATH=$HOME/bin:$PATH
      - curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
      - helm repo add crowdstrike https://crowdstrike.github.io/falcon-helm && helm repo update
      - ARCH=amd64
      - PLATFORM=$(uname -s)_$ARCH
      - curl -sLO "https://github.com/eksctl-io/eksctl/releases/latest/download/eksctl_$PLATFORM.tar.gz"
      - tar -xzf eksctl_$PLATFORM.tar.gz -C /tmp && rm eksctl_$PLATFORM.tar.gz
      - mv /tmp/eksctl /usr/local/bin
  pre_build:
    on-failure: ABORT
    commands:
      - python3 setup_cluster.py
      - chmod +x setup_images.sh && ./setup_images.sh
      - . /root/.bashrc
      - chmod +x setup_manifests.sh && ./setup_manifests.sh
  build:
    on-failure: ABORT
    commands:
      - chmod +x install_sensor_$NODE_TYPE.sh
      - ./install_sensor_$NODE_TYPE.sh
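Note that the buildspec itself defines no environment variables; NODE_TYPE and the account, cluster, and registry variables used by the scripts below are expected to be injected by the CodeBuild project (presumably defined in the accompanying templates, not shown in this extract). A hypothetical sketch of exercising the build phase by hand, with example values only:

  # Example values only - the CodeBuild project normally supplies these
  export NODE_TYPE=nodegroup        # or "fargate"; selects which install script runs
  export CLUSTER=example-eks-cluster
  export AWS_REGION=us-east-1
  chmod +x install_sensor_$NODE_TYPE.sh && ./install_sensor_$NODE_TYPE.sh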
11 changes: 11 additions & 0 deletions codebuild/source/falcon_admission.yaml
@@ -0,0 +1,11 @@
apiVersion: falcon.crowdstrike.com/v1alpha1
kind: FalconAdmission
metadata:
  name: falcon-admission
spec:
  falcon_api:
    client_id: FALCON_CLIENT_ID
    client_secret: FALCON_CLIENT_SECRET
    cloud_region: autodiscover
  registry:
    type: REGISTRY
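The uppercase tokens (FALCON_CLIENT_ID, FALCON_CLIENT_SECRET, REGISTRY) are placeholders, not literal values; setup_manifests.sh from the pre_build phase (not included in this diff view) presumably substitutes them before the manifest is applied. A minimal sketch of that kind of substitution, assuming the real values are exported as environment variables of the same name:

  # Illustrative only: rewrite placeholder tokens in place before kubectl create -f
  sed -i "s|FALCON_CLIENT_ID|${FALCON_CLIENT_ID}|g" falcon_admission.yaml
  sed -i "s|FALCON_CLIENT_SECRET|${FALCON_CLIENT_SECRET}|g" falcon_admission.yaml
  sed -i "s|REGISTRY|${REGISTRY}|g" falcon_admission.yaml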
57 changes: 57 additions & 0 deletions codebuild/source/install_sensor_fargate.sh
@@ -0,0 +1,57 @@
#!/bin/bash

role_arn=arn:aws:iam::${ACCOUNT_ID}:role/${SWITCH_ROLE}
OUT=$(aws sts assume-role --role-arn $role_arn --role-session-name crowdstrike-eks-codebuild);\
export AWS_ACCESS_KEY_ID=$(echo $OUT | jq -r '.Credentials''.AccessKeyId');\
export AWS_SECRET_ACCESS_KEY=$(echo $OUT | jq -r '.Credentials''.SecretAccessKey');\
export AWS_SESSION_TOKEN=$(echo $OUT | jq -r '.Credentials''.SessionToken');

echo "Creating kubeconfig for $CLUSTER"
aws eks update-kubeconfig --region $AWS_REGION --name $CLUSTER

export AWS_ACCESS_KEY_ID=""
export AWS_SECRET_ACCESS_KEY=""
export AWS_SESSION_TOKEN=""

pods=$(kubectl get pods -A)
case "$pods" in
  *kpagent*)
    echo "Protection Agent already installed on cluster: $CLUSTER"
    ;;
  *)
    echo "Installing Protection Agent..."
    helm upgrade --install -f kpa_config.value --create-namespace -n falcon-kubernetes-protection kpagent crowdstrike/cs-k8s-protection-agent
    ;;
esac
case "$pods" in
  *falcon-operator*)
    echo "Operator already installed on cluster: $CLUSTER"
    ;;
  *)
    echo "Installing Operator..."
    eksctl create fargateprofile --region $AWS_REGION --cluster $CLUSTER --name fp-falcon-operator --namespace falcon-operator
    kubectl apply -f https://github.com/CrowdStrike/falcon-operator/releases/latest/download/falcon-operator.yaml
    ;;
esac
case "$pods" in
  *falcon-sidecar-sensor*)
    echo "Sensor already installed on cluster: $CLUSTER"
    ;;
  *)
    echo "Installing sensor..."
    eksctl create fargateprofile --region $AWS_REGION --cluster $CLUSTER --name fp-falcon-system --namespace falcon-system
    kubectl create -f sidecar_sensor.yaml
    ;;
esac
if [ $ENABLE_KAC == "true" ]; then
  case "$pods" in
    *falcon-admission*)
      echo "Admission Controller already installed on cluster: $CLUSTER"
      ;;
    *)
      echo "Installing Admission Controller..."
      eksctl create fargateprofile --region $AWS_REGION --cluster $CLUSTER --name fp-falcon-kac --namespace falcon-kac
      kubectl create -f falcon_admission.yaml
      ;;
  esac
fi
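Once the script completes, the Fargate install can be checked with standard kubectl queries against the namespaces the script creates profiles for:

  # Verify the operator, sidecar sensor, protection agent, and (optional) admission controller
  kubectl get pods -n falcon-operator
  kubectl get pods -n falcon-system
  kubectl get pods -n falcon-kubernetes-protection
  kubectl get pods -n falcon-kac   # only populated when ENABLE_KAC=true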
65 changes: 65 additions & 0 deletions codebuild/source/install_sensor_nodegroup.sh
@@ -0,0 +1,65 @@
#!/bin/bash

role_arn=arn:aws:iam::${ACCOUNT_ID}:role/${SWITCH_ROLE}
OUT=$(aws sts assume-role --role-arn $role_arn --role-session-name crowdstrike-eks-codebuild);\
export AWS_ACCESS_KEY_ID=$(echo $OUT | jq -r '.Credentials''.AccessKeyId');\
export AWS_SECRET_ACCESS_KEY=$(echo $OUT | jq -r '.Credentials''.SecretAccessKey');\
export AWS_SESSION_TOKEN=$(echo $OUT | jq -r '.Credentials''.SessionToken');

echo "Creating kubeconfig for $CLUSTER"
aws eks update-kubeconfig --region $AWS_REGION --name $CLUSTER

export AWS_ACCESS_KEY_ID=""
export AWS_SECRET_ACCESS_KEY=""
export AWS_SESSION_TOKEN=""

pods=$(kubectl get pods -A)
case "$pods" in
  *kpagent*)
    echo "Protection Agent already installed on cluster: $CLUSTER"
    ;;
  *)
    echo "Installing Protection Agent..."
    helm upgrade --install -f kpa_config.value --create-namespace -n falcon-kubernetes-protection kpagent crowdstrike/cs-k8s-protection-agent
    ;;
esac
case "$pods" in
  *falcon-operator*)
    echo "Operator already installed on cluster: $CLUSTER"
    ;;
  *)
    echo "Installing Operator..."
    if [ $REGISTRY == "ecr" ]; then
      eksctl utils associate-iam-oidc-provider --region $AWS_REGION --cluster $CLUSTER --approve
      kubectl apply -f https://github.com/CrowdStrike/falcon-operator/releases/latest/download/falcon-operator.yaml
      kubectl set env -n falcon-operator deployment/falcon-operator-controller-manager AWS_REGION=$IMAGE_REGION
    else
      kubectl apply -f https://github.com/CrowdStrike/falcon-operator/releases/latest/download/falcon-operator.yaml
    fi
    ;;
esac
case "$pods" in
  *falcon-node-sensor*)
    echo "Sensor already installed on cluster: $CLUSTER"
    ;;
  *)
    echo "Installing node sensor..."
    if [ $REGISTRY == "ecr" ]; then
      kubectl create -f node_sensor_ecr.yaml
    else
      kubectl create -f node_sensor.yaml
    fi
    ;;
esac
if [ $ENABLE_KAC == "true" ]; then
  case "$pods" in
    *falcon-admission*)
      echo "Admission Controller already installed on cluster: $CLUSTER"
      ;;
    *)
      echo "Installing Admission Controller..."
      kubectl create -f falcon_admission.yaml
      ;;
  esac
fi
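A quick, hedged way to confirm which registry branch ran and that the custom resources were accepted (the resource kind names are taken from the node_sensor and falcon_admission manifests in this commit, so these commands only resolve once the operator CRDs are installed):

  # Inspect the protection-agent release and the operator-managed custom resources
  helm list -n falcon-kubernetes-protection
  kubectl get falconnodesensor
  kubectl get falconadmission   # only when ENABLE_KAC=true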
10 changes: 10 additions & 0 deletions codebuild/source/kpa_config.value
@@ -0,0 +1,10 @@
image:
  repository: KPA_URI
  tag: KPA_TAG
crowdstrikeConfig:
  clientID: FALCON_CLIENT_ID
  clientSecret: FALCON_CLIENT_SECRET
  clusterName: CLUSTER_ARN
  env: CROWDSTRIKE_CLOUD
  cid: CID_LOWER
  dockerAPIToken: DOCKER_API_TOKEN
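Before the install scripts run helm upgrade --install with this file, a rendered dry run can catch substitution or YAML errors early; a sketch assuming the placeholders above have already been replaced and the crowdstrike Helm repo from the buildspec has been added:

  # Render the chart locally with the populated values file (no cluster access required)
  helm template kpagent crowdstrike/cs-k8s-protection-agent -f kpa_config.value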
14 changes: 14 additions & 0 deletions codebuild/source/node_sensor.yaml
@@ -0,0 +1,14 @@
apiVersion: falcon.crowdstrike.com/v1alpha1
kind: FalconNodeSensor
metadata:
  name: falcon-node-sensor
spec:
  falcon_api:
    client_id: FALCON_CLIENT_ID
    client_secret: FALCON_CLIENT_SECRET
    cloud_region: autodiscover
  node:
    backend: BACKEND
  falcon:
    tags:
      - daemonset
21 changes: 21 additions & 0 deletions codebuild/source/node_sensor_ecr.yaml
@@ -0,0 +1,21 @@
apiVersion: falcon.crowdstrike.com/v1alpha1
kind: FalconNodeSensor
metadata:
  labels:
    crowdstrike.com/component: sample
    crowdstrike.com/created-by: falcon-operator
    crowdstrike.com/instance: falcon-node-sensor
    crowdstrike.com/managed-by: kustomize
    crowdstrike.com/name: falconnodesensor
    crowdstrike.com/part-of: Falcon
    crowdstrike.com/provider: crowdstrike
  name: falcon-node-sensor
spec:
  node:
    backend: BACKEND
    image: NODE_SENSOR_URI:NODE_SENSOR_TAG
  falcon:
    cid: CID
    trace: none
    tags:
      - daemonset
116 changes: 116 additions & 0 deletions codebuild/source/setup_cluster.py
@@ -0,0 +1,116 @@
import os
import time
import boto3
import botocore

AWS_REGION = os.environ['AWS_REGION']
PRINCIPAL_ARN = os.environ['PRINCIPAL_ARN']
USERNAME = os.environ['USERNAME']
CLUSTER = os.environ['CLUSTER']
NODETYPE = os.environ['NODE_TYPE']
ACCOUNT_ID = os.environ['ACCOUNT_ID']
REGION = os.environ['REGION']
SWITCH_ROLE = os.environ['SWITCH_ROLE']
NAT_IP = os.environ['NAT_IP']
ACCESS_POLICY = 'arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy'

def check_cluster(session):
    """Wait for the cluster to become ACTIVE and return its current public access CIDRs."""
    client = session.client(
        service_name='eks',
        region_name=AWS_REGION
    )

    cluster_details = client.describe_cluster(
        name=CLUSTER
    )
    public_access_cidrs = cluster_details.get('cluster', {}).get('resourcesVpcConfig', {}).get('publicAccessCidrs')
    while 'ACTIVE' not in cluster_details.get('cluster', {}).get('status'):
        time.sleep(60)
        cluster_details = client.describe_cluster(
            name=CLUSTER
        )
    print(f'Cluster {CLUSTER} is now active')
    return public_access_cidrs

def setup_cluster(session, public_access_cidrs):
    """Grant the CodeBuild principal cluster-admin access and allow the NAT IP through the public endpoint."""
    client = session.client(
        service_name='eks',
        region_name=AWS_REGION
    )

    try:
        print(f'Adding access entry for {CLUSTER}')
        client.create_access_entry(
            clusterName=CLUSTER,
            principalArn=PRINCIPAL_ARN,
            username=USERNAME,
            type='STANDARD'
        )

    except botocore.exceptions.ClientError as error:
        if error.response['Error']['Code'] == "ResourceInUseException":
            print(f'Skipping Access Entry for {CLUSTER}: {PRINCIPAL_ARN} already exists')
        else:
            print(error)
    try:
        print(f'Adding access policy for {CLUSTER}')
        client.associate_access_policy(
            clusterName=CLUSTER,
            principalArn=PRINCIPAL_ARN,
            policyArn=ACCESS_POLICY,
            accessScope={
                'type': 'cluster'
            }
        )
    except botocore.exceptions.ClientError as error:
        print(error)
    try:
        print(f'Adding NAT IP for {CLUSTER}')
        public_access_cidrs.append(f'{NAT_IP}/32')
        response = client.update_cluster_config(
            name=CLUSTER,
            resourcesVpcConfig={
                'publicAccessCidrs': public_access_cidrs
            }
        )
        update_id = response['update']['id']
        update_response = client.describe_update(
            name=CLUSTER,
            updateId=update_id
        )
        # Poll until the VPC config update finishes before the sensor install steps run
        while update_response['update']['status'] == 'InProgress':
            print('waiting for update to complete...')
            time.sleep(30)
            update_response = client.describe_update(
                name=CLUSTER,
                updateId=update_id
            )
    except botocore.exceptions.ClientError as error:
        print(error)
    print(f'Cluster: {CLUSTER} is now setup')
    return

def new_session():
    try:
        sts_connection = boto3.client('sts')
        credentials = sts_connection.assume_role(
            RoleArn=f'arn:aws:iam::{ACCOUNT_ID}:role/{SWITCH_ROLE}',
            RoleSessionName=f'crowdstrike-eks-{ACCOUNT_ID}'
        )
        return boto3.session.Session(
            aws_access_key_id=credentials['Credentials']['AccessKeyId'],
            aws_secret_access_key=credentials['Credentials']['SecretAccessKey'],
            aws_session_token=credentials['Credentials']['SessionToken'],
            region_name=REGION
        )
    except sts_connection.exceptions.ClientError as exc:
        # Print the error and continue.
        # Handle what to do with accounts that cannot be accessed
        # due to assuming role errors.
        print("Cannot access adjacent account: ", ACCOUNT_ID, exc)
        return None

session = new_session()
public_access_cidrs = check_cluster(session)
setup_cluster(session, public_access_cidrs)
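For reference, the access-entry portion of this script corresponds roughly to the following AWS CLI calls (assuming AWS CLI v2 with EKS access-entry support; $CLUSTER, $PRINCIPAL_ARN, and $USERNAME mirror the environment variables read above):

  # Approximate CLI equivalent of create_access_entry / associate_access_policy
  aws eks create-access-entry \
    --cluster-name "$CLUSTER" \
    --principal-arn "$PRINCIPAL_ARN" \
    --username "$USERNAME" \
    --type STANDARD
  aws eks associate-access-policy \
    --cluster-name "$CLUSTER" \
    --principal-arn "$PRINCIPAL_ARN" \
    --policy-arn arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy \
    --access-scope type=cluster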