I-ALiRT: OpsSW test updates (#388)
* port mapping
laspsandoval authored Nov 12, 2024
1 parent 1e2ab0a commit d646fd8
Showing 5 changed files with 93 additions and 68 deletions.
73 changes: 45 additions & 28 deletions sds_data_manager/constructs/ialirt_processing_construct.py
@@ -27,8 +27,7 @@ def __init__(
construct_id: str,
vpc: ec2.Vpc,
processing_name: str,
ialirt_ports: list[int],
container_port: int,
ports: list[int],
ialirt_bucket: s3.Bucket,
secret_name: str,
**kwargs,
@@ -45,10 +44,8 @@ def __init__(
VPC into which to put the resources that require networking.
processing_name : str
Name of the processing stack.
ialirt_ports : list[int]
List of ports to listen on for incoming traffic.
container_port : int
Port to be used by the container.
ports : list[int]
List of ports to listen on for incoming traffic and to be used by the container.
ialirt_bucket: s3.Bucket
S3 bucket
secret_name : str,
@@ -59,8 +56,7 @@
"""
super().__init__(scope, construct_id, **kwargs)

self.ports = ialirt_ports
self.container_port = container_port
self.ports = ports
self.vpc = vpc
self.s3_bucket_name = ialirt_bucket.bucket_name
self.secret_name = secret_name
@@ -89,14 +85,14 @@ def create_ecs_security_group(self, processing_name):
)

# Only allow traffic from the NLB security group
self.ecs_security_group.add_ingress_rule(
peer=ec2.Peer.security_group_id(
self.load_balancer_security_group.security_group_id
),
connection=ec2.Port.tcp(self.container_port),
description=f"Allow inbound traffic from the NLB on "
f"TCP port {self.container_port}",
)
for port in self.ports:
self.ecs_security_group.add_ingress_rule(
peer=ec2.Peer.security_group_id(
self.load_balancer_security_group.security_group_id
),
connection=ec2.Port.tcp(port),
description=f"Allow inbound traffic from the NLB on TCP port {port}",
)

def create_load_balancer_security_group(self, processing_name):
"""Create and return a security group for load balancers."""
@@ -207,8 +203,8 @@ def add_compute_resources(self, processing_name):
# Allowable values:
# https://docs.aws.amazon.com/cdk/api/v2/docs/
# aws-cdk-lib.aws_ecs.TaskDefinition.html#cpu
memory_limit_mib=512,
cpu=256,
memory_limit_mib=1024,
cpu=512,
logging=ecs.LogDrivers.aws_logs(stream_prefix=f"Ialirt{processing_name}"),
environment={"S3_BUCKET": self.s3_bucket_name},
# Ensure the ECS task is running in privileged mode,
@@ -219,12 +215,13 @@
# Map ports to container
# NLB needs to know which port on the EC2 instances
# it should forward the traffic to
port_mapping = ecs.PortMapping(
container_port=self.container_port,
host_port=self.container_port,
protocol=ecs.Protocol.TCP,
)
container.add_port_mappings(port_mapping)
for port in self.ports:
port_mapping = ecs.PortMapping(
container_port=port,
host_port=port,
protocol=ecs.Protocol.TCP,
)
container.add_port_mappings(port_mapping)

# ECS Service is a configuration that
# ensures application can run and maintain
@@ -250,7 +247,7 @@ def add_autoscaling(self, processing_name):
self,
f"AutoScalingGroup{processing_name}",
instance_type=ec2.InstanceType.of(
ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.MICRO
ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL
),
machine_image=ecs.EcsOptimizedImage.amazon_linux2(),
vpc=self.vpc,
@@ -259,6 +256,13 @@

auto_scaling_group.apply_removal_policy(RemovalPolicy.DESTROY)

# Attach the AmazonSSMManagedInstanceCore policy for SSM access
auto_scaling_group.role.add_managed_policy(
iam.ManagedPolicy.from_aws_managed_policy_name(
"AmazonSSMManagedInstanceCore"
)
)

# integrates ECS with EC2 Auto Scaling Groups
# to manage the scaling and provisioning of the underlying
# EC2 instances based on the requirements of ECS tasks
@@ -301,9 +305,22 @@ def add_load_balancer(self, processing_name):

# Register the ECS service as a target for the listener
listener.add_targets(
f"Target{processing_name}{self.container_port}",
port=self.container_port,
targets=[self.ecs_service],
f"Target{processing_name}{port}",
port=port,
# Specifies the container and port to route traffic to.
targets=[
self.ecs_service.load_balancer_target(
container_name=f"IalirtContainer{processing_name}",
container_port=port,
)
],
# Configures health checks for the target group
# to ensure traffic is routed only to healthy ECS tasks.
health_check=elbv2.HealthCheck(
enabled=True,
port=str(port),
protocol=elbv2.Protocol.TCP,
),
)

# This simply prints the DNS name of the
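A minimal synth-level sketch, not part of this commit, of how the per-port wiring above could be checked with aws_cdk.assertions. The App/Stack/Vpc/Bucket scaffolding and the expected target-group count are assumptions for illustration; only IalirtProcessing and its new ports signature come from this diff.

# Hypothetical check: assumes add_load_balancer registers one target group per port.
from aws_cdk import App, Stack
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_s3 as s3
from aws_cdk.assertions import Template

from sds_data_manager.constructs import ialirt_processing_construct

app = App()
stack = Stack(app, "IalirtAssertionStack")  # throwaway stack used only for synth
vpc = ec2.Vpc(stack, "Vpc", max_azs=2)
bucket = s3.Bucket(stack, "IalirtBucket")

ialirt_processing_construct.IalirtProcessing(
    scope=stack,
    construct_id="IalirtProcessingPrimary",
    vpc=vpc,
    processing_name="Primary",
    ports=[1234, 1235],
    ialirt_bucket=bucket,
    secret_name="nexus-credentials",
)

template = Template.from_stack(stack)
# Two configured ports should yield two NLB target groups (assumed one per port).
template.resource_count_is("AWS::ElasticLoadBalancingV2::TargetGroup", 2)
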
8 changes: 3 additions & 5 deletions sds_data_manager/utils/stackbuilder.py
@@ -265,18 +265,16 @@ def build_sds(
)

# All traffic to I-ALiRT is directed to listed container ports
ialirt_ports = {"Primary": [8080, 8081], "Secondary": [80]}
container_ports = {"Primary": 8080, "Secondary": 80}
ports = {"Primary": [1234, 1235], "Secondary": [1236]}
ialirt_secret_name = "nexus-credentials" # noqa

for primary_or_secondary in ialirt_ports:
for primary_or_secondary in ports:
ialirt_processing_construct.IalirtProcessing(
scope=ialirt_stack,
construct_id=f"IalirtProcessing{primary_or_secondary}",
vpc=networking.vpc,
processing_name=primary_or_secondary,
ialirt_ports=ialirt_ports[primary_or_secondary],
container_port=container_ports[primary_or_secondary],
ports=ports[primary_or_secondary],
ialirt_bucket=ialirt_bucket.ialirt_bucket,
secret_name=ialirt_secret_name,
)
25 changes: 12 additions & 13 deletions tests/infrastructure/test_ialirt_processing_construct.py
@@ -8,36 +8,35 @@
def get_nlb_dns(stack_name, port, container_name):
"""Retrieve DNS for the NLB from CloudFormation."""
client = boto3.client("cloudformation")
response = client.describe_constructs(StackName=stack_name)
response = client.describe_stacks(StackName=stack_name)
output_key = f"LoadBalancerDNS{container_name}{port}"
outputs = response["Stacks"][0]["Outputs"]
for output in outputs:
if output["OutputKey"] == output_key:
if output_key in output["OutputKey"]:
return output["OutputValue"]
raise ValueError(f"DNS output not found for port {port} in stack.")


@pytest.mark.xfail(reason="Will fail unless IALiRT stack is deployed.")
def test_nlb_response():
"""Test to ensure the NLB responds with HTTP 200 status."""
ialirt_ports = {"Primary": [8080, 8081], "Secondary": [80]}
container_ports = {"Primary": 8080, "Secondary": 80}
ialirt_ports = {"Primary": [1235, 1234], "Secondary": [1236]}

for stack_name, ports in ialirt_ports.items():
for container_name, ports in ialirt_ports.items():
for port in ports:
nlb_dns = get_nlb_dns(f"IalirtProcessing{stack_name}", port, stack_name)
print(f"Testing URL: {nlb_dns}")
nlb_dns = get_nlb_dns("IalirtStack", port, container_name)
# Specify a timeout for the request
response = requests.get(nlb_dns, timeout=10) # timeout in seconds
assert (
response.status_code == 200
), f"NLB did not return HTTP 200 on port {port} for {stack_name}"
), f"NLB did not return HTTP 200 on port {port} for {container_name}"
assert (
response.text == f"Hello World from Port {container_ports[stack_name]}."
), f"NLB did not return expected text on port {port} for {stack_name}"
response.text == f"Hello from Port {port}!"
), f"NLB did not return expected text on port {port} for {container_name}"
s3_response = requests.get(
nlb_dns + "/list", timeout=10
) # timeout in seconds
assert (
f"test_file{container_ports[stack_name]}.txt" in s3_response.text
), f"NLB did not return expected file name on port {port} for {stack_name}"
assert f"test_file{port}.txt" in s3_response.text, (
f"NLB did not return expected file name on port {port} "
f"for {container_name}"
)
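For reference, a standalone sketch (assumed, not taken from the repository) of the output-key matching that get_nlb_dns now relies on: CloudFormation output keys generated by CDK typically include the construct path and a hash suffix, so the test matches LoadBalancerDNS plus the container name and port by substring rather than by equality. The keys and DNS values below are made up for illustration.

# Hypothetical data shaped like a describe_stacks response; values are invented.
fake_outputs = [
    {
        "OutputKey": "IalirtProcessingPrimaryLoadBalancerDNSPrimary1234A1B2C3",
        "OutputValue": "ialirt-primary-nlb-1234.elb.us-west-2.amazonaws.com",
    },
    {
        "OutputKey": "IalirtProcessingPrimaryLoadBalancerDNSPrimary1235D4E5F6",
        "OutputValue": "ialirt-primary-nlb-1235.elb.us-west-2.amazonaws.com",
    },
]


def find_dns(outputs, container_name, port):
    """Return the value of the first output whose key contains the target string."""
    output_key = f"LoadBalancerDNS{container_name}{port}"
    for output in outputs:
        if output_key in output["OutputKey"]:
            return output["OutputValue"]
    raise ValueError(f"DNS output not found for port {port} in stack.")


assert find_dns(fake_outputs, "Primary", 1235).startswith("ialirt-primary-nlb-1235")
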
6 changes: 3 additions & 3 deletions tests/test-data/ialirt_ec2/Dockerfile
@@ -27,9 +27,9 @@ COPY entrypoint.sh /app/entrypoint.sh
# Make the scripts executable
RUN chmod +x /app/mount_s3.sh /app/start_flask.sh /app/entrypoint.sh

# Make port 8080 available to the world outside this container
# Note: The port number is changed from 8080 to 80 for the secondary system.
EXPOSE 80
# Make ports available to the world outside this container
# Note: The port numbers are changed for the secondary system.
EXPOSE 1234 1235

# Set the AWS region
ENV AWS_REGION us-west-2
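A hedged local smoke check, not from the repository: with the image running and both exposed ports published (for example docker run -p 1234:1234 -p 1235:1235 and the image tag of your build), each Flask process in test_app.py below should answer on its own port. The localhost URLs are assumptions for local testing only.

# Assumes the container is already running locally with ports 1234 and 1235 published.
import requests

for port in (1234, 1235):
    response = requests.get(f"http://localhost:{port}/", timeout=10)
    assert response.status_code == 200, f"no response on port {port}"
    assert response.text == f"Hello from Port {port}!", f"unexpected body on port {port}"
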
49 changes: 30 additions & 19 deletions tests/test-data/ialirt_ec2/test_app.py
@@ -3,35 +3,36 @@
A simple Flask web application designed to be Dockerized and deployed on an
EC2 instance. Intended for verifying the successful deployment and operation in
an ECR and EC2 setup. The application listens on all interfaces (0.0.0.0) at
port 8080, allowing external access for testing.
the defined ports, allowing external access for testing.
"""

import multiprocessing
import os

from flask import Flask

# Create a Flask application
app = Flask(__name__)
# Note: The port number is changed from 8080 to 80 in the secondary Dockerfile.
port = 80

def create_app(port):
"""Create Flask application for a specific port."""
app = Flask(__name__)

# Decorator that tells Flask what URL
# should trigger the function that follows.
@app.route("/")
def hello():
"""Hello world function to test with."""
return f"Hello World from Port {port}."
# Decorator that tells Flask what URL
# should trigger the function that follows.
@app.route("/")
def hello():
"""Hello world function to test with."""
return f"Hello from Port {port}!"

@app.route("/list")
def list_files():
"""List files in the mounted S3 bucket."""
files = os.listdir("/mnt/s3/packets")
return "<br>".join(files)

@app.route("/list")
def list_files():
"""List files in the mounted S3 bucket."""
files = os.listdir("/mnt/s3/packets")
return "<br>".join(files)
app.run(host="0.0.0.0", port=port)


def create_and_save_file():
def create_and_save_file(port):
"""Create and save file to S3 bucket."""
s3_mount_dir = "/mnt/s3/packets"

@@ -50,5 +51,15 @@ def create_and_save_file():


if __name__ == "__main__":
create_and_save_file()
app.run(host="0.0.0.0", port=port)
ports = [1234, 1235]
for port in ports:
create_and_save_file(port)
processes = [
multiprocessing.Process(target=create_app, args=(port,)) for port in ports
]

for process in processes:
process.start()

for process in processes:
process.join()
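
The body of create_and_save_file(port) is elided in this diff; the sketch below is an assumption of what it presumably does, inferred from the mount directory shown above (/mnt/s3/packets) and from the integration test, which expects a test_file with the port number in its name. It is illustrative only, not the committed implementation.

import os


def create_and_save_file(port):
    """Hypothetical reconstruction: write a per-port marker file into the S3 mount."""
    s3_mount_dir = "/mnt/s3/packets"
    os.makedirs(s3_mount_dir, exist_ok=True)
    file_path = os.path.join(s3_mount_dir, f"test_file{port}.txt")
    with open(file_path, "w") as file:
        file.write(f"Test file for port {port}.\n")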
