diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b9e4b8e..5b80332 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -35,12 +35,4 @@ updates: package-ecosystem: terraform schedule: interval: weekly - - - directory: /examples/basic_usage - ignore: - # Managed by cisagov/skeleton-tf-module - - dependency-name: hashicorp/aws - package-ecosystem: terraform - schedule: - interval: weekly version: 2 diff --git a/.gitignore b/.gitignore index 56f4427..6d9d09c 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,10 @@ # Files already tracked by Git are not affected. # See: https://git-scm.com/docs/gitignore +## Project Specific ## +add_security_headers.zip +lambda_build.zip + ## Python ## __pycache__ .mypy_cache diff --git a/README.md b/README.md index 11bc07e..23fe623 100644 --- a/README.md +++ b/README.md @@ -2,33 +2,73 @@ [![GitHub Build Status](https://github.com/cisagov/publish-egress-ip-terraform/workflows/build/badge.svg)](https://github.com/cisagov/publish-egress-ip-terraform/actions) -This is a generic skeleton project that can be used to quickly get a -new [cisagov](https://github.com/cisagov) [Terraform -module](https://www.terraform.io/docs/modules/index.html) GitHub -repository started. This skeleton project contains [licensing -information](LICENSE), as well as [pre-commit -hooks](https://pre-commit.com) and -[GitHub Actions](https://github.com/features/actions) configurations -appropriate for the major languages that we use. - -See [here](https://www.terraform.io/docs/modules/index.html) for more -details on Terraform modules and the standard module structure. 
- -## Usage ## - -```hcl -module "example" { - source = "github.com/cisagov/publish-egress-ip-terraform" - - aws_region = "us-west-1" - aws_availability_zone = "b" - subnet_id = "subnet-0123456789abcdef0" -} -``` - -## Examples ## - -- [Basic usage](https://github.com/cisagov/publish-egress-ip-terraform/tree/develop/examples/basic_usage) +This repository contains Terraform code to deploy +[`cisagov/publish-egress-ip-lambda`](https://github.com/cisagov/publish-egress-ip-lambda) +and related resources. + +## Pre-requisites ## + +- [Terraform](https://www.terraform.io/) installed on your system. +- An accessible AWS S3 bucket to store Terraform state + (specified in [`backend.tf`](backend.tf)). +- An accessible AWS DynamoDB database to store the Terraform state lock + (specified in [`backend.tf`](backend.tf)). +- Access to all of the Terraform remote states specified in + [`remote_states.tf`](remote_states.tf). +- A valid Lambda deployment file must be present in the root directory and have + the same name as `var.lambda_zip_filename` (e.g. "lambda_build.zip"). +- A Terraform [variables](variables.tf) file customized for your + assessment environment, for example: + + ```hcl + bucket_name = "s3-cdn.egress-info.my.domain.gov" + domain = "egress-info.my.domain.gov" + deployment_role_arn = "arn:aws:iam::123456789012:role/deployment-role" + file_configs = [ + { + "filename": "all.txt", + "app_regex": ".*", + "static_ips": [], + "description": "This file contains a list of all public IP addresses." + }, + { + "filename": "vs.txt", + "app_regex": "^Vulnerability Scanning$", + "static_ips": [ + "192.168.1.1/32", + "192.168.2.2/32" + ], + "description": "This file contains a list of all IPs used for Vulnerability Scanning." + } + ] + route53_role_arn = "arn:aws:iam::123456789012:role/route53-role" + + tags = { + Team = "VM Fusion - Development" + Application = "Publish Egress IP" + Workspace = "production" + } + ``` + +## Building the Terraform-based infrastructure ## + +1. 
Create a Terraform workspace (if you haven't already done so) for
+   your assessment by running `terraform workspace new <workspace_name>`.
+1. Create a `<workspace_name>.tfvars` file with all of the required
+   variables (see [Inputs](#inputs) below for details).
+1. Run the command `terraform init`.
+1. Create all Terraform infrastructure by running the command:
+
+   ```console
+   terraform apply -var-file=<workspace_name>.tfvars
+   ```
+
+After the Terraform code has been deployed and the Lambda has run
+successfully, you will be able to see your published egress IP address
+information at: `https://<domain>`
+
+If you defined additional files via `var.file_configs`, they can be
+accessed at: `https://<domain>/<filename>`
 
 ## Requirements ##
 
@@ -43,37 +83,81 @@ module "example" {
 
 | Name | Version |
 |------|---------|
 | aws | ~> 4.9 |
+| aws.deploy | ~> 4.9 |
+| aws.organizationsreadonly | ~> 4.9 |
+| aws.route53resourcechange | ~> 4.9 |
+| terraform | n/a |
 
 ## Modules ##
 
-No modules.
+| Name | Source | Version |
+|------|--------|---------|
+| security\_header\_lambda | transcend-io/lambda-at-edge/aws | 0.5.0 |
 
 ## Resources ##
 
 | Name | Type |
 |------|------|
-| [aws_instance.example](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance) | resource |
-| [aws_ami.example](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
-| [aws_default_tags.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/default_tags) | data source |
+| [aws_cloudfront_distribution.rules_s3_distribution](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution) | resource |
+| [aws_cloudwatch_event_rule.lambda_schedule](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
+| [aws_cloudwatch_event_target.lambda_schedule](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
+| 
[aws_cloudwatch_log_group.lambda_logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | +| [aws_iam_policy.lambdaexecution_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.lambdaexecution_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.lambdaexecution_policy_attachment](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_lambda_function.publish_egress_ip](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function) | resource | +| [aws_lambda_permission.allow_cloudwatch](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource | +| [aws_route53_record.rules_vm_A](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_record) | resource | +| [aws_route53_record.rules_vm_AAAA](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_record) | resource | +| [aws_s3_bucket.egress_info](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket) | resource | +| [aws_s3_bucket.lambda_at_edge](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket) | resource | +| [aws_s3_bucket_acl.egress_info](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_acl) | resource | +| [aws_s3_bucket_public_access_block.lambda_artifact_bucket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_public_access_block) | resource | +| [aws_s3_bucket_server_side_encryption_configuration.egress_info](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_server_side_encryption_configuration) | resource | +| 
[aws_s3_bucket_server_side_encryption_configuration.lambda_at_edge](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_server_side_encryption_configuration) | resource | +| [aws_s3_bucket_versioning.lambda_at_edge](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_versioning) | resource | +| [aws_acm_certificate.rules_cert](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/acm_certificate) | data source | +| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_caller_identity.deploy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_iam_policy_document.lambda_assume_role_doc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.lambdaexecution_doc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_organizations_organization.org](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/organizations_organization) | data source | +| [terraform_remote_state.dns](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | +| [terraform_remote_state.dns_cyber_dhs_gov](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | +| [terraform_remote_state.master](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | +| [terraform_remote_state.terraform](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | ## Inputs ## | Name | Description | Type | Default | Required | 
|------|-------------|------|---------|:--------:| -| ami\_owner\_account\_id | The ID of the AWS account that owns the Example AMI, or "self" if the AMI is owned by the same account as the provisioner. | `string` | `"self"` | no | -| aws\_availability\_zone | The AWS availability zone to deploy into (e.g. a, b, c, etc.). | `string` | `"a"` | no | +| account\_name\_regex | A regular expression that will be applied against the names of all non-master accounts in the AWS organization. If the name of an account matches the regular expression, that account will be queried for egress IP addresses to publish. The default value should not match any valid account name. | `string` | `"^$"` | no | +| application\_tag | The name of the AWS tag whose value represents the application associated with an IP address. | `string` | `"Application"` | no | | aws\_region | The AWS region to deploy into (e.g. us-east-1). | `string` | `"us-east-1"` | no | -| subnet\_id | The ID of the AWS subnet to deploy into (e.g. subnet-0123456789abcdef0). | `string` | n/a | yes | +| bucket\_name | The name of the S3 bucket to publish egress IP address information to. | `string` | n/a | yes | +| deployment\_role\_arn | The ARN of the IAM role to use to deploy the Lambda and all related resources. | `string` | n/a | yes | +| domain | The domain hosting the published file(s) containing egress IPs. Also used for the CloudFront distribution and certificate. | `string` | n/a | yes | +| ec2\_read\_role\_name | The name of the IAM role that allows read access to the necessary EC2 attributes. Note that this role must exist in each account that you want to query. | `string` | `"EC2ReadOnly"` | no | +| extraorg\_account\_ids | A list of AWS account IDs corresponding to "extra" accounts that you want to query for egress IPs to publish. | `list(string)` | `[]` | no | +| file\_configs | A list of objects that define the files to be published. 
"app\_regex" specifies a regular expression matching the application name (based on the variable var.application\_tag). "description" is the description of the published file. "filename" is the name to assign the published file. "static\_ips" is a list of CIDR blocks that will always be included in the published file. An example file configuration looks like this: `[{"app_regex": ".*", "description": "This file contains a list of all public IP addresses to be published.", "filename": "all.txt", "static_ips": []}, {"app_regex": "^Vulnerability Scanning$", "description": "This file contains a list of all IPs used for Vulnerability Scanning.", "filename": "vs.txt", "static_ips": ["192.168.1.1/32", "192.168.2.2/32"]}]` | `list(object({ app_regex = string, description = string, filename = string, static_ips = list(string) }))` | `[]` | no |
+| file\_header | The header template for each published file. When the file is published, newline characters are automatically added between each item in the list. The following variables are available within the template: {domain} - the domain where the published files are located, {filename} - the name of the published file, {timestamp} - the timestamp when the file was published, {description} - the description of the published file | `list(string)` | ```[ "###", "# https://{domain}/{filename}", "# {timestamp}", "# {description}", "###" ]``` | no |
+| lambda\_function\_description | The description of the Lambda function. | `string` | `"Lambda function to publish egress IP addresses to an S3 bucket configured with a CloudFront distribution for HTTPS access."` | no |
+| lambda\_function\_name | The name of the Lambda function to publish egress IP addresses. | `string` | `"publish-egress-ip"` | no |
+| lambda\_schedule\_interval | The number of minutes between scheduled runs of the Lambda function to publish egress IP addresses. This value must be an integer greater than 0. 
| `number` | `60` | no | +| lambda\_zip\_filename | The name of the ZIP file containing the Lambda function deployment package to publish egress IP addresses. The file must be located in the root directory of this project. | `string` | `"lambda_build.zip"` | no | +| lambdaexecution\_role\_description | The description to associate with the IAM role (and policy) that allows the publish-egress-ip Lambda to query other accounts for public EC2 IP information, publish objects to the S3 bucket, and write CloudWatch logs. | `string` | `"Allows the publish-egress-ip Lambda to query other accounts for public EC2 IP information, publish objects to the S3 bucket, and write CloudWatch logs."` | no | +| lambdaexecution\_role\_name | The name to assign the IAM role (and policy) that allows the publish-egress-ip Lambda to query other accounts for public EC2 IP information, publish objects to the S3 bucket, and write CloudWatch logs. | `string` | `"PublishEgressIPLambda"` | no | +| publish\_egress\_tag | The name of the AWS resource tag whose value represents whether the EC2 instance or elastic IP should have its public IP address published. | `string` | `"Publish Egress"` | no | +| region\_filters | A list of AWS EC2 region filters to use when querying for IP addresses to publish. If a filter is not specified, the query will be performed in all regions. An example filter to restrict to US regions looks like this: `[{ "Name" : "endpoint", "Values" : ["*.us-*"] }]`. For more information, refer to . | `list(object({ Name = string, Values = list(string) }))` | `[]` | no | +| root\_object | The root object in the S3 bucket to serve when no path is provided or an error occurs. | `string` | `"all.txt"` | no | +| route53\_role\_arn | The ARN of the IAM role to use to modify Route53 DNS resources. | `string` | n/a | yes | +| tags | Tags to apply to all AWS resources created. 
| `map(string)` | `{}` | no | ## Outputs ## | Name | Description | |------|-------------| -| arn | The EC2 instance ARN. | -| availability\_zone | The AZ where the EC2 instance is deployed. | -| id | The EC2 instance ID. | -| private\_ip | The private IP of the EC2 instance. | -| subnet\_id | The ID of the subnet where the EC2 instance is deployed. | +| bucket | The S3 bucket where egress IP address information is published. | ## Notes ## @@ -82,13 +166,6 @@ Running `pre-commit` requires running `terraform init` in every directory that contains Terraform code. In this repository, these are the main directory and every directory under `examples/`. -## New Repositories from a Skeleton ## - -Please see our [Project Setup guide](https://github.com/cisagov/development-guide/tree/develop/project_setup) -for step-by-step instructions on how to start a new repository from -a skeleton. This will save you time and effort when configuring a -new repository! - ## Contributing ## We welcome contributions! 
Please see [`CONTRIBUTING.md`](CONTRIBUTING.md) for diff --git a/add_security_headers/index.js b/add_security_headers/index.js new file mode 100644 index 0000000..26fe4e3 --- /dev/null +++ b/add_security_headers/index.js @@ -0,0 +1,47 @@ +exports.handler = (event, context, callback) => { + /* Get contents of response */ + const response = event.Records[0].cf.response; + const headers = response.headers; + + /* Add security headers */ + headers["strict-transport-security"] = [ + { + key: "Strict-Transport-Security", + value: "max-age=31536000; includeSubdomains; preload", + }, + ]; + headers["content-security-policy"] = [ + { + key: "Content-Security-Policy", + value: + "default-src 'none'; img-src 'none'; script-src 'none'; style-src 'none'; object-src 'none'", + }, + ]; + headers["x-content-type-options"] = [ + { + key: "X-Content-Type-Options", + value: "nosniff", + }, + ]; + headers["x-frame-options"] = [ + { + key: "X-Frame-Options", + value: "DENY", + }, + ]; + headers["x-xss-protection"] = [ + { + key: "X-XSS-Protection", + value: "1; mode=block", + }, + ]; + headers["referrer-policy"] = [ + { + key: "Referrer-Policy", + value: "same-origin", + }, + ]; + + /* Return the modified response */ + callback(null, response); +}; diff --git a/backend.tf b/backend.tf new file mode 100644 index 0000000..f759744 --- /dev/null +++ b/backend.tf @@ -0,0 +1,10 @@ +terraform { + backend "s3" { + encrypt = true + bucket = "cisa-cool-terraform-state" + dynamodb_table = "terraform-state-lock" + profile = "cool-terraform-backend" + region = "us-east-1" + key = "publish-egress-ip-terraform/terraform.tfstate" + } +} diff --git a/cloudfront.tf b/cloudfront.tf new file mode 100644 index 0000000..26da197 --- /dev/null +++ b/cloudfront.tf @@ -0,0 +1,158 @@ +# ------------------------------------------------------------------------------ +# The CloudFront distribution and related S3/Lambda resources that allow us to +# use an HTTPS endpoint, which S3 websites do not support natively. 
+# ------------------------------------------------------------------------------ + +locals { + # bucket origin id + s3_origin_id = "S3-${aws_s3_bucket.egress_info.id}" +} + +data "aws_acm_certificate" "rules_cert" { + # This certificate must exist prior to applying this Terraform. + # For an example, see cisagov/cool-dns-cyber.dhs.gov/acm_rules_vm.tf + provider = aws.deploy + + domain = var.domain + most_recent = true + statuses = ["ISSUED"] + types = ["AMAZON_ISSUED"] +} + +# An S3 bucket where artifacts for the Lambda@Edge can be stored +resource "aws_s3_bucket" "lambda_at_edge" { + provider = aws.deploy + + bucket_prefix = "publish-egress-ip-lambda-at-edge-" +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "lambda_at_edge" { + provider = aws.deploy + + bucket = aws_s3_bucket.lambda_at_edge.id + + rule { + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } +} + +resource "aws_s3_bucket_versioning" "lambda_at_edge" { + provider = aws.deploy + + bucket = aws_s3_bucket.lambda_at_edge.id + + versioning_configuration { + status = "Enabled" + } +} + +# This blocks ANY public access to the bucket or the objects it +# contains, even if misconfigured to allow public access. +resource "aws_s3_bucket_public_access_block" "lambda_artifact_bucket" { + provider = aws.deploy + + block_public_acls = true + block_public_policy = true + bucket = aws_s3_bucket.lambda_at_edge.id + ignore_public_acls = true + restrict_public_buckets = true +} + +# A Lambda@Edge for injecting security headers +module "security_header_lambda" { + providers = { + aws = aws.deploy + } + source = "transcend-io/lambda-at-edge/aws" + version = "0.5.0" + + description = "Adds HSTS and other security headers to the response." + lambda_code_source_dir = "${path.root}/add_security_headers" + name = "add_security_headers" + # nodejs18.x appears to be the latest supported runtime until we move beyond + # version 4.9 of the Terraform AWS provider. 
+ runtime = "nodejs18.x" + s3_artifact_bucket = aws_s3_bucket.lambda_at_edge.id +} + +resource "aws_cloudfront_origin_access_control" "egress_info" { + provider = aws.deploy + + description = var.cloudfront_distribution_oac_description + name = var.cloudfront_distribution_oac_name + + origin_access_control_origin_type = "s3" + signing_behavior = "always" + signing_protocol = "sigv4" +} + +resource "aws_cloudfront_distribution" "egress_info" { + provider = aws.deploy + + aliases = [var.domain] + comment = "Created by cisagov/publish-egress-ip-terraform." + default_root_object = var.root_object + enabled = true + is_ipv6_enabled = true + price_class = "PriceClass_100" + + custom_error_response { + error_caching_min_ttl = 30 + error_code = 403 + response_code = 200 + response_page_path = "/${var.root_object}" + } + + custom_error_response { + error_caching_min_ttl = 30 + error_code = 404 + response_code = 200 + response_page_path = "/${var.root_object}" + } + + default_cache_behavior { + allowed_methods = ["GET", "HEAD"] + cached_methods = ["GET", "HEAD"] + compress = true + default_ttl = 30 + max_ttl = 30 + min_ttl = 0 + target_origin_id = local.s3_origin_id + viewer_protocol_policy = "redirect-to-https" + + forwarded_values { + query_string = false + cookies { + forward = "none" + } + } + + lambda_function_association { + # Inject security headers via Lambda@Edge + event_type = "origin-response" + include_body = false + lambda_arn = module.security_header_lambda.arn + } + } + + origin { + domain_name = aws_s3_bucket.egress_info.bucket_regional_domain_name + origin_access_control_id = aws_cloudfront_origin_access_control.egress_info.id + origin_id = local.s3_origin_id + } + + restrictions { + geo_restriction { + locations = ["AS", "GU", "MP", "PR", "US", "VI"] + restriction_type = "whitelist" + } + } + + viewer_certificate { + acm_certificate_arn = data.aws_acm_certificate.rules_cert.arn + minimum_protocol_version = "TLSv1.1_2016" + ssl_support_method = "sni-only" + 
} +} diff --git a/examples/basic_usage/.terraform-docs.yml b/examples/basic_usage/.terraform-docs.yml deleted file mode 120000 index 2afdcf8..0000000 --- a/examples/basic_usage/.terraform-docs.yml +++ /dev/null @@ -1 +0,0 @@ -../../.terraform-docs.yml \ No newline at end of file diff --git a/examples/basic_usage/README.md b/examples/basic_usage/README.md deleted file mode 100644 index d120104..0000000 --- a/examples/basic_usage/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# Launch an example EC2 instance in a new VPC # - -## Usage ## - -To run this example you need to execute the `terraform init` command -followed by the `terraform apply` command. - -Note that this example may create resources which cost money. Run -`terraform destroy` when you no longer need these resources. - - -## Requirements ## - -| Name | Version | -|------|---------| -| terraform | ~> 1.0 | -| aws | ~> 4.9 | - -## Providers ## - -| Name | Version | -|------|---------| -| aws | ~> 4.9 | - -## Modules ## - -| Name | Source | Version | -|------|--------|---------| -| example | ../../ | n/a | - -## Resources ## - -| Name | Type | -|------|------| -| [aws_subnet.example](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | -| [aws_vpc.example](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc) | resource | - -## Inputs ## - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| ami\_owner\_account\_id | The ID of the AWS account that owns the AMI, or "self" if the AMI is owned by the same account as the provisioner. | `string` | `"self"` | no | -| aws\_availability\_zone | The AWS availability zone to deploy into (e.g. a, b, c, etc.). | `string` | `"a"` | no | -| aws\_region | The AWS region to deploy into (e.g. us-east-1). | `string` | `"us-east-1"` | no | -| tags | Tags to apply to all AWS resources created. 
| `map(string)` | ```{ "Testing": true }``` | no | -| tf\_role\_arn | The ARN of the role that can terraform non-specialized resources. | `string` | n/a | yes | - -## Outputs ## - -| Name | Description | -|------|-------------| -| arn | The EC2 instance ARN. | -| availability\_zone | The AZ where the EC2 instance is deployed. | -| id | The EC2 instance ID. | -| private\_ip | The private IP of the EC2 instance. | -| subnet\_id | The ID of the subnet where the EC2 instance is deployed. | - diff --git a/examples/basic_usage/main.tf b/examples/basic_usage/main.tf deleted file mode 100644 index efdc5bc..0000000 --- a/examples/basic_usage/main.tf +++ /dev/null @@ -1,26 +0,0 @@ -provider "aws" { - # Our primary provider uses our terraform role - assume_role { - role_arn = var.tf_role_arn - session_name = "terraform-example" - } - default_tags { - tags = var.tags - } - region = var.aws_region -} - -#------------------------------------------------------------------------------- -# Configure the example module. -#------------------------------------------------------------------------------- -module "example" { - source = "../../" - providers = { - aws = aws - } - - ami_owner_account_id = var.ami_owner_account_id - aws_availability_zone = var.aws_availability_zone - aws_region = var.aws_region - subnet_id = aws_subnet.example.id -} diff --git a/examples/basic_usage/outputs.tf b/examples/basic_usage/outputs.tf deleted file mode 100644 index 542df31..0000000 --- a/examples/basic_usage/outputs.tf +++ /dev/null @@ -1,24 +0,0 @@ -output "arn" { - description = "The EC2 instance ARN." - value = module.example.arn -} - -output "availability_zone" { - description = "The AZ where the EC2 instance is deployed." - value = module.example.availability_zone -} - -output "id" { - description = "The EC2 instance ID." - value = module.example.id -} - -output "private_ip" { - description = "The private IP of the EC2 instance." 
- value = module.example.private_ip -} - -output "subnet_id" { - description = "The ID of the subnet where the EC2 instance is deployed." - value = module.example.subnet_id -} diff --git a/examples/basic_usage/variables.tf b/examples/basic_usage/variables.tf deleted file mode 100644 index 70b275a..0000000 --- a/examples/basic_usage/variables.tf +++ /dev/null @@ -1,42 +0,0 @@ -# ------------------------------------------------------------------------------ -# Required parameters -# -# You must provide a value for each of these parameters. -# ------------------------------------------------------------------------------ - -variable "tf_role_arn" { - description = "The ARN of the role that can terraform non-specialized resources." - type = string -} - -# ------------------------------------------------------------------------------ -# Optional parameters -# -# These parameters have reasonable defaults. -# ------------------------------------------------------------------------------ - -variable "ami_owner_account_id" { - default = "self" - description = "The ID of the AWS account that owns the AMI, or \"self\" if the AMI is owned by the same account as the provisioner." - type = string -} - -variable "aws_availability_zone" { - default = "a" - description = "The AWS availability zone to deploy into (e.g. a, b, c, etc.)." - type = string -} - -variable "aws_region" { - default = "us-east-1" - description = "The AWS region to deploy into (e.g. us-east-1)." - type = string -} - -variable "tags" { - default = { - Testing = true - } - description = "Tags to apply to all AWS resources created." - type = map(string) -} diff --git a/examples/basic_usage/versions.tf b/examples/basic_usage/versions.tf deleted file mode 100644 index 9db27b0..0000000 --- a/examples/basic_usage/versions.tf +++ /dev/null @@ -1,23 +0,0 @@ -terraform { - # If you use any other providers you should also pin them to the - # major version currently being used. 
This practice will help us - # avoid unwelcome surprises. - required_providers { - # Version 4.9 of the Terraform AWS provider made changes to the S3 bucket - # refactor that is in place for versions 4.0-4.8 of the provider. With v4.9 - # only non-breaking changes and deprecation notices are introduced. Using - # this version will simplify migration to the new, broken out AWS S3 bucket - # configuration resources. Please see - # https://github.com/hashicorp/terraform-provider-aws/pull/23985 - # for more information about the changes in v4.9 and - # https://www.hashicorp.com/blog/terraform-aws-provider-4-0-refactors-s3-bucket-resource - # for more information about the S3 bucket refactor. - aws = { - source = "hashicorp/aws" - version = "~> 4.9" - } - } - - # We want to hold off on 1.1 or higher until we have tested it. - required_version = "~> 1.0" -} diff --git a/examples/basic_usage/vpc.tf b/examples/basic_usage/vpc.tf deleted file mode 100644 index 947e0eb..0000000 --- a/examples/basic_usage/vpc.tf +++ /dev/null @@ -1,20 +0,0 @@ -#------------------------------------------------------------------------------- -# Create a VPC -#------------------------------------------------------------------------------- - -resource "aws_vpc" "example" { - cidr_block = "10.230.0.0/24" - enable_dns_hostnames = true - tags = { "Name" : "Example" } -} - -#------------------------------------------------------------------------------- -# Create a subnet -#------------------------------------------------------------------------------- - -resource "aws_subnet" "example" { - availability_zone = "${var.aws_region}${var.aws_availability_zone}" - cidr_block = "10.230.0.0/28" - tags = { "Name" : "Example" } - vpc_id = aws_vpc.example.id -} diff --git a/lambda.tf b/lambda.tf new file mode 100644 index 0000000..1f64caa --- /dev/null +++ b/lambda.tf @@ -0,0 +1,69 @@ +# ------------------------------------------------------------------------------ +# The AWS Lambda function that is used to 
publish egress IP addresses. +# The ZIP file is created with: +# http://github.com/cisagov/publish-egress-ip-lambda +# ------------------------------------------------------------------------------ + +resource "aws_lambda_function" "publish_egress_ip" { + provider = aws.deploy + + description = var.lambda_function_description + filename = var.lambda_zip_filename + function_name = var.lambda_function_name + handler = "lambda_handler.handler" + memory_size = 128 + role = aws_iam_role.lambdaexecution_role.arn + runtime = "python3.9" + source_code_hash = filebase64sha256(var.lambda_zip_filename) + timeout = 900 +} + +# The CloudWatch log group for the Lambda function +resource "aws_cloudwatch_log_group" "lambda_logs" { + provider = aws.deploy + + name = format("/aws/lambda/%s", var.lambda_function_name) + retention_in_days = 30 +} + +# Schedule the Lambda function to run every X minute(s) +resource "aws_cloudwatch_event_rule" "lambda_schedule" { + provider = aws.deploy + + description = format("Executes %s Lambda every %d minute(s).", var.lambda_function_name, var.lambda_schedule_interval) + name = format("%s-every-%d-minutes", var.lambda_function_name, var.lambda_schedule_interval) + schedule_expression = format("rate(%d minute%s)", var.lambda_schedule_interval, var.lambda_schedule_interval != 1 ? 
"s" : "") +} + +resource "aws_cloudwatch_event_target" "lambda_schedule" { + provider = aws.deploy + + arn = aws_lambda_function.publish_egress_ip.arn + + input = jsonencode({ + account_ids = tolist(local.accounts_to_check) + application_tag = var.application_tag + bucket_name = var.bucket_name + domain = var.domain + ec2_read_role_name = var.ec2_read_role_name + file_configs = var.file_configs + file_header = var.file_header + publish_egress_tag = var.publish_egress_tag + region_filters = var.region_filters + task = "publish" + }) + + rule = aws_cloudwatch_event_rule.lambda_schedule.name + target_id = "lambda" +} + +# Allow the CloudWatch event to invoke the Lambda function +resource "aws_lambda_permission" "allow_cloudwatch" { + provider = aws.deploy + + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.publish_egress_ip.function_name + principal = "events.amazonaws.com" + source_arn = aws_cloudwatch_event_rule.lambda_schedule.arn + statement_id = "AllowExecutionFromCloudWatch" +} diff --git a/lambda_assume_role_policy_doc.tf b/lambda_assume_role_policy_doc.tf new file mode 100644 index 0000000..e37c63e --- /dev/null +++ b/lambda_assume_role_policy_doc.tf @@ -0,0 +1,18 @@ +# ------------------------------------------------------------------------------ +# Create an IAM policy document that only allows AWS Lambdas to assume the +# role this policy is attached to. 
+# ------------------------------------------------------------------------------ + +data "aws_iam_policy_document" "lambda_assume_role_doc" { + statement { + actions = [ + "sts:AssumeRole", + "sts:TagSession", + ] + + principals { + type = "Service" + identifiers = ["lambda.amazonaws.com"] + } + } +} diff --git a/lambda_policy.tf b/lambda_policy.tf new file mode 100644 index 0000000..e29e0c0 --- /dev/null +++ b/lambda_policy.tf @@ -0,0 +1,53 @@ +# ------------------------------------------------------------------------------ +# Create the IAM policy that allows the publish-egress-ip Lambda to access +# all resources needed to do its job. +# ------------------------------------------------------------------------------ + +data "aws_iam_policy_document" "lambdaexecution_doc" { + statement { + actions = [ + "logs:CreateLogGroup", + ] + resources = [ + format("arn:aws:logs:%s:%s:*", var.aws_region, local.deployment_account_id) + ] + } + + statement { + actions = [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ] + resources = [ + format("arn:aws:logs:%s:%s:log-group:/aws/lambda/%s:*", + var.aws_region, local.deployment_account_id, var.lambda_function_name) + ] + } + + statement { + actions = [ + "s3:PutObject", + ] + resources = [ + "${aws_s3_bucket.egress_info.arn}/*" + ] + } + + statement { + actions = [ + "sts:AssumeRole", + "sts:TagSession", + ] + resources = [ + format("arn:aws:iam::*:role/%s", var.ec2_read_role_name), + ] + } +} + +resource "aws_iam_policy" "lambdaexecution_policy" { + provider = aws.deploy + + description = var.lambdaexecution_role_description + name = var.lambdaexecution_role_name + policy = data.aws_iam_policy_document.lambdaexecution_doc.json +} diff --git a/lambda_role.tf b/lambda_role.tf new file mode 100644 index 0000000..af0a71e --- /dev/null +++ b/lambda_role.tf @@ -0,0 +1,19 @@ +# ------------------------------------------------------------------------------ +# Create the IAM role that allows the publish-egress-ip Lambda to access 
+# all resources needed to do its job. +# ------------------------------------------------------------------------------ + +resource "aws_iam_role" "lambdaexecution_role" { + provider = aws.deploy + + assume_role_policy = data.aws_iam_policy_document.lambda_assume_role_doc.json + description = var.lambdaexecution_role_description + name = var.lambdaexecution_role_name +} + +resource "aws_iam_role_policy_attachment" "lambdaexecution_policy_attachment" { + provider = aws.deploy + + policy_arn = aws_iam_policy.lambdaexecution_policy.arn + role = aws_iam_role.lambdaexecution_role.name +} diff --git a/locals.tf b/locals.tf new file mode 100644 index 0000000..0edb7d0 --- /dev/null +++ b/locals.tf @@ -0,0 +1,43 @@ +# ------------------------------------------------------------------------------ +# Retrieve the effective Account ID, User ID, and ARN in which Terraform is +# authorized. This is used to calculate the session names for assumed roles. +# ------------------------------------------------------------------------------ +data "aws_caller_identity" "current" {} + +# ------------------------------------------------------------------------------ +# Retrieve the effective Account ID, User ID, and ARN from the deployment +# account provider. +# ------------------------------------------------------------------------------ +data "aws_caller_identity" "deploy" { + provider = aws.deploy +} + +# ------------------------------------------------------------------------------ +# Retrieve the information for all accounts in the organization. This is used +# to lookup the Users account ID for use in the assume role policy. +# ------------------------------------------------------------------------------ +data "aws_organizations_organization" "org" { + provider = aws.organizationsreadonly +} + +# ------------------------------------------------------------------------------ +# Evaluate expressions for use throughout this configuration. 
+# ------------------------------------------------------------------------------ +locals { + accounts_to_check = toset(concat([ + for account in data.aws_organizations_organization.org.non_master_accounts : + account.id + if length(regexall(var.account_name_regex, account.name)) > 0 + ], var.extraorg_account_ids)) + + # Extract the user name of the current caller for use + # as assume role session names. + caller_user_name = split("/", data.aws_caller_identity.current.arn)[1] + + # The Route53 hosted zone ID for every CloudFront distribution is always the + # same, as mentioned here: + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-route53-aliastarget.html#cfn-route53-aliastarget-hostedzoneid + cloudfront_zone_id = "Z2FDTNDATAQYW2" + + deployment_account_id = data.aws_caller_identity.deploy.account_id +} diff --git a/main.tf b/main.tf deleted file mode 100644 index 0646a05..0000000 --- a/main.tf +++ /dev/null @@ -1,61 +0,0 @@ -# ------------------------------------------------------------------------------ -# Deploy the example AMI from cisagov/skeleton-packer in AWS. -# ------------------------------------------------------------------------------ - -# ------------------------------------------------------------------------------ -# Look up the latest example AMI from cisagov/skeleton-packer. -# -# NOTE: This Terraform data source must return at least one AMI result -# or the apply will fail. 
-# ------------------------------------------------------------------------------ - -# The AMI from cisagov/skeleton-packer -data "aws_ami" "example" { - filter { - name = "name" - values = [ - "example-hvm-*-x86_64-ebs", - ] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - filter { - name = "root-device-type" - values = ["ebs"] - } - - most_recent = true - owners = [ - var.ami_owner_account_id - ] -} - -# The default tags configured for the default provider -data "aws_default_tags" "default" {} - -# The example EC2 instance -resource "aws_instance" "example" { - ami = data.aws_ami.example.id - availability_zone = "${var.aws_region}${var.aws_availability_zone}" - instance_type = "t3.micro" - subnet_id = var.subnet_id - - # The tag or tags specified here will be merged with the provider's - # default tags. - tags = { - "Name" = "Example" - } - # volume_tags does not yet inherit the default tags from the - # provider. See hashicorp/terraform-provider-aws#19188 for more - # details. - volume_tags = merge( - data.aws_default_tags.default.tags, - { - "Name" = "Example" - }, - ) -} diff --git a/outputs.tf b/outputs.tf index ce18699..42499ba 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1,24 +1,4 @@ -output "arn" { - description = "The EC2 instance ARN." - value = aws_instance.example.arn -} - -output "availability_zone" { - description = "The AZ where the EC2 instance is deployed." - value = aws_instance.example.availability_zone -} - -output "id" { - description = "The EC2 instance ID." - value = aws_instance.example.id -} - -output "private_ip" { - description = "The private IP of the EC2 instance." - value = aws_instance.example.private_ip -} - -output "subnet_id" { - description = "The ID of the subnet where the EC2 instance is deployed." - value = aws_instance.example.subnet_id +output "bucket" { + value = aws_s3_bucket.egress_info + description = "The S3 bucket where egress IP address information is published." 
} diff --git a/providers.tf b/providers.tf index bc1ee01..039122e 100644 --- a/providers.tf +++ b/providers.tf @@ -1,13 +1,50 @@ -# This is an example of what a provider looks like. -# -# provider "aws" { -# alias = "myprovider" -# assume_role { -# role_arn = "arn:aws:iam::123456789012:role/MyRole" -# session_name = "MySessionName" -# } -# default_tags { -# tags = var.tags -# } -# region = var.aws_region -# } +# This is the "default" provider that is used to obtain the caller's +# credentials, which are used to set the session name when assuming roles in +# the other providers. + +provider "aws" { + default_tags { + tags = var.tags + } + region = var.aws_region +} + +# The provider used to create resources inside the AWS account where +# the Lambda and S3 bucket will be deployed. +provider "aws" { + alias = "deploy" + assume_role { + role_arn = var.deployment_role_arn + session_name = local.caller_user_name + } + default_tags { + tags = var.tags + } + region = var.aws_region +} + +# The provider that can modify Route53 resources in the DNS account. +provider "aws" { + alias = "route53resourcechange" + assume_role { + role_arn = var.route53_role_arn + session_name = local.caller_user_name + } + default_tags { + tags = var.tags + } + region = var.aws_region +} + +# The provider used to lookup account IDs in the AWS organization. See locals. 
+provider "aws" { + alias = "organizationsreadonly" + assume_role { + role_arn = data.terraform_remote_state.master.outputs.organizationsreadonly_role.arn + session_name = local.caller_user_name + } + default_tags { + tags = var.tags + } + region = var.aws_region +} diff --git a/remote_states.tf b/remote_states.tf new file mode 100644 index 0000000..34e0b30 --- /dev/null +++ b/remote_states.tf @@ -0,0 +1,59 @@ +data "terraform_remote_state" "dns" { + backend = "s3" + + config = { + encrypt = true + bucket = "cisa-cool-terraform-state" + dynamodb_table = "terraform-state-lock" + profile = "cool-terraform-backend" + region = "us-east-1" + key = "cool-accounts/dns.tfstate" + } + + workspace = "production" +} + +data "terraform_remote_state" "dns_cyber_dhs_gov" { + backend = "s3" + + config = { + encrypt = true + bucket = "cisa-cool-terraform-state" + dynamodb_table = "terraform-state-lock" + profile = "cool-terraform-readstate" + region = "us-east-1" + key = "cool-dns-cyber.dhs.gov.tfstate" + } + + workspace = "production" +} + +data "terraform_remote_state" "master" { + backend = "s3" + + config = { + encrypt = true + bucket = "cisa-cool-terraform-state" + dynamodb_table = "terraform-state-lock" + profile = "cool-terraform-backend" + region = "us-east-1" + key = "cool-accounts/master.tfstate" + } + + workspace = "production" +} + +data "terraform_remote_state" "terraform" { + backend = "s3" + + config = { + encrypt = true + bucket = "cisa-cool-terraform-state" + dynamodb_table = "terraform-state-lock" + profile = "cool-terraform-backend" + region = "us-east-1" + key = "cool-accounts/terraform.tfstate" + } + + workspace = "production" +} diff --git a/route53.tf b/route53.tf new file mode 100644 index 0000000..eb398de --- /dev/null +++ b/route53.tf @@ -0,0 +1,31 @@ +# ------------------------------------------------------------------------------ +# DNS records that support the CloudFront endpoints and application. 
+# ------------------------------------------------------------------------------ + +resource "aws_route53_record" "rules_vm_A" { + provider = aws.route53resourcechange + + name = var.domain + type = "A" + zone_id = data.terraform_remote_state.dns_cyber_dhs_gov.outputs.cyber_dhs_gov_zone.id + + alias { + evaluate_target_health = false + name = aws_cloudfront_distribution.egress_info.domain_name + zone_id = local.cloudfront_zone_id + } +} + +resource "aws_route53_record" "rules_vm_AAAA" { + provider = aws.route53resourcechange + + name = var.domain + type = "AAAA" + zone_id = data.terraform_remote_state.dns_cyber_dhs_gov.outputs.cyber_dhs_gov_zone.id + + alias { + evaluate_target_health = false + name = aws_cloudfront_distribution.egress_info.domain_name + zone_id = local.cloudfront_zone_id + } +} diff --git a/s3.tf b/s3.tf new file mode 100644 index 0000000..e434498 --- /dev/null +++ b/s3.tf @@ -0,0 +1,106 @@ +# The S3 bucket where the published file(s) will be stored. +resource "aws_s3_bucket" "egress_info" { + provider = aws.deploy + + bucket = var.bucket_name +} + + +# Policy that only allows the CloudFront distribution to read from the bucket. +data "aws_iam_policy_document" "egress_info" { + policy_id = "egress_info_s3_bucket" + + statement { + actions = [ + "s3:GetObject" + ] + + condition { + test = "StringEquals" + variable = "AWS:SourceArn" + + values = [ + aws_cloudfront_distribution.egress_info.arn + ] + } + + principals { + identifiers = ["cloudfront.amazonaws.com"] + type = "Service" + } + + resources = [ + "${aws_s3_bucket.egress_info.arn}/*" + ] + } + + statement { + actions = ["s3:ListBucket"] + + condition { + test = "StringEquals" + variable = "AWS:SourceArn" + + values = [ + aws_cloudfront_distribution.egress_info.arn + ] + } + + principals { + identifiers = ["cloudfront.amazonaws.com"] + type = "Service" + } + + resources = [aws_s3_bucket.egress_info.arn] + } +} + +# Any objects placed into this bucket should be owned by the bucket owner. 
This +# ensures that even if objects are added by a different account, the +# bucket-owning account retains full control over the objects stored in this +# bucket. +resource "aws_s3_bucket_ownership_controls" "egress_info" { + provider = aws.deploy + + bucket = aws_s3_bucket.egress_info.id + + rule { + object_ownership = "BucketOwnerEnforced" + } +} + +# Apply our read-only policy to the bucket. +resource "aws_s3_bucket_policy" "egress_info" { + provider = aws.deploy + + bucket = aws_s3_bucket.egress_info.id + policy = data.aws_iam_policy_document.egress_info.json + + depends_on = [ + aws_s3_bucket_public_access_block.egress_info, + ] +} + +# This blocks ANY public access to the bucket or the objects it contains, even +# if misconfigured to allow public access. +resource "aws_s3_bucket_public_access_block" "egress_info" { + provider = aws.deploy + + block_public_acls = true + block_public_policy = true + bucket = aws_s3_bucket.egress_info.id + ignore_public_acls = true + restrict_public_buckets = true +} + +# Set the default server-side encryption for the bucket to AES256. +resource "aws_s3_bucket_server_side_encryption_configuration" "egress_info" { + provider = aws.deploy + + bucket = aws_s3_bucket.egress_info.id + rule { + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } +} diff --git a/variables.tf b/variables.tf index 416ad14..c3a33bf 100644 --- a/variables.tf +++ b/variables.tf @@ -4,8 +4,23 @@ # You must provide a value for each of these parameters. # ------------------------------------------------------------------------------ -variable "subnet_id" { - description = "The ID of the AWS subnet to deploy into (e.g. subnet-0123456789abcdef0)." +variable "bucket_name" { + description = "The name of the S3 bucket to publish egress IP address information to." + type = string +} + +variable "domain" { + description = "The domain hosting the published file(s) containing egress IPs. 
Also used for the CloudFront distribution and certificate." + type = string +} + +variable "deployment_role_arn" { + description = "The ARN of the IAM role to use to deploy the Lambda and all related resources." + type = string +} + +variable "route53_role_arn" { + description = "The ARN of the IAM role to use to modify Route53 DNS resources." type = string } @@ -14,15 +29,16 @@ variable "subnet_id" { # # These parameters have reasonable defaults. # ------------------------------------------------------------------------------ -variable "ami_owner_account_id" { - default = "self" - description = "The ID of the AWS account that owns the Example AMI, or \"self\" if the AMI is owned by the same account as the provisioner." + +variable "account_name_regex" { + default = "^$" + description = "A regular expression that will be applied against the names of all non-master accounts in the AWS organization. If the name of an account matches the regular expression, that account will be queried for egress IP addresses to publish. The default value should not match any valid account name." type = string } -variable "aws_availability_zone" { - default = "a" - description = "The AWS availability zone to deploy into (e.g. a, b, c, etc.)." +variable "application_tag" { + default = "Application" + description = "The name of the AWS tag whose value represents the application associated with an IP address." type = string } @@ -31,3 +47,104 @@ variable "aws_region" { description = "The AWS region to deploy into (e.g. us-east-1)." type = string } + +variable "cloudfront_distribution_oac_description" { + default = "Allow CloudFront to securely read from an S3 bucket." + description = "The description to apply to the CloudFront Origin Access Control." + type = string +} + +variable "cloudfront_distribution_oac_name" { + default = "publish-egress-ip-s3-distribution" + description = "The name for the CloudFront Origin Access Control." 
+ type = string +} + +variable "ec2_read_role_name" { + default = "EC2ReadOnly" + description = "The name of the IAM role that allows read access to the necessary EC2 attributes. Note that this role must exist in each account that you want to query." + type = string +} + +variable "extraorg_account_ids" { + default = [] + description = "A list of AWS account IDs corresponding to \"extra\" accounts that you want to query for egress IPs to publish." + type = list(string) +} + +variable "file_configs" { + default = [] + type = list(object({ app_regex = string, description = string, filename = string, static_ips = list(string) })) + description = "A list of objects that define the files to be published. \"app_regex\" specifies a regular expression matching the application name (based on the variable var.application_tag). \"description\" is the description of the published file. \"filename\" is the name to assign the published file. \"static_ips\" is a list of CIDR blocks that will always be included in the published file. An example file configuration looks like this: `[{\"app_regex\": \".*\", \"description\": \"This file contains a list of all public IP addresses to be published.\", \"filename\": \"all.txt\", \"static_ips\": []}, {\"app_regex\": \"^Vulnerability Scanning$\", \"description\": \"This file contains a list of all IPs used for Vulnerability Scanning.\", \"filename\": \"vs.txt\", \"static_ips\": [\"192.168.1.1/32\", \"192.168.2.2/32\"]}]`" +} + +variable "file_header" { + default = ["###", "# https://{domain}/{filename}", "# {timestamp}", "# {description}", "###"] + description = "The header template for each published file. When the file is published, newline characters are automatically added between each item in the list. 
The following variables are available within the template: {domain} - the domain where the published files are located, {filename} - the name of the published file, {timestamp} - the timestamp when the file was published, {description} - the description of the published file" + type = list(string) +} + +variable "lambda_function_description" { + default = "Lambda function to publish egress IP addresses to an S3 bucket configured with a CloudFront distribution for HTTPS access." + description = "The description of the Lambda function." + type = string +} + +variable "lambda_function_name" { + default = "publish-egress-ip" + description = "The name of the Lambda function to publish egress IP addresses." + type = string +} + +variable "lambda_schedule_interval" { + default = 60 + description = "The number of minutes between scheduled runs of the Lambda function to publish egress IP addresses. This value must be an integer greater than 0." + type = number + + validation { + condition = alltrue([floor(var.lambda_schedule_interval) == var.lambda_schedule_interval, var.lambda_schedule_interval > 0]) + error_message = "lambda_schedule_interval must be an integer greater than zero." + } +} + +variable "lambda_zip_filename" { + default = "lambda_build.zip" + description = "The name of the ZIP file containing the Lambda function deployment package to publish egress IP addresses. The file must be located in the root directory of this project." + type = string +} + +variable "lambdaexecution_role_description" { + default = "Allows the publish-egress-ip Lambda to query other accounts for public EC2 IP information, publish objects to the S3 bucket, and write CloudWatch logs." + description = "The description to associate with the IAM role (and policy) that allows the publish-egress-ip Lambda to query other accounts for public EC2 IP information, publish objects to the S3 bucket, and write CloudWatch logs." 
+ type = string +} + +variable "lambdaexecution_role_name" { + default = "PublishEgressIPLambda" + description = "The name to assign the IAM role (and policy) that allows the publish-egress-ip Lambda to query other accounts for public EC2 IP information, publish objects to the S3 bucket, and write CloudWatch logs." + type = string +} + +variable "publish_egress_tag" { + default = "Publish Egress" + description = "The name of the AWS resource tag whose value represents whether the EC2 instance or elastic IP should have its public IP address published." + type = string +} + +variable "region_filters" { + default = [] + description = "A list of AWS EC2 region filters to use when querying for IP addresses to publish. If a filter is not specified, the query will be performed in all regions. An example filter to restrict to US regions looks like this: `[{ \"Name\" : \"endpoint\", \"Values\" : [\"*.us-*\"] }]`. For more information, refer to the AWS EC2 DescribeRegions API documentation: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html." + type = list(object({ Name = string, Values = list(string) })) +} + +variable "root_object" { + default = "all.txt" + description = "The root object in the S3 bucket to serve when no path is provided or an error occurs." + type = string +} + +variable "tags" { + type = map(string) + description = "Tags to apply to all AWS resources created." + default = {} +}