From 60b9d12772792f5935c891d5e691f84644c58f6d Mon Sep 17 00:00:00 2001
From: Triet Le
Date: Mon, 8 Jul 2019 14:08:31 -0400
Subject: [PATCH 01/34] Adding vault module and salt formula

---
 .gitignore                           |   6 +
 .travis.yml                          |   2 -
 Makefile                             |   2 +-
 README.md                            |  48 ++++
 main.tf                              | 319 +++++++++++++++++++++++++++
 modules/bucket/bucket_policy.json    |  17 ++
 modules/bucket/main.tf               |  53 +++++
 modules/iam/iam_policy.json          |  96 ++++++++
 modules/iam/main.tf                  | 110 +++++++++
 outputs.tf                           |   5 +-
 salt/_states/vault.py                |  95 ++++++++
 salt/vault/configure.sls             |  20 ++
 salt/vault/files/server.hcl.jinja    |  24 ++
 salt/vault/files/vault.conf.jinja    |  24 ++
 salt/vault/files/vault.service.jinja |  20 ++
 salt/vault/firewall.sls              |  25 +++
 salt/vault/init.sls                  |   6 +
 salt/vault/initialize.sls            |   8 +
 salt/vault/install.sls               |  86 ++++++++
 salt/vault/map.jinja                 |  17 ++
 salt/vault/maps/defaults.yaml        |  13 ++
 salt/vault/maps/initfamilymap.yaml   |  10 +
 salt/vault/maps/osfamilymap.yaml     |  35 +++
 salt/vault/service.sls               |  24 ++
 scripts/appscript.sh                 |  63 ++++++
 variables.tf                         | 204 +++++++++++++++++
 26 files changed, 1328 insertions(+), 4 deletions(-)
 mode change 100755 => 100644 main.tf
 create mode 100644 modules/bucket/bucket_policy.json
 create mode 100644 modules/bucket/main.tf
 create mode 100644 modules/iam/iam_policy.json
 create mode 100644 modules/iam/main.tf
 mode change 100755 => 100644 outputs.tf
 create mode 100644 salt/_states/vault.py
 create mode 100644 salt/vault/configure.sls
 create mode 100644 salt/vault/files/server.hcl.jinja
 create mode 100644 salt/vault/files/vault.conf.jinja
 create mode 100644 salt/vault/files/vault.service.jinja
 create mode 100644 salt/vault/firewall.sls
 create mode 100644 salt/vault/init.sls
 create mode 100644 salt/vault/initialize.sls
 create mode 100644 salt/vault/install.sls
 create mode 100644 salt/vault/map.jinja
 create mode 100644 salt/vault/maps/defaults.yaml
 create mode 100644 salt/vault/maps/initfamilymap.yaml
 create mode 100644 salt/vault/maps/osfamilymap.yaml
 create mode 100644 salt/vault/service.sls
 create mode 100644 scripts/appscript.sh
 mode change 100755 => 100644 variables.tf

diff --git a/.gitignore b/.gitignore
index 22a2084..3439d2c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,9 @@
 
 # .tfvars files
 *.tfvars
+
+# temp archive files
+.files/
+
+# other unnecessary files
+.DS_Store
diff --git a/.travis.yml b/.travis.yml
index c3513ea..0ffc3b2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -45,8 +45,6 @@ jobs:
           (set -x; git tag -a $RELEASE_VERSION -m $RELEASE_VERSION)
       deploy:
         provider: releases
-        api_key:
-          secure:
         name: $RELEASE_VERSION
         body: $RELEASE_BODY
         tag_name: $RELEASE_VERSION
diff --git a/Makefile b/Makefile
index 69a76aa..038ebb7 100755
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@ SHELL := bash
 .PHONY: guard/% %/install %/lint
 
-GITHUB_ACCESS_TOKEN ?=
+GITHUB_ACCESS_TOKEN ?=
 
 # Macro to return the download url for a github release
 # For latest release, use version=latest
 # To pin a release, use version=tags/
diff --git a/README.md b/README.md
index 7151fdd..137b8f2 100755
--- a/README.md
+++ b/README.md
@@ -1,3 +1,51 @@
 ## terraform-aws-vault
 
 Terraform module that installs and configures a HashiCorp Vault cluster with an HA DynamoDB storage backend.
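+
+## Usage
+
+A minimal invocation sketch. The module source path and every ID below are
+illustrative placeholders, not working values; substitute resources from your
+own account (the required inputs are listed in the table that follows):
+
+```hcl
+module "vault" {
+  source = "path/to/terraform-aws-vault"
+
+  name                         = "vault"
+  environment                  = "dev"
+  bucket_name                  = "my-vault-artifacts"
+  key_pair_name                = "my-keypair"
+  ami_owner                    = "123456789012"
+  additional_ips_allow_inbound = []
+  ec2_extra_security_group_ids = []
+  ec2_subnet_ids               = ["subnet-aaaa1111", "subnet-bbbb2222"]
+  lb_subnet_ids                = ["subnet-aaaa1111", "subnet-bbbb2222"]
+  lb_certificate_arn           = "arn:aws:acm:us-east-1:123456789012:certificate/example"
+  vault_version                = "1.1.3"
+  vault_url                    = "vault.example.net"
+  kms_key_id                   = "alias/vault-unseal"
+  dynamodb_table               = "vault-data"
+  route53_zone_id              = "ZXXXXXXXXXXXXX"
+}
+```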
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|:----:|:-----:|:-----:|
+| additional\_ips\_allow\_inbound | List of additional IP addresses allowed inbound access to resources | list | n/a | yes |
+| ami\_name\_filter | Name filter used to select the AMI | string | `"spel-minimal-centos-7-hvm-*.x86_64-gp2"` | no |
+| ami\_name\_regex | Regex used to fine-tune AMI filtering | string | `"spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2"` | no |
+| ami\_owner | Account id/alias of the AMI owner | string | n/a | yes |
+| bucket\_name | Name of the bucket used to store app scripts and Vault's salt formula | string | n/a | yes |
+| cfn\_bootstrap\_utils\_url | (Optional) URL to aws-cfn-bootstrap-latest.tar.gz | string | `"https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz"` | no |
+| cfn\_endpoint\_url | (Optional) URL to the CloudFormation endpoint, e.g. https://cloudformation.us-east-1.amazonaws.com | string | `"https://cloudformation.us-east-1.amazonaws.com"` | no |
+| cloudwatch\_agent\_url | (Optional) S3 URL to the CloudWatch Agent installer. Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip | string | `""` | no |
+| desired\_capacity | (Optional) Desired number of instances in the Autoscaling Group | string | `"2"` | no |
+| dynamodb\_table | Name of the DynamoDB table to be used as the storage backend for Vault | string | n/a | yes |
+| ec2\_extra\_security\_group\_ids | List of additional security groups to add to EC2 instances | list | n/a | yes |
+| ec2\_subnet\_ids | List of subnets where EC2 instances will be launched | list | n/a | yes |
+| environment | Type of environment -- must be one of: dev, test, prod | string | n/a | yes |
+| ingress\_cidr\_blocks | (Optional) List of CIDR blocks allowed inbound to the load balancer | list | `` | no |
+| instance\_type | Amazon EC2 instance type | string | `"t2.medium"` | no |
+| ip\_data\_url | URL used to look up the current user's public IP address | string | `"http://ipv4.icanhazip.com"` | no |
+| key\_pair\_name | Key pair to associate with launched instances | string | n/a | yes |
+| kms\_key\_id | ID of the AWS KMS key used for the auto-unseal operation when Vault is initialized | string | n/a | yes |
+| lb\_certificate\_arn | ARN of an existing certificate to use on the load balancer | string | n/a | yes |
+| lb\_internal | Boolean indicating whether the load balancer is internal or external | string | `"false"` | no |
+| lb\_ssl\_policy | The name of the SSL Policy for the listener | string | `"ELBSecurityPolicy-FS-2018-06"` | no |
+| lb\_subnet\_ids | List of subnets to associate with the load balancer | list | n/a | yes |
+| max\_capacity | (Optional) Maximum number of instances in the Autoscaling Group | string | `"2"` | no |
+| min\_capacity | (Optional) Minimum number of instances in the Autoscaling Group | string | `"1"` | no |
+| name | Name of the Vault stack; used to prefix resource names | string | n/a | yes |
+| pypi\_index\_url | (Optional) URL to the PyPi Index | string | `"https://pypi.org/simple"` | no |
+| route53\_enabled | Creates Route53 DNS entries for Vault automatically | string | `"false"` | no |
+| route53\_zone\_id | Zone ID for the domain | string | n/a | yes |
+| tags | (Optional) Map of tags to include with resources | map | `` | no |
+| toggle\_update | (Optional) Toggle that triggers a stack update by modifying the launch config, resulting in new instances; must be one of: A or B | string | `"A"` | no |
+| vault\_url | The DNS address that Vault will be accessible at. Example: vault.domain.net | string | n/a | yes |
+| vault\_version | Version of Vault to be installed on servers | string | n/a | yes |
+| watchmaker\_admin\_groups | (Optional) Colon-separated list of domain groups that should have admin permissions on the EC2 instance | string | `""` | no |
+| watchmaker\_admin\_users | (Optional) Colon-separated list of domain users that should have admin permissions on the EC2 instance | string | `""` | no |
+| watchmaker\_config | (Optional) URL to a Watchmaker config file | string | `""` | no |
+| watchmaker\_ou\_path | (Optional) DN of the OU to place the instance when joining a domain. If blank and WatchmakerEnvironment enforces a domain join, the instance will be placed in a default container.
Leave blank if not joining a domain, or if WatchmakerEnvironment is false | string | `""` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| vault\_url | URL to access Vault UI | + diff --git a/main.tf b/main.tf old mode 100755 new mode 100644 index 8b13789..9d70373 --- a/main.tf +++ b/main.tf @@ -1 +1,320 @@ +terraform { + required_version = ">= 0.12" +} +### +### LOCALS +### + +locals { + name_id = "${var.name}-${random_string.this.result}" + vpc_id = "${data.aws_subnet.lb.0.vpc_id}" + role_name = "INSTANCE_VAULT_${data.aws_caller_identity.current.account_id}" + ssm_root_path = "vault/${var.environment}/${data.aws_caller_identity.current.account_id}/${var.name}" + public_ip = "${chomp(data.http.ip.body)}/32" + allow_inbound = "${compact(distinct(concat(list(local.public_ip), var.additional_ips_allow_inbound)))}" + archive_file_name = "salt.zip" + appscript_file_name = "appscript.sh" + archive_file_path = "${path.module}/.files/${local.archive_file_name}" + appscript_file_path = "${path.module}/scripts/${local.appscript_file_name}" + + tags = { + Environment = "${var.environment}" + } +} + +### +### DATA SOURCES +### + +data "aws_partition" "current" {} + +data "aws_caller_identity" "current" {} + +data "aws_region" "current" {} + +data "aws_ami" "this" { + most_recent = "true" + + owners = ["${var.ami_owner}"] + + name_regex = "${var.ami_name_regex}" + + filter { + name = "name" + values = ["${var.ami_name_filter}"] + } +} + +data "http" "ip" { + url = "${var.ip_data_url}" +} + +data "aws_subnet" "lb" { + count = "${length(var.lb_subnet_ids)}" + + id = "${var.lb_subnet_ids[count.index]}" +} + +data "aws_kms_key" "this" { + key_id = "${var.kms_key_id}" +} + +data "archive_file" "salt" { + type = "zip" + source_dir = "salt" + output_path = "${local.archive_file_path}" +} + +# Manage Bucket module +module "s3_bucket" { + source = "./modules/bucket" + + bucket_name = "${var.bucket_name}" +} + +# Manage IAM module +module "iam" { + source = "./modules/iam" + + bucket_name = "${module.s3_bucket.bucket_name}" + dynamodb_table = "${var.dynamodb_table}" + environment = "${var.environment}" + kms_key_id = "${data.aws_kms_key.this.key_id}" + name = "${var.name}" + role_name = "${local.role_name}" + ssm_root_path = "${local.ssm_root_path}" +} + +# Generate a random id for each deployment +resource "random_string" "this" { + length = 8 + special = "false" +} + +# Manage archive and appscript files +resource "aws_s3_bucket_object" "salt_zip" { + bucket = "${module.s3_bucket.bucket_name}" + key = "${random_string.this.result}/${local.archive_file_name}" + source = "${local.archive_file_path}" + etag = "${data.archive_file.salt.output_md5}" +} + +resource "aws_s3_bucket_object" "app_script" { + bucket = "${module.s3_bucket.bucket_name}" + key = "${random_string.this.result}/${local.appscript_file_name}" + source = "${local.appscript_file_path}" + etag = "${filemd5("${local.appscript_file_path}")}" +} + +# Manage domain record +resource "aws_route53_record" "this" { + count = "${var.route53_zone_id == "" || var.vault_url == "" ? 
0 : 1}" + zone_id = "${var.route53_zone_id}" + name = "${var.vault_url}" + type = "A" + + alias { + name = "${aws_lb.this.dns_name}" + zone_id = "${aws_lb.this.zone_id}" + evaluate_target_health = false + } +} + +# Manage load balancer +resource "aws_lb" "this" { + name = "${var.name}-lb-${var.environment}" + internal = "false" + security_groups = ["${aws_security_group.lb.id}"] + subnets = "${var.lb_subnet_ids}" + + # access_logs { + # enabled = true + # bucket = "${module.bucket.bucket_name}" + # prefix = "logs/lb_access_logs" + # } + + tags = "${merge(map("Name", "${var.name}-lb"), local.tags)}" +} + +resource "aws_lb_listener" "http" { + load_balancer_arn = "${aws_lb.this.arn}" + port = "80" + protocol = "HTTP" + + default_action { + type = "redirect" + + redirect { + port = "443" + protocol = "HTTPS" + status_code = "HTTP_301" + } + } +} + +resource "aws_lb_listener" "https" { + load_balancer_arn = "${aws_lb.this.arn}" + port = "443" + protocol = "HTTPS" + ssl_policy = "${var.lb_ssl_policy}" + certificate_arn = "${var.lb_certificate_arn}" + + default_action { + target_group_arn = "${aws_lb_target_group.this.arn}" + type = "forward" + } +} + +resource "aws_lb_target_group" "this" { + name = "${var.name}-${var.environment}" + port = "8200" + protocol = "HTTP" + vpc_id = "${local.vpc_id}" + + deregistration_delay = "10" + + # /sys/health will return 200 only if the vault instance + # is the leader. Meaning there will only ever be one healthy + # instance, but a failure will cause a new instance to + # be healthy automatically. This healthceck path prevents + # unnecessary redirect loops by not sending traffic to + # followers, which always just route traffic to the master + health_check { + path = "/v1/sys/health?standbyok=true" + port = "8200" + interval = "5" + timeout = "3" + healthy_threshold = "2" + unhealthy_threshold = "2" + } + + tags = "${merge( + map("Name", "${var.name}-tg"), + local.tags)}" +} + +# Manage security groups +resource "aws_security_group" "lb" { + name = "${var.name}-${var.environment}" + description = "Rules required for operation of ${var.name}" + vpc_id = "${local.vpc_id}" + + ingress { + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = "${var.ingress_cidr_blocks}" + } + + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = "${var.ingress_cidr_blocks}" + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = "${merge(map("Name", "${var.name}-lb-${var.environment}"), local.tags)}" +} + +resource "aws_security_group" "ec2" { + name = "${var.name}-ec2-sg-${var.environment}" + description = "Rules required for operation of ${var.name}" + vpc_id = "${local.vpc_id}" + + ingress { + from_port = 8200 + to_port = 8200 + description = "Allows traffics to come to vault" + protocol = "tcp" + security_groups = ["${aws_security_group.lb.id}"] + } + + ingress { + from_port = 8201 + to_port = 8201 + description = "Allows traffics to route between vault nodes" + protocol = "tcp" + self = true + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = "${merge(map("Name", "${var.name}-ec2-sg-${var.environment}"), local.tags)}" +} + +resource "aws_security_group_rule" "ssh" { + count = "${var.environment == "dev" ? 
1 : 0}" + type = "ingress" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = "${local.allow_inbound}" + + security_group_id = "${aws_security_group.ec2.id}" +} + +# Prepare appscript parameters +locals { + params_for_appscript = [ + "${module.s3_bucket.bucket_name}/${random_string.this.result}/${local.archive_file_name}", + "${var.vault_version}", + "${var.dynamodb_table}", + "${data.aws_kms_key.this.key_id}", + "${local.ssm_root_path}", + ] + + appscript_url = "s3://${module.s3_bucket.bucket_name}/${random_string.this.result}/${local.appscript_file_name}" + appscript_params = "${join(" ", local.params_for_appscript)}" +} + +# Manage autoscaling group +module "autoscaling_group" { + source = "git::https://github.com/plus3it/terraform-aws-watchmaker//modules/lx-autoscale?ref=1.15.2" + + Name = "${var.name}-${var.environment}" + OnFailureAction = "" + DisableRollback = "true" + + AmiId = "${data.aws_ami.this.id}" + AmiDistro = "CentOS" + AppScriptUrl = "${local.appscript_url}" + AppScriptParams = "${local.appscript_params}" + CfnBootstrapUtilsUrl = "${var.cfn_bootstrap_utils_url}" + + CfnEndpointUrl = "${var.cfn_endpoint_url}" + CloudWatchAgentUrl = "${var.cloudwatch_agent_url}" + KeyPairName = "${var.key_pair_name}" + InstanceRole = "${module.iam.profile_name}" + InstanceType = "${var.instance_type}" + NoReboot = "true" + NoPublicIp = "false" + PypiIndexUrl = "${var.pypi_index_url}" + SecurityGroupIds = "${join(",", compact(concat(list(aws_security_group.ec2.id), var.ec2_extra_security_group_ids)))}" + SubnetIds = "${join(",", var.ec2_subnet_ids)}" + TargetGroupArns = "${aws_lb_target_group.this.arn}" + ToggleNewInstances = "${var.toggle_update}" + TimeoutInMinutes = "20" + + WatchmakerEnvironment = "${var.environment}" + WatchmakerConfig = "${var.watchmaker_config}" + WatchmakerAdminGroups = "${var.watchmaker_admin_groups}" + WatchmakerAdminUsers = "${var.watchmaker_admin_users}" + WatchmakerOuPath = "${var.watchmaker_ou_path}" + + DesiredCapacity = "${var.desired_capacity}" + MinCapacity = "${var.min_capacity}" + MaxCapacity = "${var.max_capacity}" +} diff --git a/modules/bucket/bucket_policy.json b/modules/bucket/bucket_policy.json new file mode 100644 index 0000000..9db5b89 --- /dev/null +++ b/modules/bucket/bucket_policy.json @@ -0,0 +1,17 @@ +{ + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": "*", + "Resource": "${bucket_arn}/*", + "Sid": "DenyInsecureTransport" + } + ], + "Version": "2012-10-17" +} diff --git a/modules/bucket/main.tf b/modules/bucket/main.tf new file mode 100644 index 0000000..92cb8b6 --- /dev/null +++ b/modules/bucket/main.tf @@ -0,0 +1,53 @@ +### +### VARIABLES +### + +variable "bucket_name" { + description = "The name of the bucket will be use to store app scripts and vault's salt formula." 
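+  # NOTE: S3 bucket names are globally unique, so the sample default below
+  # will likely collide; override it with a bucket name of your own.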
+ type = "string" + default = "vault-salt-formula" +} + +### +### DATA SOURCES +### + +data "aws_partition" "current" {} + +data "aws_caller_identity" "current" {} + +data "aws_region" "current" {} + +data "template_file" "bucket_policy" { + template = "${file("${path.module}/bucket_policy.json")}" + + vars = { + bucket_arn = "${aws_s3_bucket.this.arn}" + } +} + +### +### RESOURCES +### + +resource "aws_s3_bucket" "this" { + bucket = "${var.bucket_name}" +} + +resource "aws_s3_bucket_policy" "this" { + bucket = "${aws_s3_bucket.this.id}" + policy = "${data.template_file.bucket_policy.rendered}" +} + +### +### OUTPUTS +### +output "bucket_name" { + description = "Name of the S3 bucket" + value = "${aws_s3_bucket.this.id}" +} + +output "bucket_arn" { + description = "ARN of the S3 bucket" + value = "${aws_s3_bucket.this.arn}" +} diff --git a/modules/iam/iam_policy.json b/modules/iam/iam_policy.json new file mode 100644 index 0000000..0021fe3 --- /dev/null +++ b/modules/iam/iam_policy.json @@ -0,0 +1,96 @@ +{ + "Statement": [ + { + "Action": [ + "cloudformation:DescribeStackResource", + "cloudformation:SignalResource" + ], + "Effect": "Allow", + "Resource": [ + "arn:${partition}:cloudformation:${region}:${account_id}:stack/${name}-${environment}*" + ], + "Sid": "CfnActions" + }, + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:${partition}:s3:::${bucket_name}/*", + "arn:${partition}:s3:::amazoncloudwatch-agent/*" + ], + "Sid": "S3ObjectActions" + }, + { + "Action": [ + "s3:ListBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:${partition}:s3:::${bucket_name}" + ], + "Sid": "S3BucketActions" + }, + { + "Action": [ + "ssm:PutParameter" + ], + "Effect": "Allow", + "Resource": [ + "arn:${partition}:ssm:${region}:${account_id}:parameter/${ssm_path}/*" + ], + "Sid": "SSMParameterActions" + }, + { + "Action": [ + "kms:DescribeKey", + "kms:Encrypt", + "kms:Decrypt" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:kms:${region}:${account_id}:key/${key_id}" + ], + "Sid": "KMSDescribeKey" + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:DescribeLogStreams", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": [ + "arn:${partition}:logs:${region}:${account_id}:log-group:/aws/ec2/lx/${name}-${environment}*" + ], + "Sid": "CloudWatchLogActions" + }, + { + "Action": [ + "dynamodb:DescribeLimits", + "dynamodb:DescribeTimeToLive", + "dynamodb:ListTagsOfResource", + "dynamodb:DescribeReservedCapacityOfferings", + "dynamodb:DescribeReservedCapacity", + "dynamodb:ListTables", + "dynamodb:BatchGetItem", + "dynamodb:BatchWriteItem", + "dynamodb:CreateTable", + "dynamodb:DeleteItem", + "dynamodb:GetItem", + "dynamodb:GetRecords", + "dynamodb:PutItem", + "dynamodb:Query", + "dynamodb:UpdateItem", + "dynamodb:Scan", + "dynamodb:DescribeTable" + ], + "Effect": "Allow", + "Resource": [ + "arn:${partition}:dynamodb:${region}:${account_id}:table/${dynamodb_table}" + ] + } + ], + "Version": "2012-10-17" +} diff --git a/modules/iam/main.tf b/modules/iam/main.tf new file mode 100644 index 0000000..0c8bcdc --- /dev/null +++ b/modules/iam/main.tf @@ -0,0 +1,110 @@ +### +### REQUIRED VARIABLES +### +variable "name" { + description = "Name of the stack" + type = "string" +} + +variable "environment" { + description = "Type of environment -- must be one of: dev, test, prod" + type = "string" +} + +variable "kms_key_id" { + description = "Id of an AWS KMS key use for auto unseal operation when vault is intialize" + type = "string" +} + +variable "dynamodb_table" { + 
description = "Name of the Dynamodb to be used as storage backend for Vault" + type = "string" +} + +variable "bucket_name" { + description = "The name of the bucket will be use to store app scripts and vault's salt formula." + type = "string" +} + +variable "role_name" { + description = "Name of the role to be create for vault" + type = "string" +} + +variable "ssm_root_path" { + description = "SSM parameter path. Initialize scripts will create tokens and store them as parameter at this path." + type = "string" +} + +### +### OPTIONAL VARIABLES +### +variable "url_suffix" { + default = "amazonaws.com" + description = "URL suffix associated with the current partition" + type = "string" +} + +### +### DATA +### +data "aws_partition" "current" {} + +data "aws_caller_identity" "current" {} + +data "aws_region" "current" {} + +### +### RESOURCES +### +data "template_file" "instance_policy" { + template = "${file("${path.module}/iam_policy.json")}" + + vars = { + partition = "${data.aws_partition.current.partition}" + region = "${data.aws_region.current.name}" + account_id = "${data.aws_caller_identity.current.account_id}" + + name = "${var.name}" + environment = "${var.environment}" + key_id = "${var.kms_key_id}" + dynamodb_table = "${var.dynamodb_table}" + bucket_name = "${var.bucket_name}" + ssm_path = "${var.ssm_root_path}" + } +} + +data "aws_iam_policy_document" "instance_trust_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.${var.url_suffix}"] + } + } +} + +resource "aws_iam_role" "instance" { + name = "${var.role_name}" + assume_role_policy = "${data.aws_iam_policy_document.instance_trust_policy.json}" +} + +resource "aws_iam_role_policy" "instance" { + name_prefix = "${var.role_name}_" + policy = "${data.template_file.instance_policy.rendered}" + role = "${aws_iam_role.instance.id}" +} + +resource "aws_iam_instance_profile" "instance" { + name = "${var.role_name}" + role = "${aws_iam_role.instance.name}" +} + +### +### OUTPUTS +### + +output "profile_name" { + value = "${aws_iam_instance_profile.instance.name}" +} diff --git a/outputs.tf b/outputs.tf old mode 100755 new mode 100644 index 8b13789..b7306de --- a/outputs.tf +++ b/outputs.tf @@ -1 +1,4 @@ - +output "vault_url" { + description = "URL to access Vault UI" + value = "https://${aws_route53_record.this.0.fqdn}" +} diff --git a/salt/_states/vault.py b/salt/_states/vault.py new file mode 100644 index 0000000..a5fe6b7 --- /dev/null +++ b/salt/_states/vault.py @@ -0,0 +1,95 @@ +from __future__ import absolute_import + +import logging +import os +import json + +import salt.config +import salt.syspaths +import salt.utils +import salt.exceptions + +log = logging.getLogger(__name__) + +try: + import hvac + import boto3 + DEPS_INSTALLED = True +except ImportError: + log.debug('Unable to import the libraries.') + DEPS_INSTALLED = False + +__all__ = ['initialize'] + +def __virtual__(): + return DEPS_INSTALLED + + +def initialized(name, ssm_path, recovery_shares=5, recovery_threshold=3): + """ + Ensure that the vault instance has been initialized and run the + initialization if it has not. 
+ + :param name: The id used for the state definition + :param recovery_shares: The number of recovery shares to use for the + initialization key + :param recovery_threshold: The number of recovery keys required to unseal the vault + :param ssm_path: The path to store root token in SSM Parameter store + + :returns: Result of the execution + :rtype: dict + """ + ret = {'name': name, + 'comment': '', + 'result': '', + 'changes': {}} + + client = hvac.Client(url='http://localhost:8200') + + is_initialized = client.sys.is_initialized() + + if is_initialized: + ret['result'] = True + ret['comment'] = 'Vault is already initialized' + else: + result = client.sys.initialize( + recovery_shares=recovery_shares, + recovery_threshold=recovery_threshold + ) + root_token = result['root_token'] + recovery_keys = result['recovery_keys'] + is_success = client.sys.is_initialized() + + ret['result'] = is_success + ret['changes'] = { + 'root_credentials': { + 'new': { + 'recover_keys': '/{}/{}'.format(ssm_path, 'recovery_keys'), + 'root_token': '/{}/{}'.format(ssm_path, 'root_token') + }, + 'old': {} + } + } + + #upload root token ssm parameter store + if is_success: + ssm_client = boto3.client('ssm') + #saving root token + ssm_client.put_parameter( + Name = '/{}/{}'.format(ssm_path, 'root_token'), + Value = root_token, + Type = "SecureString", + Overwrite = True + ) + + #saving recovery keys + ssm_client.put_parameter( + Name = '/{}/{}'.format(ssm_path, 'recovery_keys'), + Value = json.dumps(recovery_keys), + Type = "SecureString", + Overwrite = True + ) + + ret['comment'] = 'Vault has {}initialized'.format( + '' if is_success else 'failed to be ') + return ret diff --git a/salt/vault/configure.sls b/salt/vault/configure.sls new file mode 100644 index 0000000..f54f51d --- /dev/null +++ b/salt/vault/configure.sls @@ -0,0 +1,20 @@ + +{% from "vault/map.jinja" import vault with context %} + +include: + - .service + +{# only configure if vault is not in dev_mode #} +{%- if not vault.dev_mode %} + +vault_configure_service_file: + file.managed: + - source: salt://vault/files/server.hcl.jinja + - name: /etc/vault/conf.d/server.hcl + - template: jinja + - user: root + - group: root + - mode: '0755' + - makedirs: True + +{%- endif %} diff --git a/salt/vault/files/server.hcl.jinja b/salt/vault/files/server.hcl.jinja new file mode 100644 index 0000000..4e8e38e --- /dev/null +++ b/salt/vault/files/server.hcl.jinja @@ -0,0 +1,24 @@ +{%- from "vault/map.jinja" import vault with context -%} + +api_addr = "http://{{ grains['ip_interfaces']['eth0'][0] }}:8200" + +backend "dynamodb" { + region = "{{ vault.region }}" + ha_enabled = "true" + table = "{{ vault.dynamodb_table }}" +} + +seal "awskms" { + region = "{{ vault.region }}" + kms_key_id = "{{ vault.kms_key_id }}" +} + +listener "tcp" { + address = "{{ vault.listener_address }}" + tls_disable = {{ vault.listener_tls_disable }} +} + +default_lease_ttl = "{{ vault.default_lease_ttl }}" +max_lease_ttl = "{{ vault.max_lease_ttl }}" + +ui = true diff --git a/salt/vault/files/vault.conf.jinja b/salt/vault/files/vault.conf.jinja new file mode 100644 index 0000000..d328ddf --- /dev/null +++ b/salt/vault/files/vault.conf.jinja @@ -0,0 +1,24 @@ +{%- from "vault/map.jinja" import vault with context -%} +description "Vault server" + +start on (runlevel [345] and started network) +stop on (runlevel [!345] or stopping network) + +respawn + +script + if [ -f "/etc/service/vault" ]; then + . 
/etc/service/vault + fi + + # Make sure to use all our CPUs, because Vault can block a scheduler thread + export GOMAXPROCS=`nproc` + + exec /usr/local/bin/vault server \ +{%- if vault.dev_mode %} + -dev \ +{% else %} + -config="/etc/vault/conf.d/server.hcl" \ +{% endif -%} + >>/var/log/vault.log 2>&1 +end script diff --git a/salt/vault/files/vault.service.jinja b/salt/vault/files/vault.service.jinja new file mode 100644 index 0000000..940d895 --- /dev/null +++ b/salt/vault/files/vault.service.jinja @@ -0,0 +1,20 @@ +{%- from "vault/map.jinja" import vault with context -%} +[Unit] +Description=Vault secret management tool +Requires=network-online.target +After=network-online.target + +[Service] +User=vault +Group=vault +PIDFile=/var/run/vault/vault.pid +ExecStart=/usr/local/bin/vault server {% if vault.dev_mode %} -dev {% else %} -config=/etc/vault/conf.d {% endif %} +ExecReload=/bin/kill -HUP $MAINPID +KillMode=process +KillSignal=SIGTERM +Restart=on-failure +RestartSec=42s +LimitMEMLOCK=infinity + +[Install] +WantedBy=multi-user.target diff --git a/salt/vault/firewall.sls b/salt/vault/firewall.sls new file mode 100644 index 0000000..a0c1f5f --- /dev/null +++ b/salt/vault/firewall.sls @@ -0,0 +1,25 @@ + +{% from "vault/map.jinja" import vault with context %} + +firewalld_service: + firewalld.service: + - name: vault + - ports: + - 8200/tcp + - 8201/tcp + +firewalld_zone: + firewalld.present: + - name: vault + - services: + - vault + - sources: +{%- for mac, properties in salt.grains.get('meta-data:network:interfaces:macs', {}).items() %} + {%- if properties['device-number'] == 0 %} + {%- for cidr in properties['vpc-ipv4-cidr-blocks'].split('\n') %} + - {{ cidr }} + {%- endfor %} + {%- endif %} +{%- endfor %} + - require: + - firewalld: firewalld_service diff --git a/salt/vault/init.sls b/salt/vault/init.sls new file mode 100644 index 0000000..c6d0337 --- /dev/null +++ b/salt/vault/init.sls @@ -0,0 +1,6 @@ +include: + - .install + - .configure + - .service + - .firewall + diff --git a/salt/vault/initialize.sls b/salt/vault/initialize.sls new file mode 100644 index 0000000..5732c94 --- /dev/null +++ b/salt/vault/initialize.sls @@ -0,0 +1,8 @@ +{% from "vault/map.jinja" import vault with context %} + +# Perform initialization and unseal process +vault_initialize_server: + vault.initialized: + - recovery_shares: {{ vault.recovery_shares }} + - recovery_threshold: {{ vault.recovery_threshold }} + - ssm_path: {{ vault.ssm_path }} diff --git a/salt/vault/install.sls b/salt/vault/install.sls new file mode 100644 index 0000000..3346bbb --- /dev/null +++ b/salt/vault/install.sls @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- + +{% from "vault/map.jinja" import vault with context %} + +vault_package_install_group_present: + group.present: + - name: vault + - system: True + +vault_package_install_user_present: + user.present: + - name: vault + - system: True + - gid_from_name: True + - home: /var/lib/vault + +vault_data_dir: + file.directory: + - name: /etc/vault + - user: vault + - group: vault + - mode: '0700' + +vault_package_install_file_directory: + file.directory: + - name: /opt/vault/bin + - makedirs: True + +vault_package_install_file_managed: + file.managed: + - name: /opt/vault/{{ vault.version }}_SHA256SUMS + - source: {{ vault.repo_base_url }}/{{ vault.version }}/vault_{{ vault.version }}_SHA256SUMS + - skip_verify: True + - makedirs: True + +vault_package_install_archive_extracted: + archive.extracted: + - name: /opt/vault/bin + - source: {{ vault.repo_base_url }}/{{ vault.version }}/vault_{{ 
vault.version }}_{{ vault.platform }}.zip + - source_hash: {{ vault.repo_base_url }}/{{ vault.version }}/vault_{{ vault.version }}_SHA256SUMS + - source_hash_name: vault_{{ vault.version }}_{{ vault.platform }}.zip + - archive_format: zip + - enforce_toplevel: False + - overwrite: True + - onchanges: + - file: vault_package_install_file_managed + +vault_package_install_service_dead: + service.dead: + - name: vault + - onchanges: + - file: vault_package_install_file_managed + - onlyif: test -f /etc/systemd/system/vault.service + +vault_package_install_file_symlink: + file.symlink: + - name: /usr/local/bin/vault + - target: /opt/vault/bin/vault + - force: true + +vault_package_install_cmd_run: + cmd.run: + - name: setcap cap_ipc_lock=+ep /opt/vault/bin/vault + - onchanges: + - archive: vault_package_install_archive_extracted + + +install_package_dependencies: + pkg.installed: + - pkgs: {{ vault.module_dependencies.pkgs | json }} + - reload_modules: True + +install_pip_executable: + cmd.run: + - name: | + curl -L "https://bootstrap.pypa.io/get-pip.py" > get_pip.py + sudo python get_pip.py pip==18.0.0 + rm get_pip.py + + - reload_modules: True + +install_python_dependencies: + pip.installed: + - pkgs: {{ vault.module_dependencies.pip_deps | json }} + - reload_modules: True + - ignore_installed: True diff --git a/salt/vault/map.jinja b/salt/vault/map.jinja new file mode 100644 index 0000000..786f509 --- /dev/null +++ b/salt/vault/map.jinja @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- + +{% import_yaml "vault/maps/defaults.yaml" or {} as defaults %} +{% import_yaml "vault/maps/osfamilymap.yaml" or {} as osfamilymap %} +{% import_yaml "vault/maps/initfamilymap.yaml" or {} as initfamilymap %} + +{%- set merged_defaults = salt.grains.filter_by(defaults, + default='vault', + merge=salt.grains.filter_by(osfamilymap, grain='os_family', + merge=salt.grains.filter_by(initfamilymap, grain='init', + merge=salt.grains.get('vault', default={}) + ) + ) +) %} + +{#- Merge the vault pillar #} +{%- set vault = salt.pillar.get('vault', default=merged_defaults, merge=True) %} diff --git a/salt/vault/maps/defaults.yaml b/salt/vault/maps/defaults.yaml new file mode 100644 index 0000000..22a2694 --- /dev/null +++ b/salt/vault/maps/defaults.yaml @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- + +vault: + repo_base_url: "https://releases.hashicorp.com/vault" + dev_mode: False + verify_download: True + listener_address: "0.0.0.0:8200" + listener_tls_disable: 1 + default_lease_ttl: 192h #one week + max_lease_ttl: 192h #one week + recovery_shares: 5 + recovery_threshold: 3 + diff --git a/salt/vault/maps/initfamilymap.yaml b/salt/vault/maps/initfamilymap.yaml new file mode 100644 index 0000000..acd8dad --- /dev/null +++ b/salt/vault/maps/initfamilymap.yaml @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*-] +systemd: + service: + path: /etc/systemd/system/vault.service + source: salt://vault/files/vault.service.jinja + +upstart: + service: + path: /etc/init/vault.conf + source: salt://vault/files/vault.conf.jinja diff --git a/salt/vault/maps/osfamilymap.yaml b/salt/vault/maps/osfamilymap.yaml new file mode 100644 index 0000000..fdf245d --- /dev/null +++ b/salt/vault/maps/osfamilymap.yaml @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- + +RedHat: + platform: linux_amd64 + gpg_pkg: gnupg2 + module_dependencies: + pkgs: + - gcc + - python + - curl + - libffi-devel + - python-devel + - openssl-devel + pip_deps: + - hvac + - testinfra + - boto3 + + +Debian: + gpg_pkg: gnupg2 + platform: linux_amd64 + module_dependencies: + pkgs: + - gcc + - python + 
- curl + - libffi-dev + - python-dev + - libssl-dev + pip_deps: + - pyopenssl + - hvac + - testinfra + - boto3 diff --git a/salt/vault/service.sls b/salt/vault/service.sls new file mode 100644 index 0000000..7befad4 --- /dev/null +++ b/salt/vault/service.sls @@ -0,0 +1,24 @@ +{% from "vault/map.jinja" import vault with context %} + +manage_selinux_mode: + selinux.mode: + - name: permissive + +vault_service_init_file_managed: + file.managed: + - name: {{ vault.service.path }} + - source: {{ vault.service.source }} + - template: jinja + +vault_service_running: + service.running: + - name: vault + - enable: True + - reload: True + - require: + - selinux: manage_selinux_mode + - watch: + - archive: vault_package_install_archive_extracted + - file: vault_configure_service_file + + diff --git a/scripts/appscript.sh b/scripts/appscript.sh new file mode 100644 index 0000000..1efe12a --- /dev/null +++ b/scripts/appscript.sh @@ -0,0 +1,63 @@ +#!/bin/bash +set -eu -o pipefail + +[[ $# -lt 5 ]] && { + echo "Usage $0 " >&2 + echo " Example: $0 bucket-foo/randomid/salt.zip 1.1.3 vault-data-table bb0392ea-f31b-4ef2-af9e-be18661a8246 vault/dev/token" >&2 + exit 1 +} + +# Required vars +SALT_ARCHIVE=$1 +VAULT_VERSION=$2 +DYNAMODB_TABLE=$3 +KMS_KEY_ID=$4 +SSM_PATH=$5 + +# Internal vars +AWS_AZ=$(curl -sSL http://169.254.169.254/latest/meta-data/placement/availability-zone) +SALT_DIR="/srv/salt" +ARCHIVE_NAME="salt_formula.zip" + +# Export standard aws envs +export AWS_DEFAULT_REGION=${AWS_AZ:0:${#AWS_AZ} - 1} + +# Export Vault local address +export VAULT_ADDR=http://127.0.0.1:8200 + +echo "[appscript]: Ensuring default salt srv location exists, ${SALT_DIR}..." +mkdir -p ${SALT_DIR} + +echo "[appscript]: Download salt formula archive file from s3://${SALT_ARCHIVE}..." +aws s3 cp "s3://${SALT_ARCHIVE}" ${ARCHIVE_NAME} + +echo "[appscript]: Unzip salt formula archive file to ${SALT_DIR}" +yum install unzip -y +unzip ${ARCHIVE_NAME} -d ${SALT_DIR} + +echo "[appscript]: Remove salt formula archive file ${ARCHIVE_NAME}" +rm ${ARCHIVE_NAME} + +echo "[appscript]: Updating salt grains..." +salt-call --local saltutil.sync_grains + +echo "[appscript]: Configuring salt to read ec2 metadata into grains..." +echo "metadata_server_grains: True" > /etc/salt/minion.d/metadata.conf + +echo "[appscript]: Setting required salt grains for vault..." +salt-call --local grains.setval vault \ +"{'version':'${VAULT_VERSION}', 'dynamodb_table':'${DYNAMODB_TABLE}', 'kms_key_id':'${KMS_KEY_ID}', 'region':'${AWS_DEFAULT_REGION}', 'ssm_path': '${SSM_PATH}'}" + +echo "[appscript]: Applying the vault install and configure states..." +salt-call --local --retcode-passthrough state.sls vault -l info 2>&1 | tee /var/log/salt_vault.log + +echo "[appscript]: Updating salt states to include custom vault's states..." +salt-call --local saltutil.sync_states + +echo "[appscript]: Initializing the vault..." +salt-call --local --retcode-passthrough state.sls vault.initialize -l info 2>&1 | tee /var/log/salt_vault_initialize.log + +echo "[appscript]: Vault's status" +vault status + +echo "[appscript]: Completed appscript vault successfully!" 
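
The fourth appscript argument is the id of the KMS key backing Vault's awskms
seal. The root module only reads that key through a data source, so the key
must exist before the stack is applied. A minimal sketch of pre-creating one
(the resource and alias names here are illustrative, not part of the module):

```hcl
resource "aws_kms_key" "vault" {
  description = "Auto-unseal key for Vault"
}

resource "aws_kms_alias" "vault" {
  name          = "alias/vault-unseal"
  target_key_id = "${aws_kms_key.vault.key_id}"
}
```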
diff --git a/variables.tf b/variables.tf
old mode 100755
new mode 100644
index 8b13789..c4a77a1
--- a/variables.tf
+++ b/variables.tf
@@ -1 +1,205 @@
+###
+### REQUIRED VARIABLES
+###
+variable "name" {
+  description = "Name of the Vault stack; used to prefix resource names"
+  type        = "string"
+}
+
+variable "environment" {
+  description = "Type of environment -- must be one of: dev, test, prod"
+  type        = "string"
+}
+
+variable "bucket_name" {
+  description = "Name of the bucket used to store app scripts and Vault's salt formula"
+  type        = "string"
+}
+
+variable "key_pair_name" {
+  description = "Key pair to associate with launched instances"
+  type        = "string"
+}
+
+variable "ami_owner" {
+  description = "Account id/alias of the AMI owner"
+  type        = "string"
+}
+
+variable "additional_ips_allow_inbound" {
+  description = "List of additional IP addresses allowed inbound access to resources"
+  type        = "list"
+}
+
+variable "ec2_extra_security_group_ids" {
+  description = "List of additional security groups to add to EC2 instances"
+  type        = "list"
+}
+
+variable "ec2_subnet_ids" {
+  description = "List of subnets where EC2 instances will be launched"
+  type        = "list"
+}
+
+variable "lb_certificate_arn" {
+  type        = "string"
+  description = "ARN of an existing certificate to use on the load balancer"
+}
+
+variable "lb_subnet_ids" {
+  description = "List of subnets to associate with the load balancer"
+  type        = "list"
+}
+
+variable "vault_version" {
+  description = "Version of Vault to be installed on servers"
+  type        = "string"
+}
+
+variable "vault_url" {
+  type        = "string"
+  description = "The DNS address that Vault will be accessible at. Example: vault.domain.net"
+}
+
+variable "kms_key_id" {
+  description = "ID of the AWS KMS key used for the auto-unseal operation when Vault is initialized"
+  type        = "string"
+}
+
+variable "dynamodb_table" {
+  description = "Name of the DynamoDB table to be used as the storage backend for Vault"
+  type        = "string"
+}
+
+variable "route53_zone_id" {
+  type        = "string"
+  description = "Zone ID for the domain"
+}
+
+###
+### OPTIONAL VARIABLES
+###
+variable "ami_name_filter" {
+  description = "Name filter used to select the AMI"
+  type        = "string"
+  default     = "spel-minimal-centos-7-hvm-*.x86_64-gp2"
+}
+
+variable "ami_name_regex" {
+  description = "Regex used to fine-tune AMI filtering"
+  type        = "string"
+  default     = "spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2"
+}
+
+variable "instance_type" {
+  default     = "t2.medium"
+  description = "Amazon EC2 instance type"
+  type        = "string"
+}
+
+variable "lb_internal" {
+  description = "Boolean indicating whether the load balancer is internal or external"
+  type        = "string"
+  default     = "false"
+}
+
+variable "ingress_cidr_blocks" {
+  description = "(Optional) List of CIDR blocks allowed inbound to the load balancer."
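+  # NOTE: the default below ("0.0.0.0/0") opens the load balancer listeners
+  # to the entire internet; narrow this list for anything beyond testing.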
+ type = "list" + default = ["0.0.0.0/0"] +} + +variable "lb_ssl_policy" { + description = "The name of the SSL Policy for the listener" + type = "string" + default = "ELBSecurityPolicy-FS-2018-06" +} + +variable "min_capacity" { + type = "string" + description = "(Optional) Minimum number of instances in the Autoscaling Group" + default = "1" +} + +variable "max_capacity" { + type = "string" + description = "(Optional) Maximum number of instances in the Autoscaling Group" + default = "2" +} + +variable "desired_capacity" { + type = "string" + description = "(Optional) Desired number of instances in the Autoscaling Group" + default = "2" +} + +variable "pypi_index_url" { + type = "string" + description = "(Optional) URL to the PyPi Index" + default = "https://pypi.org/simple" +} + +variable "cfn_endpoint_url" { + type = "string" + description = "(Optional) URL to the CloudFormation Endpoint. e.g. https://cloudformation.us-east-1.amazonaws.com" + default = "https://cloudformation.us-east-1.amazonaws.com" +} + +variable "cfn_bootstrap_utils_url" { + type = "string" + description = "(Optional) URL to aws-cfn-bootstrap-latest.tar.gz" + default = "https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz" +} + +variable "cloudwatch_agent_url" { + type = "string" + description = "(Optional) S3 URL to CloudWatch Agent installer. Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip" + default = "" +} + +variable "watchmaker_config" { + type = "string" + description = "(Optional) URL to a Watchmaker config file" + default = "" +} + +variable "watchmaker_ou_path" { + type = "string" + description = "(Optional) DN of the OU to place the instance when joining a domain. If blank and WatchmakerEnvironment enforces a domain join, the instance will be placed in a default container. 
Leave blank if not joining a domain, or if WatchmakerEnvironment is false" + default = "" +} + +variable "watchmaker_admin_groups" { + type = "string" + description = "(Optional) Colon-separated list of domain groups that should have admin permissions on the EC2 instance" + default = "" +} + +variable "watchmaker_admin_users" { + type = "string" + description = "(Optional) Colon-separated list of domain users that should have admin permissions on the EC2 instance" + default = "" +} + +variable "toggle_update" { + default = "A" + description = "(Optional) Toggle that triggers a stack update by modifying the launch config, resulting in new instances; must be one of: A or B" + type = "string" +} + +variable "route53_enabled" { + description = "Creates Route53 DNS entries for Vault automatically" + default = false +} + +variable "tags" { + description = "(Optional) list of tags to include with resource" + type = "map" + default = {} +} + +variable "ip_data_url" { + description = "URL to get ip address of the current user" + type = "string" + default = "http://ipv4.icanhazip.com" +} From 5928213a0963e80b2588de44b97ce4d75b3d40f4 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 7 Aug 2019 09:35:02 -0400 Subject: [PATCH 02/34] Adding custom saltstack modules to handle configurations --- README.md | 1 + main.tf | 37 +- modules/iam/iam_policy.json | 3 +- salt/_modules/vault.py | 881 +++++++++++++++++++++++++++++++ salt/_states/vault.py | 37 +- salt/_utils/vault.py | 95 ++++ salt/vault/configure.sls | 3 - salt/vault/initialize.sls | 1 + salt/vault/install.sls | 11 +- salt/vault/maps/osfamilymap.yaml | 2 + salt/vault/service.sls | 2 - salt/vault/sync.sls | 23 + scripts/appscript.sh | 72 ++- tests/bucket/main.tf | 18 + tests/module_test.go | 67 +++ variables.tf | 5 + 16 files changed, 1204 insertions(+), 54 deletions(-) create mode 100644 salt/_modules/vault.py create mode 100644 salt/_utils/vault.py create mode 100644 salt/vault/sync.sls create mode 100644 tests/bucket/main.tf create mode 100644 tests/module_test.go diff --git a/README.md b/README.md index 137b8f2..d5e6e18 100755 --- a/README.md +++ b/README.md @@ -14,6 +14,7 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy | cfn\_bootstrap\_utils\_url | (Optional) URL to aws-cfn-bootstrap-latest.tar.gz | string | `"https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz"` | no | | cfn\_endpoint\_url | (Optional) URL to the CloudFormation Endpoint. e.g. https://cloudformation.us-east-1.amazonaws.com | string | `"https://cloudformation.us-east-1.amazonaws.com"` | no | | cloudwatch\_agent\_url | (Optional) S3 URL to CloudWatch Agent installer. 
Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip | string | `""` | no | +| configs\_path | Path to directory that contains configuration files for vault | string | `""` | no | | desired\_capacity | (Optional) Desired number of instances in the Autoscaling Group | string | `"2"` | no | | dynamodb\_table | Name of the Dynamodb to be used as storage backend for Vault | string | n/a | yes | | ec2\_extra\_security\_group\_ids | List of additional security groups to add to EC2 instances | list | n/a | yes | diff --git a/main.tf b/main.tf index 9d70373..a95204c 100644 --- a/main.tf +++ b/main.tf @@ -9,14 +9,15 @@ terraform { locals { name_id = "${var.name}-${random_string.this.result}" vpc_id = "${data.aws_subnet.lb.0.vpc_id}" - role_name = "INSTANCE_VAULT_${data.aws_caller_identity.current.account_id}" + role_name = "${upper(var.name)}_INSTANCE_${data.aws_caller_identity.current.account_id}" ssm_root_path = "vault/${var.environment}/${data.aws_caller_identity.current.account_id}/${var.name}" public_ip = "${chomp(data.http.ip.body)}/32" allow_inbound = "${compact(distinct(concat(list(local.public_ip), var.additional_ips_allow_inbound)))}" archive_file_name = "salt.zip" + configs_file_name = "configs.zip" appscript_file_name = "appscript.sh" - archive_file_path = "${path.module}/.files/${local.archive_file_name}" - appscript_file_path = "${path.module}/scripts/${local.appscript_file_name}" + archive_dir_path = "${path.module}/.files" + appscript_dir_path = "${path.module}/scripts" tags = { Environment = "${var.environment}" @@ -62,8 +63,14 @@ data "aws_kms_key" "this" { data "archive_file" "salt" { type = "zip" - source_dir = "salt" - output_path = "${local.archive_file_path}" + source_dir = "${path.module}/salt" + output_path = "${local.archive_dir_path}/${local.archive_file_name}" +} +data "archive_file" "configs" { + count = "${var.configs_path == "" ? 0 : 1}" + type = "zip" + source_dir = "${var.configs_path}" + output_path = "${local.archive_dir_path}/${local.configs_file_name}" } # Manage Bucket module @@ -96,15 +103,23 @@ resource "random_string" "this" { resource "aws_s3_bucket_object" "salt_zip" { bucket = "${module.s3_bucket.bucket_name}" key = "${random_string.this.result}/${local.archive_file_name}" - source = "${local.archive_file_path}" + source = "${local.archive_dir_path}/${local.archive_file_name}" etag = "${data.archive_file.salt.output_md5}" } +resource "aws_s3_bucket_object" "configs_zip" { + count = "${var.configs_path == "" ? 0 : 1}" + bucket = "${module.s3_bucket.bucket_name}" + key = "${random_string.this.result}/${local.configs_file_name}" + source = "${local.archive_dir_path}/${local.configs_file_name}" + etag = "${data.archive_file.configs.*.output_md5[count.index]}" +} + resource "aws_s3_bucket_object" "app_script" { bucket = "${module.s3_bucket.bucket_name}" key = "${random_string.this.result}/${local.appscript_file_name}" - source = "${local.appscript_file_path}" - etag = "${filemd5("${local.appscript_file_path}")}" + source = "${local.appscript_dir_path}/${local.appscript_file_name}" + etag = "${filemd5("${local.appscript_dir_path}/${local.appscript_file_name}")}" } # Manage domain record @@ -268,12 +283,15 @@ resource "aws_security_group_rule" "ssh" { # Prepare appscript parameters locals { + # combine key to configs s3 object, otherwise pass 'n/a' to appscript + s3_configs_key = "${var.configs_path == "" ? 
"n/a" : "${module.s3_bucket.bucket_name}/${random_string.this.result}/${local.configs_file_name}"}" params_for_appscript = [ "${module.s3_bucket.bucket_name}/${random_string.this.result}/${local.archive_file_name}", + "${local.s3_configs_key}", "${var.vault_version}", "${var.dynamodb_table}", "${data.aws_kms_key.this.key_id}", - "${local.ssm_root_path}", + "${local.ssm_root_path}" ] appscript_url = "s3://${module.s3_bucket.bucket_name}/${random_string.this.result}/${local.appscript_file_name}" @@ -296,6 +314,7 @@ module "autoscaling_group" { CfnEndpointUrl = "${var.cfn_endpoint_url}" CloudWatchAgentUrl = "${var.cloudwatch_agent_url}" + CloudWatchAppLogs = ["/var/log/salt_vault.log", "/var/log/salt_vault_initialize.log", "/var/log/salt_vault_sync.log"] KeyPairName = "${var.key_pair_name}" InstanceRole = "${module.iam.profile_name}" InstanceType = "${var.instance_type}" diff --git a/modules/iam/iam_policy.json b/modules/iam/iam_policy.json index 0021fe3..dca26ec 100644 --- a/modules/iam/iam_policy.json +++ b/modules/iam/iam_policy.json @@ -34,7 +34,8 @@ }, { "Action": [ - "ssm:PutParameter" + "ssm:PutParameter", + "ssm:GetParameter" ], "Effect": "Allow", "Resource": [ diff --git a/salt/_modules/vault.py b/salt/_modules/vault.py new file mode 100644 index 0000000..1153a22 --- /dev/null +++ b/salt/_modules/vault.py @@ -0,0 +1,881 @@ +# -*- coding: utf-8 -*- +""" +This module provides methods for interacting with Hashicorp Vault via the HVAC +library. +""" +from __future__ import absolute_import + +import logging +import hashlib +import json +import os +import glob +from collections import OrderedDict +from datetime import datetime, timedelta + + +import salt.config +import salt.syspaths +import salt.utils +import salt.exceptions + + +log = logging.getLogger(__name__) + +try: + import hvac + DEPS_INSTALLED = True +except: + log.debug('Unable to import the dependencies...') + DEPS_INSTALLED = False + +class InsufficientParameters(Exception): + pass + + +def __virtual__(): + return DEPS_INSTALLED + + +class VaultAuthMethod: + """ + Vault authentication method container + """ + type = None + path = None + description = None + config = None + auth_config = None + extra_config = None + + def __init__(self, type, path, description, config=None, auth_config=None, extra_config=None): + """ + Instanciate class + + :param type: Authentication type + :type type: str + :param path: Authentication mount point + :type path: str + :param description: Authentication description + :type description: str + :param config: Authentication config + :type config: dict + :param auth_config: Authentification specific configuration + :type auth_config: dict + :param extra_config: Extra Authentification configurations + :type extra_config: dict + """ + self.type = type + self.path = path.replace("/", "") + self.description = (description if description else "") + self.config = {} + for elem in config: + if config[elem] != "": + self.config[elem] = config[elem] + self.auth_config = auth_config + self.extra_config = extra_config + + def get_unique_id(self): + """ + Return a unique hash by auth method only using the type and path + + :return: str + """ + unique_str = str(self.type + self.path) + sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() + return sha256_hash + + def get_tuning_hash(self): + """ + Return a unique ID per tuning configuration + + :return: str + """ + conf_str = self.description + str(self.config) + sha256_hash = hashlib.sha256(conf_str.encode()).hexdigest() + return sha256_hash + + def 
__eq__(self, other): + return self.get_unique_id() == other.get_unique_id() + + def __repr__(self): + return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % + (self.path, self.type, self.description, str(self.config), + self.get_unique_id())) + + +class VaultSecretEngine: + """ + Vault secrete engine container + """ + type = None + path = None + description = None + config = None + secret_config = None + extra_config = None + + def __init__(self, type, path, description, config=None, secret_config=None, extra_config=None): + """ + Instantiate Class + + :param type: Secret type + :type type: str + :param path: Secret mount point + :type path: str + :param description: Secret description + :type description: str + :param config: Secret basic config + :type config: dict + :param secret_config: Secret specific configuration + :type secret_config: dict + :param extra_config: Secret extra configuration + :type extra_config: dict + """ + self.type = type + self.path = path.replace("/", "") + self.description = (description if description else "") + self.config = dict() + self.config["force_no_cache"] = False + for elem in config: + if config[elem] != "": + self.config[elem] = config[elem] + self.secret_config = secret_config + self.extra_config = extra_config + + def get_unique_id(self): + """ + Return a unique hash by secret engine only using the type and path + + :return: str + """ + unique_str = str(self.type + self.path) + sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() + return sha256_hash + + def __eq__(self, other): + return self.get_unique_id() == other.get_unique_id() + + def __repr__(self): + return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % + (self.path, self.type, self.description, str(self.config), + self.get_unique_id())) + + +class VaultAuditDevice: + type = None + path = None + description = None + options = None + + def __init__(self, type, path, description, options): + self.type = type + self.path = path.replace("/", "") + self.description = (description if description else "") + self.options = options + + def get_device_unique_id(self): + unique_str = str(self.type + self.path + + self.description + str(self.options)) + sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() + return sha256_hash + + def __eq__(self, other): + return self.get_device_unique_id() == other.get_device_unique_id() + + def __repr__(self): + return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % + (self.path, self.type, self.description, str(self.options), + self.get_device_unique_id())) + + +class VaultPolicyManager(): + """ + Module for managing policies within Vault + """ + client = None + local_policies = [] + remote_policies = [] + policies_folder = '' + ret = {} + + def __init__(self, policies_dir_path): + log.info("Initializing Vault Policy Manager...") + self.policies_folder = policies_dir_path + + def get_remote_policies(self): + """ + Reading policies from configs folder + """ + log.info('Retrieving policies from vault...') + try: + policies_resp = self.client.sys.list_policies() + + for policy in policies_resp['data']['policies']: + if not (policy == 'root' or policy == 'default'): + self.remote_policies.append(policy) + + log.debug('Current configured policies: %s' % + ', '.join(self.remote_policies)) + + except Exception as e: + log.exception(e) + + log.info('Finished retrieving policies from vault.') + + def get_local_policies(self): + """ + Reading policies from configs folder + """ + log.info('Loading policies from 
local config folder...') + for policy_file in glob.iglob(os.path.join(self.policies_folder, "*.hcl")): + name = os.path.splitext(os.path.basename(policy_file))[0] + prefix = policy_file.split(os.sep)[-2] + log.debug("Local policy %s - prefix: %s - name: %s found" + % (policy_file, prefix, name)) + + with open(policy_file, 'r') as fd: + self.local_policies.append({ + "name": name, + "content": fd.read() + }) + log.info('Finished loading policies local config folder.') + + def push_policies(self): + """ + Sync policies from configs folder to vault + """ + log.info('Pushing policies from local config folder to vault...') + new_policies = [] + for policy in self.local_policies: + self.client.sys.create_or_update_policy( + name=policy['name'], + policy=policy['content'] + ) + if policy['name'] in self.remote_policies: + log.debug('Policy "%s" has been updated.', policy["name"]) + else: + new_policies.append(policy["name"]) + log.debug('Policy "%s" has been created.', policy["name"]) + + log.info('Finished pushing policies local config folder to vault.') + + # Build return object + + self.ret['old'] = self.remote_policies + if len(new_policies) > 0: + self.ret['new'] = json.loads(json.dumps(new_policies)) + else: + self.ret['new'] = "No changes" + + def cleanup_policies(self): + """ + Cleaning up policies + """ + log.info('Cleaning up vault policies...') + has_change = False + for policy in self.remote_policies: + if policy not in [pol['name'] for pol in self.local_policies]: + log.debug( + '"%s" is not found in configs folder. Removing it from vault...', policy) + has_change = True + self.client.sys.delete_policy(name=policy) + log.debug('"%s" is removed.', policy) + + if has_change: + self.ret['new'] = json.loads(json.dumps( + [ob['name'] for ob in self.local_policies])) + + log.info('Finished cleaning up vault policies.') + + def run(self): + """ + Control the executions + """ + log.info('-------------------------------------') + self.client = __utils__['vault.build_client']() + self.get_remote_policies() + self.get_local_policies() + self.push_policies() + self.cleanup_policies() + log.info('-------------------------------------') + return self.ret + + +class VaultAuthManager(): + """ + Module for managing Vault Authentication Methods + """ + auth_methods_remote = [] + auth_methods_local = [] + ldap_groups = [] + config_path = '' + ret = {} + client = None + + def __init__(self, config_path): + log.info("Initializing Vault Auth Manager...") + self.config_path = config_path + + def get_remote_auth_methods(self): + """ + Retrieve auth methods from vault + """ + log.info('Retrieving auth methods from Vault...') + auth_resp = self.client.sys.list_auth_methods() + log.debug('Current auth methods from Vault: %s', + ', '.join(auth_resp['data'].keys())) + + for auth_method in auth_resp['data']: + self.auth_methods_remote.append( + VaultAuthMethod( + type=auth_resp[auth_method]['type'], + path=(auth_resp[auth_method]["path"] + if 'path' in auth_resp[auth_method] else auth_method), + description=auth_resp[auth_method]["description"], + config=OrderedDict( + sorted(auth_resp[auth_method]["config"].items())) + ) + ) + + log.info('Finished retrieving auth methods from vault.') + + def get_local_auth_methods(self): + log.info('Loading auth methods form local config file: %s', + self.config_path) + config = __utils__['vault.load_config_file']( + config_path=self.config_path) + for auth_method in config["auth-methods"]: + auth_config = None + extra_config = None + + if "auth_config" in auth_method: + 
+                auth_config = OrderedDict(
+                    sorted(auth_method["auth_config"].items()))
+
+            if "extra_config" in auth_method:
+                extra_config = OrderedDict(
+                    sorted(auth_method["extra_config"].items()))
+
+            self.auth_methods_local.append(
+                VaultAuthMethod(
+                    type=auth_method["type"],
+                    path=auth_method["path"],
+                    description=auth_method["description"],
+                    config=OrderedDict(sorted(auth_method["config"].items())),
+                    auth_config=auth_config,
+                    extra_config=extra_config
+                )
+            )
+        log.info('Finished loading auth methods from local config file.')
+
+    def configure_auth_methods(self):
+        log.info('Processing and configuring auth methods...')
+        new_auth_methods = []
+        for auth_method in self.auth_methods_local:
+            log.debug('Checking if auth method "%s" is enabled...',
+                      auth_method.path)
+            if auth_method in self.auth_methods_remote:
+                log.debug(
+                    'Auth method "%s" is already enabled. Tuning...', auth_method.path)
+                self.client.sys.tune_auth_method(
+                    path=auth_method.path,
+                    description=auth_method.description,
+                    default_lease_ttl=auth_method.config["default_lease_ttl"],
+                    max_lease_ttl=auth_method.config["max_lease_ttl"]
+                )
+                log.debug('Auth method "%s" is tuned.', auth_method.type)
+            else:
+                log.debug(
+                    'Auth method "%s" is not enabled. Enabling now...', auth_method.path)
+                self.client.sys.enable_auth_method(
+                    method_type=auth_method.type,
+                    path=auth_method.path,
+                    description=auth_method.description,
+                    config=auth_method.config
+                )
+                log.debug('Auth method "%s" is enabled.', auth_method.type)
+                new_auth_methods.append(auth_method.type)
+
+            # Provision config for specific auth method
+            if auth_method.auth_config:
+                if auth_method.type == "ldap":
+                    log.debug('Provisioning configuration for LDAP...')
+                    self.client.auth.ldap.configure(**auth_method.auth_config)
+                    log.debug('Configuration for LDAP is provisioned.')
+            else:
+                log.debug(
+                    'Auth method "%s" does not contain any specific configurations.', auth_method.type)
+
+            if auth_method.extra_config:
+                log.debug(
+                    'Provisioning extra configurations for auth method "%s"', auth_method.type)
+                # Get LDAP group mapping from vault
+                try:
+                    ldap_list_group_response = self.client.auth.ldap.list_groups()
+                    if ldap_list_group_response is not None:
+                        self.ldap_groups = ldap_list_group_response["data"]["keys"]
+
+                except Exception as e:
+                    log.exception(e)
+
+                log.debug("LDAP groups from vault: %s", str(self.ldap_groups))
+
+                # Update LDAP group mapping
+                log.debug(
+                    'Writing LDAP group -> Policy mappings for "%s"', str(auth_method.path))
+                local_config_groups = auth_method.extra_config["group_policy_map"]
+                for key in local_config_groups:
+                    log.debug('LDAP Group ["%s"] -> Policies %s',
+                              str(key), local_config_groups[key])
+                    try:
+                        self.client.auth.ldap.create_or_update_group(
+                            name=key,
+                            policies=local_config_groups[key]
+                        )
+                    except Exception as e:
+                        log.exception(e)
+
+                # Clean up LDAP group mapping
+                if self.ldap_groups is not None:
+                    for group in self.ldap_groups:
+                        if group in {k.lower(): v for k, v in local_config_groups.items()}:
+                            log.debug(
+                                'LDAP group mapping ["%s"] exists in configuration, no cleanup necessary', group)
+                        else:
+                            log.info(
+                                'LDAP group mapping ["%s"] does not exist in configuration, deleting...', group)
+                            self.client.auth.ldap.delete_group(
+                                name=group
+                            )
+                            log.info(
+                                'LDAP group mapping ["%s"] deleted.', group)
+            else:
+                log.debug(
+                    'Auth method "%s" does not contain any extra configurations.', auth_method.type
+                )
+        log.info('Finished processing and configuring auth methods.')
+
+        # Build return object
+        self.ret['old'] = json.loads(json.dumps(
+            [ob.type for ob in self.auth_methods_remote]))
+
+        if len(new_auth_methods) > 0:
+            self.ret['new'] = json.loads(json.dumps(new_auth_methods))
+        else:
+            self.ret['new'] = "No changes"
+
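The loader above expects an "auth-methods" list in the YAML file. One entry, shown as its parsed-Python equivalent (the key names are the ones the code reads; the LDAP values themselves are purely illustrative):

    auth_method = {
        "type": "ldap",
        "path": "ldap",
        "description": "LDAP directory auth",
        "config": {"default_lease_ttl": "192h", "max_lease_ttl": "192h"},
        # passed straight through to client.auth.ldap.configure(**auth_config)
        "auth_config": {"url": "ldaps://ldap.example.com"},
        # group -> policy mappings reconciled by the cleanup loop above
        "extra_config": {"group_policy_map": {"vault-admins": ["admin_policy"]}},
    }
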
+    def cleanup_auth_methods(self):
+        log.info('Cleaning up auth methods...')
+        has_change = False
+
+        for auth_method in self.auth_methods_remote:
+            if auth_method not in self.auth_methods_local:
+                has_change = True
+                log.info(
+                    'Auth method "%s" does not exist in configuration. Disabling...', auth_method.type)
+                self.client.sys.disable_auth_method(
+                    path=auth_method.path
+                )
+                log.info('Auth method "%s" is disabled.', auth_method.type)
+        log.info('Finished cleaning up auth methods.')
+
+        if has_change:
+            self.ret['new'] = json.loads(json.dumps(
+                [ob.type for ob in self.auth_methods_local]))
+
+    def run(self):
+        """
+        Control the executions
+        """
+        log.info('-------------------------------------')
+        self.client = __utils__['vault.build_client']()
+        self.get_remote_auth_methods()
+        self.get_local_auth_methods()
+        self.configure_auth_methods()
+        self.cleanup_auth_methods()
+        log.info('-------------------------------------')
+
+        return self.ret
+
+
+class VaultSecretsManager():
+    """
+    Module for handling Vault secret engines
+    """
+    client = None
+    config_path = ''
+    remote_secret_engines = []
+    local_secret_engines = []
+    ret = {}
+
+    def __init__(self, config_path):
+        log.info("Initializing Vault Secret Manager...")
+        self.config_path = config_path
+
+    def get_remote_secrets_engines(self):
+        """
+        Retrieve secret engines from vault server
+        """
+        log.info('Retrieving secrets engines from vault')
+        try:
+            secrets_engines_resp = self.client.sys.list_mounted_secrets_engines()
+            for engine in secrets_engines_resp['data']:
+                self.remote_secret_engines.append(
+                    VaultSecretEngine(
+                        type=secrets_engines_resp[engine]['type'],
+                        path=(secrets_engines_resp[engine]["path"]
+                              if 'path' in secrets_engines_resp[engine] else engine),
+                        description=secrets_engines_resp[engine]["description"],
+                        config=OrderedDict(
+                            sorted(secrets_engines_resp[engine]["config"].items()))
+                    )
+                )
+            self.remote_secret_engines.sort(key=lambda x: x.type)
+        except Exception as e:
+            log.exception(e)
+        log.info('Finished retrieving secrets engines from vault.')
+
+    def get_local_secrets_engines(self):
+        """
+        Retrieve secret engines from the local config file
+        """
+        log.debug('Reading secret engines from config file...')
+        try:
+            config = __utils__['vault.load_config_file'](
+                config_path=self.config_path)
+
+            for secret_engine in config['secrets-engines']:
+                secret_config = None
+                extra_config = None
+                if 'secret_config' in secret_engine:
+                    secret_config = OrderedDict(
+                        sorted(secret_engine["secret_config"].items()))
+
+                if 'extra_config' in secret_engine:
+                    extra_config = OrderedDict(
+                        sorted(secret_engine["extra_config"].items()))
+
+                self.local_secret_engines.append(
+                    VaultSecretEngine(
+                        type=secret_engine["type"],
+                        path=secret_engine["path"],
+                        description=secret_engine["description"],
+                        config=OrderedDict(
+                            sorted(secret_engine["config"].items())),
+                        secret_config=secret_config,
+                        extra_config=extra_config
+                    )
+                )
+            self.local_secret_engines.sort(key=lambda x: x.type)
+        except Exception as e:
+            log.exception(e)
+        log.debug('Finished reading secrets engines from config file.')
+
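As with auth methods, the file read above carries a "secrets-engines" list. One entry in its parsed form (keys match what the code dereferences, including "service_account_name" and "ttl" per role; the Active Directory details are illustrative only):

    secret_engine = {
        "type": "ad",
        "path": "ad",
        "description": "Active Directory secrets",
        "config": {"default_lease_ttl": "192h", "max_lease_ttl": "192h"},
        # passed straight through to client.secrets.activedirectory.configure(**secret_config)
        "secret_config": {
            "binddn": "cn=vault,ou=Users,dc=example,dc=com",
            "bindpass": "example-password",
            "url": "ldaps://ad.example.com",
        },
        # reconciled against client.secrets.activedirectory.list_roles()
        "extra_config": {
            "roles": {
                "svc-app": {"service_account_name": "svc-app@example.com", "ttl": "1h"},
            },
        },
    }
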
+    def configure_secrets_engines(self):
+        log.info('Processing and configuring secrets engines...')
+        new_secrets_engines = []
+        for secret_engine in self.local_secret_engines:
+            log.debug('Checking if secret engine "%s" at path "%s" is enabled...',
+                      secret_engine.type,
+                      secret_engine.path)
+            if secret_engine in self.remote_secret_engines:
+                log.debug(
+                    'Secret engine "%s" at path "%s" is already enabled. Tuning...',
+                    secret_engine.type,
+                    secret_engine.path)
+
+                self.client.sys.tune_mount_configuration(
+                    path=secret_engine.path,
+                    description=secret_engine.description,
+                    default_lease_ttl=secret_engine.config["default_lease_ttl"],
+                    max_lease_ttl=secret_engine.config["max_lease_ttl"]
+                )
+                log.debug('Secret engine "%s" at path "%s" is tuned.',
+                          secret_engine.type, secret_engine.path)
+            else:
+                log.debug(
+                    'Secret engine "%s" at path "%s" is not enabled. Enabling now...',
+                    secret_engine.type,
+                    secret_engine.path)
+                new_secrets_engines.append(secret_engine.type)
+                self.client.sys.enable_secrets_engine(
+                    backend_type=secret_engine.type,
+                    path=secret_engine.path,
+                    description=secret_engine.description,
+                    config=secret_engine.config
+                )
+                log.debug('Secret engine "%s" at path "%s" is enabled.',
+                          secret_engine.type, secret_engine.path)
+
+            if secret_engine.secret_config is not None:
+                log.info(
+                    'Provisioning specific configurations for "%s" secrets engine...', secret_engine.type)
+
+                if secret_engine.type == 'ad':
+                    self.client.secrets.activedirectory.configure(
+                        **secret_engine.secret_config
+                    )
+                if secret_engine.type == 'database':
+                    self.client.secrets.database.configure(
+                        **secret_engine.secret_config
+                    )
+
+                log.info(
+                    'Finished provisioning specific configurations for "%s" secrets engine.', secret_engine.type)
+
+            if secret_engine.extra_config is not None:
+                log.info(
+                    'Provisioning extra configurations for "%s" secrets engine...', secret_engine.type)
+
+                if secret_engine.type == 'ad':
+                    # Get roles from vault
+                    existing_roles = None
+                    try:
+                        existing_roles = self.client.secrets.activedirectory.list_roles()
+                        log.debug(existing_roles)
+                    except Exception as e:
+                        log.exception(e)
+
+                    # Add new roles
+                    local_roles = secret_engine.extra_config['roles']
+                    for key in local_roles:
+                        log.debug('AD Role ["%s"] -> Role %s',
+                                  str(key), local_roles[key])
+                        try:
+                            self.client.secrets.activedirectory.create_or_update_role(
+                                name=key,
+                                service_account_name=local_roles[key]['service_account_name'],
+                                ttl=local_roles[key]['ttl']
+                            )
+                        except Exception as e:
+                            log.exception(e)
+                            raise salt.exceptions.SaltInvocationError(e)
+
+                    # Remove missing roles
+                    if existing_roles is not None:
+                        for role in existing_roles:
+                            if role in {k.lower(): v for k, v in local_roles.items()}:
+                                log.debug(
+                                    'AD role ["%s"] exists in configuration, no cleanup necessary', role)
+                            else:
+                                log.info(
+                                    'AD role ["%s"] does not exist in configuration, deleting...', role)
+                                self.client.secrets.activedirectory.delete_role(
+                                    name=role
+                                )
+                                log.info(
+                                    'AD role ["%s"] has been deleted.', role)
+            else:
+                log.debug(
+                    'Secret engine "%s" does not contain any extra configurations.', secret_engine.type
+                )
+        log.info('Finished processing and configuring secrets engines.')
+
+        # Build return object
+        self.ret['old'] = json.loads(json.dumps([
+            "Type: {} - Path: {}".format(ob.type, ob.path) for ob in self.remote_secret_engines]))
+
+        if len(new_secrets_engines) > 0:
+            self.ret['new'] = json.loads(json.dumps(new_secrets_engines))
+        else:
+            self.ret['new'] = "No changes"
+
+    def cleanup_secrets_engines(self):
+        log.info('Cleaning up secrets engines...')
+        has_changes = False
+        for secret_engine in self.remote_secret_engines:
+            if not (secret_engine.type == "system" or
+                    secret_engine.type == "cubbyhole" or
+                    secret_engine.type == "identity" or
+                    secret_engine.type == "generic"):
+                if secret_engine in self.local_secret_engines:
+                    log.debug('Secrets engine "%s" at path "%s" exists in configuration, no cleanup necessary.',
+                              secret_engine.type, secret_engine.path)
+                else:
+                    log.debug('Secrets engine "%s" at path "%s" does not exist in configuration. Disabling...',
+                              secret_engine.type, secret_engine.path)
+                    has_changes = True
+                    self.client.sys.disable_secrets_engine(
+                        path=secret_engine.path
+                    )
+                    log.info('Secrets engine "%s" at path "%s" is disabled.',
+                            secret_engine.type, secret_engine.path)
+        log.info('Finished cleaning up secrets engines.')
+
+        if has_changes:
+            self.ret['new'] = json.loads(json.dumps([
+                "Type: {} - Path: {}".format(ob.type, ob.path) for ob in self.local_secret_engines]))
+
+    def run(self):
+        """
+        Control the executions
+        """
+        log.info('-------------------------------------')
+        self.client = __utils__['vault.build_client']()
+        self.get_remote_secrets_engines()
+        self.get_local_secrets_engines()
+        self.configure_secrets_engines()
+        self.cleanup_secrets_engines()
+        log.info('-------------------------------------')
+
+        return self.ret
+
+
+class VaultAuditManager():
+    """
+    Module for handling Vault audit devices
+    """
+    client = None
+    remote_audit_devices = []
+    local_audit_devices = []
+    config_path = ''
+    ret = {}
+
+    def __init__(self, config_path):
+        log.info("Initializing Vault Audit Manager...")
+        self.config_path = config_path
+
+    def get_remote_audit_devices(self):
+        log.info("Retrieving audit devices from vault...")
+        try:
+            audit_devices_resp = self.client.sys.list_enabled_audit_devices()
+            for device in audit_devices_resp['data']:
+                audit_device = audit_devices_resp[device]
+                self.remote_audit_devices.append(
+                    VaultAuditDevice(
+                        type=audit_device['type'],
+                        path=(audit_device["path"]
+                              if 'path' in audit_device else device),
+                        description=audit_device["description"],
+                        options=json.dumps(audit_device["options"])
+                    )
+                )
+        except Exception as e:
+            log.exception(e)
+        log.info('Finished retrieving audit devices from vault.')
+
+    def get_local_audit_devices(self):
+        log.info("Loading audit devices from local config...")
+        config = __utils__['vault.load_config_file'](
+            config_path=self.config_path)
+
+        if config:
+            for audit_device in config["audit-devices"]:
+                options = None
+                if 'options' in audit_device:
+                    options = json.dumps(audit_device["options"])
+                    log.debug(options)
+
+                self.local_audit_devices.append(
+                    VaultAuditDevice(
+                        type=audit_device["type"],
+                        path=audit_device["path"],
+                        description=audit_device["description"],
+                        options=options
+                    )
+                )
+        log.info('Finished loading audit devices from local config.')
+
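The audit config mirrors the other files with an "audit-devices" list. One parsed entry (keys as read above; "options" is round-tripped through json.dumps()/json.loads(), so two devices compare equal only when their options match exactly; the file path shown assumes the /etc/vault/logs directory created by the install state):

    audit_device = {
        "type": "file",
        "path": "file_log",
        "description": "File audit device",
        "options": {"file_path": "/etc/vault/logs/audit.log"},
    }
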
+    def configure_audit_devices(self):
+        log.info('Processing and configuring audit devices...')
+        new_audit_devices = []
+        for audit_device in self.local_audit_devices:
+            log.debug('Checking if audit device "%s" at path "%s" is enabled...',
+                      audit_device.type, audit_device.path)
+
+            if audit_device in self.remote_audit_devices:
+                log.debug('Audit device "%s" at path "%s" is already enabled.',
+                          audit_device.type, audit_device.path)
+            else:
+                log.debug(
+                    'Audit device "%s" at path "%s" is not enabled. Enabling now...',
+                    audit_device.type, audit_device.path)
+                new_audit_devices.append(audit_device.type)
+                self.client.sys.enable_audit_device(
+                    device_type=audit_device.type,
+                    path=audit_device.path,
+                    description=audit_device.description,
+                    options=json.loads(audit_device.options)
+                )
+                log.debug('Audit device "%s" at path "%s" is enabled.',
+                          audit_device.type, audit_device.path)
+
+        log.info('Finished processing audit devices.')
+        # Build return object
+        self.ret['old'] = json.loads(json.dumps(
+            [ob.type for ob in self.remote_audit_devices]))
+
+        if len(new_audit_devices) > 0:
+            self.ret['new'] = json.loads(json.dumps(new_audit_devices))
+        else:
+            self.ret['new'] = "No changes"
+
+    def cleanup_audit_devices(self):
+        log.info('Cleaning up audit devices...')
+        has_changes = False
+        for audit_device in self.remote_audit_devices:
+            if audit_device not in self.local_audit_devices:
+                log.info('Disabling audit device "%s" at path "%s"...',
+                        audit_device.type, audit_device.path)
+                has_changes = True
+                self.client.sys.disable_audit_device(
+                    path=audit_device.path
+                )
+        log.info('Finished cleaning up audit devices.')
+
+        if has_changes:
+            self.ret['new'] = json.loads(json.dumps(
+                [ob.type for ob in self.local_audit_devices]))
+
+    def run(self):
+        log.info('-------------------------------------')
+        self.client = __utils__['vault.build_client']()
+        self.get_remote_audit_devices()
+        self.get_local_audit_devices()
+        self.configure_audit_devices()
+        self.cleanup_audit_devices()
+        log.info('-------------------------------------')
+        return self.ret
+
+
+def auth_methods_synced(config_path):
+    """
+    Ensure all auth methods defined in the config file are synced with vault
+
+    :param config_path: path to the configuration file for auth methods
+    :returns: Result of the execution
+    :rtype: dict
+    """
+    return VaultAuthManager(config_path).run()
+
+
+def policies_synced(policies_dir_path):
+    """
+    Ensure all policies defined are synced with vault
+
+    :param policies_dir_path: path to the directory that contains all policies
+    :returns: Result of the execution
+    :rtype: dict
+    """
+    return VaultPolicyManager(policies_dir_path).run()
+
+
+def secrets_engines_synced(config_path):
+    """
+    Ensure all secrets engines defined in the config file are synced with vault
+
+    :param config_path: path to the configuration file for secrets engines
+    :returns: Result of the execution
+    :rtype: dict
+    """
+    return VaultSecretsManager(config_path).run()
+
+
+def audit_devices_synced(config_path):
+    """
+    Ensure all audit devices defined in the config file are synced with vault
+
+    :param config_path: path to the configuration file for audit devices
+    :returns: Result of the execution
+    :rtype: dict
+    """
+    return VaultAuditManager(config_path).run()
diff --git a/salt/_states/vault.py b/salt/_states/vault.py
index a5fe6b7..afdcb88 100644
--- a/salt/_states/vault.py
+++ b/salt/_states/vault.py
@@ -3,11 +3,10 @@
 import logging
 import os
 import json
+import sys
 
-import salt.config
-import salt.syspaths
-import salt.utils
-import salt.exceptions
+import hvac
+import boto3
 
 log = logging.getLogger(__name__)
 
@@ -21,6 +20,7 @@
 __all__ = ['initialize']
 
+
 def __virtual__():
     return DEPS_INSTALLED
 
@@ -40,9 +40,9 @@ def initialized(name, ssm_path, recovery_shares=5, 
recovery_threshold=3): } } - #upload root token ssm parameter store + # upload root token ssm parameter store if is_success: ssm_client = boto3.client('ssm') - #saving root token + # saving root token ssm_client.put_parameter( - Name = '/{}/{}'.format(ssm_path, 'root_token'), - Value = root_token, - Type = "SecureString", - Overwrite = True + Name='/{}/{}'.format(ssm_path, 'root_token'), + Value=root_token, + Type="SecureString", + Overwrite=True ) - #saving recovery keys + # saving recovery keys ssm_client.put_parameter( - Name = '/{}/{}'.format(ssm_path, 'recovery_keys'), - Value = json.dumps(recovery_keys), - Type = "SecureString", - Overwrite = True + Name='/{}/{}'.format(ssm_path, 'recovery_keys'), + Value=json.dumps(recovery_keys), + Type="SecureString", + Overwrite=True ) ret['comment'] = 'Vault has {}initialized'.format( '' if is_success else 'failed to be ') return ret + diff --git a/salt/_utils/vault.py b/salt/_utils/vault.py new file mode 100644 index 0000000..062444e --- /dev/null +++ b/salt/_utils/vault.py @@ -0,0 +1,95 @@ + +from __future__ import absolute_import, print_function, unicode_literals +import base64 +import logging +import os +import requests +import json +import time +import yaml +import hvac +import hashlib +from collections import OrderedDict +from functools import wraps + + +log = logging.getLogger(__name__) +logging.getLogger("requests").setLevel(logging.WARNING) + + +def build_client(url='http://localhost:8200', + token=None, + cert=None, + verify=True, + timeout=30, + proxies=None, + allow_redirects=True, + session=None): + + client = hvac.Client(url=url) + + client.token = os.environ.get('VAULT_TOKEN') + + return client + + +def load_config_file(config_path): + config = None + with open(os.path.join(config_path), 'r') as fd: + try: + config = yaml.load(fd) + + except yaml.YAMLError as e: + log.critical("Unable to load conf file: " + str(e)) + return False + return config + + +class VaultError(Exception): + def __init__(self, message=None, errors=None): + if errors: + message = ', '.join(errors) + + self.errors = errors + + super(VaultError, self).__init__(message) + + +class InvalidRequest(VaultError): + pass + + +class Unauthorized(VaultError): + pass + + +class Forbidden(VaultError): + pass + + +class InvalidPath(VaultError): + pass + + +class RateLimitExceeded(VaultError): + pass + + +class InternalServerError(VaultError): + pass + + +class VaultNotInitialized(VaultError): + pass + + +class VaultDown(VaultError): + pass + + +class UnexpectedError(VaultError): + pass + + +def vault_error(): + return VaultError diff --git a/salt/vault/configure.sls b/salt/vault/configure.sls index f54f51d..13b8730 100644 --- a/salt/vault/configure.sls +++ b/salt/vault/configure.sls @@ -1,9 +1,6 @@ {% from "vault/map.jinja" import vault with context %} -include: - - .service - {# only configure if vault is not in dev_mode #} {%- if not vault.dev_mode %} diff --git a/salt/vault/initialize.sls b/salt/vault/initialize.sls index 5732c94..3bfc97d 100644 --- a/salt/vault/initialize.sls +++ b/salt/vault/initialize.sls @@ -6,3 +6,4 @@ vault_initialize_server: - recovery_shares: {{ vault.recovery_shares }} - recovery_threshold: {{ vault.recovery_threshold }} - ssm_path: {{ vault.ssm_path }} + diff --git a/salt/vault/install.sls b/salt/vault/install.sls index 3346bbb..7ecb17c 100644 --- a/salt/vault/install.sls +++ b/salt/vault/install.sls @@ -19,7 +19,14 @@ vault_data_dir: - name: /etc/vault - user: vault - group: vault - - mode: '0700' + - mode: '0755' + +vault_logs_dir: + 
file.directory: + - name: /etc/vault/logs + - user: vault + - group: vault + - mode: '0755' vault_package_install_file_directory: file.directory: @@ -76,7 +83,6 @@ install_pip_executable: curl -L "https://bootstrap.pypa.io/get-pip.py" > get_pip.py sudo python get_pip.py pip==18.0.0 rm get_pip.py - - reload_modules: True install_python_dependencies: @@ -84,3 +90,4 @@ install_python_dependencies: - pkgs: {{ vault.module_dependencies.pip_deps | json }} - reload_modules: True - ignore_installed: True + diff --git a/salt/vault/maps/osfamilymap.yaml b/salt/vault/maps/osfamilymap.yaml index fdf245d..c27b7ce 100644 --- a/salt/vault/maps/osfamilymap.yaml +++ b/salt/vault/maps/osfamilymap.yaml @@ -15,6 +15,7 @@ RedHat: - hvac - testinfra - boto3 + - pyotp Debian: @@ -33,3 +34,4 @@ Debian: - hvac - testinfra - boto3 + - pyotp diff --git a/salt/vault/service.sls b/salt/vault/service.sls index 7befad4..d386160 100644 --- a/salt/vault/service.sls +++ b/salt/vault/service.sls @@ -20,5 +20,3 @@ vault_service_running: - watch: - archive: vault_package_install_archive_extracted - file: vault_configure_service_file - - diff --git a/salt/vault/sync.sls b/salt/vault/sync.sls new file mode 100644 index 0000000..2b13e7b --- /dev/null +++ b/salt/vault/sync.sls @@ -0,0 +1,23 @@ +{% from "vault/map.jinja" import vault with context %} + +Sync Vault Policies: + module.run: + - vault.policies_synced: + - policies_dir_path: "{{ vault.config_dir_path }}/policies" + +Sync Vault Authentication Methods: + module.run: + - vault.auth_methods_synced: + - config_path: "{{ vault.config_dir_path }}/auth_config.yml" + - required: + - module.run: Sync Vault Policies + +Sync Vault Secrets Engines: + module.run: + - vault.secrets_engines_synced: + - config_path: "{{ vault.config_dir_path }}/secrets_config.yml" + +Sync Vault Audit Devices: + module.run: + - vault.audit_devices_synced: + - config_path: "{{ vault.config_dir_path }}/audit_config.yml" diff --git a/scripts/appscript.sh b/scripts/appscript.sh index 1efe12a..ba174d2 100644 --- a/scripts/appscript.sh +++ b/scripts/appscript.sh @@ -1,42 +1,48 @@ #!/bin/bash set -eu -o pipefail -[[ $# -lt 5 ]] && { - echo "Usage $0 " >&2 - echo " Example: $0 bucket-foo/randomid/salt.zip 1.1.3 vault-data-table bb0392ea-f31b-4ef2-af9e-be18661a8246 vault/dev/token" >&2 +[[ $# -lt 6 ]] && { + echo "Usage $0 " >&2 + echo " Example: $0 bucket-foo/randomid/salt.zip bucket-foo/randomid/configs.zip 1.1.3 + vault-data-table xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx vault/dev/token + https://github.com/PremiereGlobal/vault-admin/releases/download/0.3.1/vadmin-linux-0.3.1.zip" >&2 exit 1 } # Required vars SALT_ARCHIVE=$1 -VAULT_VERSION=$2 -DYNAMODB_TABLE=$3 -KMS_KEY_ID=$4 -SSM_PATH=$5 +CONFIGS_ARCHIVE=$2 +VAULT_VERSION=$3 +DYNAMODB_TABLE=$4 +KMS_KEY_ID=$5 +SSM_PATH=$6 # Internal vars AWS_AZ=$(curl -sSL http://169.254.169.254/latest/meta-data/placement/availability-zone) -SALT_DIR="/srv/salt" -ARCHIVE_NAME="salt_formula.zip" # Export standard aws envs export AWS_DEFAULT_REGION=${AWS_AZ:0:${#AWS_AZ} - 1} # Export Vault local address export VAULT_ADDR=http://127.0.0.1:8200 +export SALT_DIR="/srv/salt" +export CONFIGURATION_PATH="/etc/vault/configs" +export ARCHIVE_FILE_NAME="salt_formula.zip" +export CONFIGS_FILE_NAME="vault_configs.zip" + +yum install unzip -y echo "[appscript]: Ensuring default salt srv location exists, ${SALT_DIR}..." mkdir -p ${SALT_DIR} echo "[appscript]: Download salt formula archive file from s3://${SALT_ARCHIVE}..." 
-aws s3 cp "s3://${SALT_ARCHIVE}" ${ARCHIVE_NAME} +aws s3 cp "s3://${SALT_ARCHIVE}" ${ARCHIVE_FILE_NAME} echo "[appscript]: Unzip salt formula archive file to ${SALT_DIR}" -yum install unzip -y -unzip ${ARCHIVE_NAME} -d ${SALT_DIR} +unzip ${ARCHIVE_FILE_NAME} -d ${SALT_DIR} -echo "[appscript]: Remove salt formula archive file ${ARCHIVE_NAME}" -rm ${ARCHIVE_NAME} +echo "[appscript]: Remove salt formula archive file ${ARCHIVE_FILE_NAME}" +rm ${ARCHIVE_FILE_NAME} echo "[appscript]: Updating salt grains..." salt-call --local saltutil.sync_grains @@ -46,18 +52,46 @@ echo "metadata_server_grains: True" > /etc/salt/minion.d/metadata.conf echo "[appscript]: Setting required salt grains for vault..." salt-call --local grains.setval vault \ -"{'version':'${VAULT_VERSION}', 'dynamodb_table':'${DYNAMODB_TABLE}', 'kms_key_id':'${KMS_KEY_ID}', 'region':'${AWS_DEFAULT_REGION}', 'ssm_path': '${SSM_PATH}'}" +"{'version':'${VAULT_VERSION}', 'dynamodb_table':'${DYNAMODB_TABLE}', 'kms_key_id':'${KMS_KEY_ID}', 'region':'${AWS_DEFAULT_REGION}', 'ssm_path': '${SSM_PATH}', 'config_dir_path': '${CONFIGURATION_PATH}'}" -echo "[appscript]: Applying the vault install and configure states..." -salt-call --local --retcode-passthrough state.sls vault -l info 2>&1 | tee /var/log/salt_vault.log +echo "[appscript]: Update minion config to allow module.run..." +printf 'use_superseded:\n - module.run\n' >> /etc/salt/minion echo "[appscript]: Updating salt states to include custom vault's states..." -salt-call --local saltutil.sync_states +salt-call --local saltutil.sync_all + +echo "[appscript]: Applying the vault install and configure states..." +salt-call --local --retcode-passthrough state.sls vault -l info 2>&1 | tee /var/log/salt_vault.log echo "[appscript]: Initializing the vault..." salt-call --local --retcode-passthrough state.sls vault.initialize -l info 2>&1 | tee /var/log/salt_vault_initialize.log -echo "[appscript]: Vault's status" +# Applying configurations per specific implementation +if [ "${CONFIGS_ARCHIVE}" != "n/a" ]; +then + echo "[appscript]: Retrieving root token to assist configuration provisioning..." + export VAULT_TOKEN=$(aws ssm get-parameter --name /${SSM_PATH}/root_token --with-decryption --query 'Parameter.Value' | tr -d '"') + + echo "[appscript]: Ensuring default vault configs location exists, ${CONFIGURATION_PATH}..." + mkdir -p ${CONFIGURATION_PATH} + + echo "[appscript]: Download vault configs archive file from s3://${CONFIGS_ARCHIVE}..." + aws s3 cp "s3://${CONFIGS_ARCHIVE}" ${CONFIGS_FILE_NAME} + + echo "[appscript]: Unzip vault configs archive file to ${CONFIGURATION_PATH}..." + unzip ${CONFIGS_FILE_NAME} -d ${CONFIGURATION_PATH} + + echo "[appscript]: Remove vault configs archive file ${CONFIGS_FILE_NAME}" + rm ${CONFIGS_FILE_NAME} + + echo "[appscript]: Sync configurations with the vault..." + salt-call --local --retcode-passthrough state.sls vault.sync -l info 2>&1 | tee /var/log/salt_vault_sync.log + +else + echo "[appscript]: No vault configurations provided. Skipping configuration steps..." +fi + +echo "[appscript]: Retrieving Vault's status" vault status echo "[appscript]: Completed appscript vault successfully!" 
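The token bootstrap in the appscript above is plain AWS CLI; the same round trip in boto3/hvac terms (a sketch mirroring the put_parameter calls made by the `initialized` state — the SSM path reuses the "vault/dev/token" example from the usage text):

    import boto3
    import hvac

    ssm = boto3.client('ssm')
    root_token = ssm.get_parameter(
        Name='/vault/dev/token/root_token',  # '/{ssm_path}/root_token'
        WithDecryption=True,
    )['Parameter']['Value']

    client = hvac.Client(url='http://127.0.0.1:8200', token=root_token)
    print(client.sys.is_initialized())  # rough equivalent of `vault status`
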
diff --git a/tests/bucket/main.tf b/tests/bucket/main.tf new file mode 100644 index 0000000..1852046 --- /dev/null +++ b/tests/bucket/main.tf @@ -0,0 +1,18 @@ +provider aws { + region = "us-east-1" +} + +resource "random_id" "name" { + byte_length = 6 + prefix = "terraform-aws-vault-" +} + +module "bucket" { + source = "../../modules/bucket" + + providers = { + aws = "aws" + } + + bucket_name = "${random_id.name}" +} diff --git a/tests/module_test.go b/tests/module_test.go new file mode 100644 index 0000000..ca18252 --- /dev/null +++ b/tests/module_test.go @@ -0,0 +1,67 @@ +package testing + +import ( + "io/ioutil" + "log" + "testing" + + "github.com/gruntwork-io/terratest/modules/terraform" +) + +func TestModule(t *testing.T) { + files, err := ioutil.ReadDir("./") + if err != nil { + log.Fatal(err) + } + + for _, f := range files { + // look for directories with test cases in it + if f.IsDir() { + if f.Name() != "vendor" { + testFiles, testErr := ioutil.ReadDir(f.Name()) + if testErr != nil { + log.Fatal(testErr) + } + + // see if a prereq directory exists + for _, testF := range testFiles { + if testF.IsDir() { + if testF.Name() == "prereq" { + directory := f.Name() + "/" + testF.Name() + runTerraformPreReq(t, directory) + } + } + } + + // run terraform code + runTerraform(t, f.Name()) + } + } + } +} + +// The prequisite function runs the terraform code but doesn't destroy it afterwards so that the state can be used for further testing +func runTerraformPreReq(t *testing.T, directory string) { + terraformOptions := &terraform.Options{ + TerraformDir: directory, + NoColor: true, + } + + // This will run `terraform init` and `terraform apply` and fail the test if there are any errors + terraform.InitAndApply(t, terraformOptions) +} + +func runTerraform(t *testing.T, directory string) { + terraformOptions := &terraform.Options{ + // The path to where your Terraform code is located + TerraformDir: directory, + // Disable color output + NoColor: true, + } + + // At the end of the test, run `terraform destroy` to clean up any resources that were created + defer terraform.Destroy(t, terraformOptions) + + // This will run `terraform init` and `terraform apply` and fail the test if there are any errors + terraform.InitAndApply(t, terraformOptions) +} diff --git a/variables.tf b/variables.tf index c4a77a1..222da2d 100644 --- a/variables.tf +++ b/variables.tf @@ -90,6 +90,11 @@ variable "ami_name_regex" { type = "string" default = "spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2" } +variable "configs_path" { + description = "Path to directory that contains configuration files for vault" + type = "string" + default = "" +} variable "instance_type" { default = "t2.medium" From 724ed85755df780104f7053df53a1f2571ac311f Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 7 Aug 2019 14:52:09 -0400 Subject: [PATCH 03/34] Updates to comply with linting/format --- .bumpversion.cfg | 0 .editorconfig | 7 ++++++- .gitignore | 0 .travis.yml | 0 README.md | 1 - _docs/MAIN.md | 0 main.tf | 2 +- salt/_modules/vault.py | 15 ++++++++------- salt/_states/vault.py | 7 +++---- salt/_utils/vault.py | 14 +++++++------- salt/vault/init.sls | 1 - salt/vault/initialize.sls | 1 - salt/vault/install.sls | 1 - salt/vault/maps/defaults.yaml | 1 - scripts/appscript.sh | 8 ++++---- 15 files changed, 29 insertions(+), 29 deletions(-) mode change 100644 => 100755 .bumpversion.cfg mode change 100644 => 100755 .editorconfig mode change 100644 => 100755 .gitignore mode change 100644 => 100755 .travis.yml mode change 
100644 => 100755 _docs/MAIN.md
diff --git a/.bumpversion.cfg b/.bumpversion.cfg
old mode 100644
new mode 100755
diff --git a/.editorconfig b/.editorconfig
old mode 100644
new mode 100755
index 612c89e..a2f7c68
--- a/.editorconfig
+++ b/.editorconfig
@@ -13,7 +13,12 @@ charset = utf-8
 trim_trailing_whitespace = false
 
 [*.py]
-indent_size = 4
+indent_style = space
+indent_size = 2
+
+[*.go]
+indent_style = tab
+indent_size = 2
 
 [Makefile]
 indent_style = tab
diff --git a/.gitignore b/.gitignore
old mode 100644
new mode 100755
diff --git a/.travis.yml b/.travis.yml
old mode 100644
new mode 100755
diff --git a/README.md b/README.md
index d5e6e18..d7edaf9 100755
--- a/README.md
+++ b/README.md
@@ -49,4 +49,3 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy
 | Name | Description |
 |------|-------------|
 | vault\_url | URL to access Vault UI |
-
diff --git a/_docs/MAIN.md b/_docs/MAIN.md
old mode 100644
new mode 100755
diff --git a/main.tf b/main.tf
index a95204c..e39d1d1 100644
--- a/main.tf
+++ b/main.tf
@@ -300,7 +300,7 @@ locals {
 
 # Manage autoscaling group
 module "autoscaling_group" {
-  source = "git::https://github.com/plus3it/terraform-aws-watchmaker//modules/lx-autoscale?ref=1.15.2"
+  source = "git::https://github.com/plus3it/terraform-aws-watchmaker//modules/lx-autoscale?ref=1.15.6"
 
   Name            = "${var.name}-${var.environment}"
   OnFailureAction = ""
diff --git a/salt/_modules/vault.py b/salt/_modules/vault.py
index 1153a22..9eae2d0 100644
--- a/salt/_modules/vault.py
+++ b/salt/_modules/vault.py
@@ -29,6 +29,7 @@
     log.debug('Unable to import the dependencies...')
     DEPS_INSTALLED = False
 
+
 class InsufficientParameters(Exception):
     pass
 
@@ -101,7 +102,7 @@ def __eq__(self, other):
     def __repr__(self):
         return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" %
                 (self.path, self.type, self.description, str(self.config),
-                self.get_unique_id()))
+                 self.get_unique_id()))
 
 
 class VaultSecretEngine:
@@ -159,7 +160,7 @@ def __eq__(self, other):
     def __repr__(self):
         return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" %
                 (self.path, self.type, self.description, str(self.config),
-                self.get_unique_id()))
+                 self.get_unique_id()))
 
 
 class VaultAuditDevice:
@@ -176,7 +177,7 @@ def __init__(self, type, path, description, options):
 
     def get_device_unique_id(self):
         unique_str = str(self.type + self.path +
-                        self.description + str(self.options))
+                         self.description + str(self.options))
         sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest()
         return sha256_hash
 
@@ -186,7 +187,7 @@ def __eq__(self, other):
     def __repr__(self):
         return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" %
                 (self.path, self.type, self.description, str(self.options),
-                self.get_device_unique_id()))
+                 self.get_device_unique_id()))
 
 
 class VaultPolicyManager():
@@ -342,7 +343,7 @@ def get_remote_auth_methods(self):
 
     def get_local_auth_methods(self):
         log.info('Loading auth methods from local config file: %s',
-                self.config_path)
+                 self.config_path)
         config = __utils__['vault.load_config_file'](
             config_path=self.config_path)
         for auth_method in config["auth-methods"]:
@@ -700,7 +701,7 @@ def cleanup_secrets_engines(self):
                         path=secret_engine.path
                     )
                     log.info('Secrets engine "%s" at path "%s" is disabled.',
-                            secret_engine.type, secret_engine.path)
+                             secret_engine.type, secret_engine.path)
         log.info('Finished cleaning up secrets engines.')
 
         if has_changes:
@@ -815,7 +816,7 @@ def cleanup_audit_devices(self):
         for audit_device in self.remote_audit_devices:
             if audit_device not in 
self.local_audit_devices: log.info('Disabling audit device "%s" at path "%s"...', - audit_device.type, audit_device.path) + audit_device.type, audit_device.path) has_changes = True self.client.sys.disable_audit_device( path=audit_device.path diff --git a/salt/_states/vault.py b/salt/_states/vault.py index afdcb88..5b33090 100644 --- a/salt/_states/vault.py +++ b/salt/_states/vault.py @@ -40,9 +40,9 @@ def initialized(name, ssm_path, recovery_shares=5, recovery_threshold=3): :rtype: dict """ ret = {'name': name, - 'comment': '', - 'result': '', - 'changes': {}} + 'comment': '', + 'result': '', + 'changes': {}} client = hvac.Client(url='http://localhost:8200') @@ -93,4 +93,3 @@ def initialized(name, ssm_path, recovery_shares=5, recovery_threshold=3): ret['comment'] = 'Vault has {}initialized'.format( '' if is_success else 'failed to be ') return ret - diff --git a/salt/_utils/vault.py b/salt/_utils/vault.py index 062444e..867a547 100644 --- a/salt/_utils/vault.py +++ b/salt/_utils/vault.py @@ -18,13 +18,13 @@ def build_client(url='http://localhost:8200', - token=None, - cert=None, - verify=True, - timeout=30, - proxies=None, - allow_redirects=True, - session=None): + token=None, + cert=None, + verify=True, + timeout=30, + proxies=None, + allow_redirects=True, + session=None): client = hvac.Client(url=url) diff --git a/salt/vault/init.sls b/salt/vault/init.sls index c6d0337..c37ba3c 100644 --- a/salt/vault/init.sls +++ b/salt/vault/init.sls @@ -3,4 +3,3 @@ include: - .configure - .service - .firewall - diff --git a/salt/vault/initialize.sls b/salt/vault/initialize.sls index 3bfc97d..5732c94 100644 --- a/salt/vault/initialize.sls +++ b/salt/vault/initialize.sls @@ -6,4 +6,3 @@ vault_initialize_server: - recovery_shares: {{ vault.recovery_shares }} - recovery_threshold: {{ vault.recovery_threshold }} - ssm_path: {{ vault.ssm_path }} - diff --git a/salt/vault/install.sls b/salt/vault/install.sls index 7ecb17c..e86a00f 100644 --- a/salt/vault/install.sls +++ b/salt/vault/install.sls @@ -90,4 +90,3 @@ install_python_dependencies: - pkgs: {{ vault.module_dependencies.pip_deps | json }} - reload_modules: True - ignore_installed: True - diff --git a/salt/vault/maps/defaults.yaml b/salt/vault/maps/defaults.yaml index 22a2694..4fdd181 100644 --- a/salt/vault/maps/defaults.yaml +++ b/salt/vault/maps/defaults.yaml @@ -10,4 +10,3 @@ vault: max_lease_ttl: 192h #one week recovery_shares: 5 recovery_threshold: 3 - diff --git a/scripts/appscript.sh b/scripts/appscript.sh index ba174d2..47289b0 100644 --- a/scripts/appscript.sh +++ b/scripts/appscript.sh @@ -3,9 +3,8 @@ set -eu -o pipefail [[ $# -lt 6 ]] && { echo "Usage $0 " >&2 - echo " Example: $0 bucket-foo/randomid/salt.zip bucket-foo/randomid/configs.zip 1.1.3 - vault-data-table xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx vault/dev/token - https://github.com/PremiereGlobal/vault-admin/releases/download/0.3.1/vadmin-linux-0.3.1.zip" >&2 + echo " Example: $0 bucket-foo/randomid/salt.zip bucket-foo/randomid/configs.zip 1.2.0 + vault-data-table xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx vault/dev/token /etc/vault/configs" >&2 exit 1 } @@ -70,7 +69,8 @@ salt-call --local --retcode-passthrough state.sls vault.initialize -l info 2>&1 if [ "${CONFIGS_ARCHIVE}" != "n/a" ]; then echo "[appscript]: Retrieving root token to assist configuration provisioning..." 
- export VAULT_TOKEN=$(aws ssm get-parameter --name /${SSM_PATH}/root_token --with-decryption --query 'Parameter.Value' | tr -d '"') + VAULT_TOKEN=$(aws ssm get-parameter --name /"${SSM_PATH}"/root_token --with-decryption --query 'Parameter.Value' | tr -d '"') + export VAULT_TOKEN echo "[appscript]: Ensuring default vault configs location exists, ${CONFIGURATION_PATH}..." mkdir -p ${CONFIGURATION_PATH} From 6f11b467054aa453507561b18534fc50c83c93c2 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Thu, 8 Aug 2019 21:20:47 -0400 Subject: [PATCH 04/34] Adds comments to vault modules --- README.md | 1 + salt/_modules/vault.py | 49 ++++++++++++++++++++++++++++++++++++------ salt/_utils/vault.py | 21 +++++++++++++++++- 3 files changed, 63 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index d7edaf9..d5e6e18 100755 --- a/README.md +++ b/README.md @@ -49,3 +49,4 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy | Name | Description | |------|-------------| | vault\_url | URL to access Vault UI | + diff --git a/salt/_modules/vault.py b/salt/_modules/vault.py index 9eae2d0..361fce8 100644 --- a/salt/_modules/vault.py +++ b/salt/_modules/vault.py @@ -51,8 +51,6 @@ class VaultAuthMethod: def __init__(self, type, path, description, config=None, auth_config=None, extra_config=None): """ - Instanciate class - :param type: Authentication type :type type: str :param path: Authentication mount point @@ -118,8 +116,6 @@ class VaultSecretEngine: def __init__(self, type, path, description, config=None, secret_config=None, extra_config=None): """ - Instantiate Class - :param type: Secret type :type type: str :param path: Secret mount point @@ -170,6 +166,14 @@ class VaultAuditDevice: options = None def __init__(self, type, path, description, options): + """initialize class + + Arguments: + type {str} -- Specifies the type of the audit device. + path {str} -- Specifies the path in which to enable the audit device. This is part of the request URL. + description {str} -- Human-friendly description of the audit device. + options {str} -- Configuration options to pass to the audit device itself. This is dependent on the audit device type. 
+ """ self.type = type self.path = path.replace("/", "") self.description = (description if description else "") @@ -201,6 +205,11 @@ class VaultPolicyManager(): ret = {} def __init__(self, policies_dir_path): + """ + + Arguments: + policies_dir_path {str} -- Specify path to the directory contains all policies + """ log.info("Initializing Vault Policy Manager...") self.policies_folder = policies_dir_path @@ -291,7 +300,9 @@ def cleanup_policies(self): def run(self): """ - Control the executions + + Returns: + dict -- results of the execution """ log.info('-------------------------------------') self.client = __utils__['vault.build_client']() @@ -315,6 +326,11 @@ class VaultAuthManager(): client = None def __init__(self, config_path): + """ + + Arguments: + config_path {str} -- Path of the yaml file that contains configuration options for authentication methods + """ log.info("Initializing Vault Auth Manager...") self.config_path = config_path @@ -487,7 +503,9 @@ def cleanup_auth_methods(self): def run(self): """ - Control the executions + + Returns: + dict -- results of the execution """ log.info('-------------------------------------') self.client = __utils__['vault.build_client']() @@ -511,6 +529,11 @@ class VaultSecretsManager(): ret = {} def __init__(self, config_path): + """ + + Arguments: + config_path {str} -- Path of the yaml file that contains configuration options for secrets engines + """ log.info("Initializing Vault Secret Manager...") self.config_path = config_path @@ -710,7 +733,9 @@ def cleanup_secrets_engines(self): def run(self): """ - Control the executions + + Returns: + dict -- results of the execution """ log.info('-------------------------------------') self.client = __utils__['vault.build_client']() @@ -734,6 +759,11 @@ class VaultAuditManager(): ret = {} def __init__(self, config_path): + """ + + Arguments: + config_path {str} -- Path of the yaml file that contains configuration options for audit devices + """ log.info("Initializing Vault Audit Manager...") self.config_path = config_path @@ -828,6 +858,11 @@ def cleanup_audit_devices(self): [ob.type for ob in self.local_audit_devices])) def run(self): + """ + + Returns: + dict -- results of the execution + """ log.info('-------------------------------------') self.client = __utils__['vault.build_client']() self.get_remote_audit_devices() diff --git a/salt/_utils/vault.py b/salt/_utils/vault.py index 867a547..31553c6 100644 --- a/salt/_utils/vault.py +++ b/salt/_utils/vault.py @@ -1,4 +1,3 @@ - from __future__ import absolute_import, print_function, unicode_literals import base64 import logging @@ -25,6 +24,18 @@ def build_client(url='http://localhost:8200', proxies=None, allow_redirects=True, session=None): + """Instantiates and returns hvac Client class for HashiCorp’s Vault. + + Keyword Arguments: + url {str} -- Base URL for the Vault instance being addressed. (default: {'http://localhost:8200'}) + token {str} -- Authentication token to include in requests sent to Vault. (default: {None}) + cert {tuple} -- Certificates for use in requests sent to the Vault instance. This should be a tuple with the certificate and then key. (default: {None}) + verify {bool} -- Either a boolean to indicate whether TLS verification should be performed when sending requests to Vault, or a string pointing at the CA bundle to use for verification. (default: {True}) + timeout {int} -- The timeout value for requests sent to Vault. 
(default: {30}) + proxies {dict} -- Proxies to use when performing requests (default: {None}) + allow_redirects {bool} -- Whether to follow redirects when sending requests to Vault. (default: {True}) + session {request.Session} -- Optional session object to use when performing request. (default: {None}) + """ client = hvac.Client(url=url) @@ -34,6 +45,14 @@ def build_client(url='http://localhost:8200', def load_config_file(config_path): + """Retrieve config file from provided path + + Arguments: + config_path {str} -- path to config file + + Returns: + [obj] -- parsed object of the config + """ config = None with open(os.path.join(config_path), 'r') as fd: try: From 9f120372018028e16d5a27d0e471481580c4beb5 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Mon, 26 Aug 2019 21:05:56 -0400 Subject: [PATCH 05/34] Adds a basic test case for the vault stack --- .gitignore | 3 ++ tests/Gopkg.lock | 94 +++++++++++++++++++++++++++++++++++++++++ tests/Gopkg.toml | 34 +++++++++++++++ tests/base/main.tf | 71 +++++++++++++++++++++++++++++++ tests/base/variables.tf | 59 ++++++++++++++++++++++++++ tests/bucket/main.tf | 18 -------- 6 files changed, 261 insertions(+), 18 deletions(-) create mode 100644 tests/Gopkg.lock create mode 100644 tests/Gopkg.toml create mode 100644 tests/base/main.tf create mode 100644 tests/base/variables.tf delete mode 100644 tests/bucket/main.tf diff --git a/.gitignore b/.gitignore index 3439d2c..212be99 100755 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,6 @@ # other uncessarry files .DS_Store + +# ignore go files +vendor/ diff --git a/tests/Gopkg.lock b/tests/Gopkg.lock new file mode 100644 index 0000000..99dac1b --- /dev/null +++ b/tests/Gopkg.lock @@ -0,0 +1,94 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:9a125bb28b817431abc860b051cf10f41febe7830749e6d460826c4e253994be" + name = "github.com/gruntwork-io/terratest" + packages = [ + "modules/collections", + "modules/customerrors", + "modules/files", + "modules/logger", + "modules/retry", + "modules/shell", + "modules/ssh", + "modules/terraform", + ] + pruneopts = "UT" + revision = "367843c5fa8429d84d2e9b78402546316b54ee91" + version = "v0.17.6" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + branch = "master" + digest = "1:4d40045409ff4ad024dc87c28ea31ff4207b574764e0b8f6ee6dcbb3b420285a" + name = "golang.org/x/crypto" + packages = [ + "curve25519", + "ed25519", + "ed25519/internal/edwards25519", + "internal/chacha20", + "internal/subtle", + "poly1305", + "ssh", + "ssh/agent", + ] + pruneopts = "UT" + revision = "60c769a6c58655dab1b9adac0d58967dd517cfba" + +[[projects]] + branch = "master" + digest = "1:76ee51c3f468493aff39dbacc401e8831fbb765104cbf613b89bef01cf4bad70" + name = "golang.org/x/net" + packages = ["context"] + pruneopts = "UT" + revision = "74dc4d7220e7acc4e100824340f3e66577424772" + +[[projects]] + branch = "master" + digest = "1:9289797869517c73400e7b5a93beefbfc86e06f56e3ab03f38fb0d575e685277" + name = "golang.org/x/sys" + packages = ["cpu"] + pruneopts = "UT" + revision = "fde4db37ae7ad8191b03d30d27f258b5291ae4e3" + +[[projects]] + digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = ["github.com/gruntwork-io/terratest/modules/terraform"] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/tests/Gopkg.toml b/tests/Gopkg.toml new file mode 100644 index 0000000..d951b36 --- /dev/null +++ b/tests/Gopkg.toml @@ -0,0 +1,34 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "github.com/gruntwork-io/terratest" + version = "0.17.5" + +[prune] + go-tests = true + unused-packages = true diff --git a/tests/base/main.tf b/tests/base/main.tf new file mode 100644 index 0000000..080265d --- /dev/null +++ b/tests/base/main.tf @@ -0,0 +1,71 @@ +locals { + test_prefix = "tf-vault-${random_string.this.result}" + key_pair_name = "${var.key_pair_name == "" ? aws_key_pair.this.0.id : var.key_pair_name}" + kms_key_id = "${var.kms_key_id == "" ? aws_kms_key.this.0.id : var.kms_key_id}" +} + +resource "random_string" "this" { + length = 6 + upper = false + special = false +} +resource "tls_private_key" "this" { + count = "${var.key_pair_name == "" ? 1 : 0}" + algorithm = "RSA" + rsa_bits = "4096" +} + +resource "aws_key_pair" "this" { + count = "${var.key_pair_name == "" ? 1 : 0}" + key_name = "${local.test_prefix}-rsa" + public_key = "${tls_private_key.this.0.public_key_openssh}" +} +resource "aws_kms_alias" "this" { + count = "${var.key_pair_name == "" ? 1 : 0}" + name = "alias/${local.test_prefix}" + target_key_id = "${aws_kms_key.this.0.key_id}" +} +resource "aws_kms_key" "this" { + count = "${var.kms_key_id == "" ? 1 : 0}" + description = "KSM Key for vault tests" + deletion_window_in_days = 10 + tags = { + Environment = "${var.environment}" + Name = "${local.test_prefix}" + } +} + +module "vault-py3" { + source = "../../" + environment = "${var.environment}" + desired_capacity = 1 + ami_owner = "${var.ami_owner}" + + name = "${local.test_prefix}-py3" + key_pair_name = "${local.key_pair_name}" + kms_key_id = "${local.kms_key_id}" + ec2_subnet_ids = "${var.ec2_subnet_ids}" + lb_subnet_ids = "${var.lb_subnet_ids}" + + cloudwatch_agent_url = "${var.cloudwatch_agent_url}" + + domain_name = "${var.domain_name}" + vault_version = "${var.vault_version}" + dynamodb_table = "${var.dynamodb_table}" + + watchmaker_config = "${var.watchmaker_config}" + + toggle_update = "B" +} + + +data "aws_route53_zone" "this" { + name = "${var.domain_name}" + private_zone = false +} + +data "aws_acm_certificate" "this" { + domain = "*.${var.domain_name}" + types = ["AMAZON_ISSUED"] + most_recent = true +} diff --git a/tests/base/variables.tf b/tests/base/variables.tf new file mode 100644 index 0000000..d48bb9f --- /dev/null +++ b/tests/base/variables.tf @@ -0,0 +1,59 @@ +variable "environment" { + description = "Type of environment -- must be one of: dev, test, prod" + type = "string" + default = "test" +} +variable "key_pair_name" { + description = "Keypair to associate to launched instances" + type = "string" + default = "" +} + +variable "ami_owner" { + description = "Account id/alias of the AMI owner" + type = "string" +} + +variable "ec2_subnet_ids" { + description = "List of subnets where EC2 instances will be launched" + type = "list" +} + +variable "lb_subnet_ids" { + description = "List of subnets to associate to the Load Balancer" + type = "list" +} + +variable "vault_version" { + description = "Version of Vault to be installed on servers" + type = "string" +} + +variable "kms_key_id" { + 
description = "Id of an AWS KMS key use for auto unseal operation when vault is intialize" + type = "string" + default = "" +} + +variable "domain_name" { + type = "string" + description = "Domain to provision test vault cluster" +} + +variable "dynamodb_table" { + description = "Name of the Dynamodb to be used as storage backend for Vault" + type = "string" + default = "" +} + +variable "cloudwatch_agent_url" { + type = "string" + description = "(Optional) S3 URL to CloudWatch Agent installer. Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip" + default = "" +} + +variable "watchmaker_config" { + type = "string" + description = "(Optional) URL to a Watchmaker config file" + default = "" +} diff --git a/tests/bucket/main.tf b/tests/bucket/main.tf deleted file mode 100644 index 1852046..0000000 --- a/tests/bucket/main.tf +++ /dev/null @@ -1,18 +0,0 @@ -provider aws { - region = "us-east-1" -} - -resource "random_id" "name" { - byte_length = 6 - prefix = "terraform-aws-vault-" -} - -module "bucket" { - source = "../../modules/bucket" - - providers = { - aws = "aws" - } - - bucket_name = "${random_id.name}" -} From 447b3ff13e492b9f5f6fc229d66cecef69e2bd58 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Mon, 26 Aug 2019 21:07:35 -0400 Subject: [PATCH 06/34] Updates to allow support salt py3 --- salt/_states/vault.py | 2 ++ salt/_utils/vault.py | 2 ++ salt/vault/firewall.sls | 10 +++++----- salt/vault/install.sls | 7 ++++--- salt/vault/maps/osfamilymap.yaml | 4 +--- 5 files changed, 14 insertions(+), 11 deletions(-) diff --git a/salt/_states/vault.py b/salt/_states/vault.py index 5b33090..0063b54 100644 --- a/salt/_states/vault.py +++ b/salt/_states/vault.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + from __future__ import absolute_import import logging diff --git a/salt/_utils/vault.py b/salt/_utils/vault.py index 31553c6..ff9c6f9 100644 --- a/salt/_utils/vault.py +++ b/salt/_utils/vault.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + from __future__ import absolute_import, print_function, unicode_literals import base64 import logging diff --git a/salt/vault/firewall.sls b/salt/vault/firewall.sls index a0c1f5f..329a0e2 100644 --- a/salt/vault/firewall.sls +++ b/salt/vault/firewall.sls @@ -1,25 +1,25 @@ {% from "vault/map.jinja" import vault with context %} -firewalld_service: +firewalld_vault_service: firewalld.service: - name: vault - ports: - 8200/tcp - 8201/tcp -firewalld_zone: +firewalld_vault_zone: firewalld.present: - - name: vault + - name: vaultzone - services: - vault - sources: {%- for mac, properties in salt.grains.get('meta-data:network:interfaces:macs', {}).items() %} - {%- if properties['device-number'] == 0 %} + {%- if properties['device-number'] == "0" %} {%- for cidr in properties['vpc-ipv4-cidr-blocks'].split('\n') %} - {{ cidr }} {%- endfor %} {%- endif %} {%- endfor %} - require: - - firewalld: firewalld_service + - firewalld: firewalld_vault_service diff --git a/salt/vault/install.sls b/salt/vault/install.sls index e86a00f..7d87922 100644 --- a/salt/vault/install.sls +++ b/salt/vault/install.sls @@ -77,16 +77,17 @@ install_package_dependencies: - pkgs: {{ vault.module_dependencies.pkgs | json }} - reload_modules: True -install_pip_executable: +{# install_pip_executable: cmd.run: - name: | curl -L "https://bootstrap.pypa.io/get-pip.py" > get_pip.py - sudo python get_pip.py pip==18.0.0 + sudo python get_pip.py pip==19.0.0 rm get_pip.py - - reload_modules: True + - reload_modules: True #} install_python_dependencies: pip.installed: - pkgs: {{ 
vault.module_dependencies.pip_deps | json }} + - target: /usr/lib/python3.6/site-packages - reload_modules: True - ignore_installed: True diff --git a/salt/vault/maps/osfamilymap.yaml b/salt/vault/maps/osfamilymap.yaml index c27b7ce..421353d 100644 --- a/salt/vault/maps/osfamilymap.yaml +++ b/salt/vault/maps/osfamilymap.yaml @@ -11,12 +11,11 @@ RedHat: - libffi-devel - python-devel - openssl-devel + - python36-pip pip_deps: - hvac - testinfra - boto3 - - pyotp - Debian: gpg_pkg: gnupg2 @@ -34,4 +33,3 @@ Debian: - hvac - testinfra - boto3 - - pyotp From 30a661e364675f28d884f2b4cfdf1193a621ed6c Mon Sep 17 00:00:00 2001 From: Triet Le Date: Mon, 26 Aug 2019 21:10:02 -0400 Subject: [PATCH 07/34] Updates and cleans up tf template to accommodate module test --- main.tf | 76 +++++++++++++++++++++++++++---------- modules/iam/iam_policy.json | 4 +- modules/iam/main.tf | 10 +---- outputs.tf | 2 +- scripts/appscript.sh | 11 ++++-- variables.tf | 31 ++++++--------- 6 files changed, 79 insertions(+), 55 deletions(-) diff --git a/main.tf b/main.tf index e39d1d1..290c256 100644 --- a/main.tf +++ b/main.tf @@ -7,9 +7,8 @@ terraform { ### locals { - name_id = "${var.name}-${random_string.this.result}" vpc_id = "${data.aws_subnet.lb.0.vpc_id}" - role_name = "${upper(var.name)}_INSTANCE_${data.aws_caller_identity.current.account_id}" + role_name = "${upper(var.name)}-INSTANCE-${data.aws_caller_identity.current.account_id}" ssm_root_path = "vault/${var.environment}/${data.aws_caller_identity.current.account_id}/${var.name}" public_ip = "${chomp(data.http.ip.body)}/32" allow_inbound = "${compact(distinct(concat(list(local.public_ip), var.additional_ips_allow_inbound)))}" @@ -18,7 +17,10 @@ locals { appscript_file_name = "appscript.sh" archive_dir_path = "${path.module}/.files" appscript_dir_path = "${path.module}/scripts" - + dynamodb_table = "${var.dynamodb_table == "" ? aws_dynamodb_table.this.id : var.dynamodb_table}" + url = "${var.name}.${var.domain_name}" + vault_url = "${var.vault_url == "" ? local.url : var.vault_url}" + stack_name = "${var.name}-${var.environment}" tags = { Environment = "${var.environment}" } @@ -66,6 +68,7 @@ data "archive_file" "salt" { source_dir = "${path.module}/salt" output_path = "${local.archive_dir_path}/${local.archive_file_name}" } + data "archive_file" "configs" { count = "${var.configs_path == "" ? 0 : 1}" type = "zip" @@ -73,11 +76,22 @@ data "archive_file" "configs" { output_path = "${local.archive_dir_path}/${local.configs_file_name}" } -# Manage Bucket module +data "aws_route53_zone" "this" { + name = "${var.domain_name}" + private_zone = false +} + +data "aws_acm_certificate" "this" { + domain = "*.${var.domain_name}" + types = ["AMAZON_ISSUED"] + most_recent = true +} + +# Manage S3 bucket module module "s3_bucket" { source = "./modules/bucket" - bucket_name = "${var.bucket_name}" + bucket_name = "${var.name}-appscript" } # Manage IAM module @@ -85,10 +99,9 @@ module "iam" { source = "./modules/iam" bucket_name = "${module.s3_bucket.bucket_name}" - dynamodb_table = "${var.dynamodb_table}" - environment = "${var.environment}" + dynamodb_table = "${local.dynamodb_table}" kms_key_id = "${data.aws_kms_key.this.key_id}" - name = "${var.name}" + stack_name = "${local.stack_name}" role_name = "${local.role_name}" ssm_root_path = "${local.ssm_root_path}" } @@ -124,9 +137,8 @@ resource "aws_s3_bucket_object" "app_script" { # Manage domain record resource "aws_route53_record" "this" { - count = "${var.route53_zone_id == "" || var.vault_url == "" ? 
0 : 1}" - zone_id = "${var.route53_zone_id}" - name = "${var.vault_url}" + zone_id = "${data.aws_route53_zone.this.zone_id}" + name = "${local.vault_url}" type = "A" alias { @@ -173,7 +185,7 @@ resource "aws_lb_listener" "https" { port = "443" protocol = "HTTPS" ssl_policy = "${var.lb_ssl_policy}" - certificate_arn = "${var.lb_certificate_arn}" + certificate_arn = "${data.aws_acm_certificate.this.arn}" default_action { target_group_arn = "${aws_lb_target_group.this.arn}" @@ -182,7 +194,7 @@ resource "aws_lb_listener" "https" { } resource "aws_lb_target_group" "this" { - name = "${var.name}-${var.environment}" + name = "${var.name}-tg-${var.environment}" port = "8200" protocol = "HTTP" vpc_id = "${local.vpc_id}" @@ -204,14 +216,12 @@ resource "aws_lb_target_group" "this" { unhealthy_threshold = "2" } - tags = "${merge( - map("Name", "${var.name}-tg"), - local.tags)}" + tags = "${merge(map("Name", "${var.name}-tg"), local.tags)}" } # Manage security groups resource "aws_security_group" "lb" { - name = "${var.name}-${var.environment}" + name = "${var.name}-lb-sg-${var.environment}" description = "Rules required for operation of ${var.name}" vpc_id = "${local.vpc_id}" @@ -289,7 +299,7 @@ locals { "${module.s3_bucket.bucket_name}/${random_string.this.result}/${local.archive_file_name}", "${local.s3_configs_key}", "${var.vault_version}", - "${var.dynamodb_table}", + "${local.dynamodb_table}", "${data.aws_kms_key.this.key_id}", "${local.ssm_root_path}" ] @@ -298,11 +308,35 @@ locals { appscript_params = "${join(" ", local.params_for_appscript)}" } + +# Manage Dynamodb Tables +resource "aws_dynamodb_table" "this" { + name = "${var.name}-data" + read_capacity = 5 + write_capacity = 5 + hash_key = "Path" + range_key = "Key" + attribute { + name = "Path" + type = "S" + } + attribute { + name = "Key" + type = "S" + } + + tags = { + Name = "${var.name}-data" + Environment = "${var.environment}" + } +} + + # Manage autoscaling group module "autoscaling_group" { - source = "git::https://github.com/plus3it/terraform-aws-watchmaker//modules/lx-autoscale?ref=1.15.6" + source = "git::https://github.com/plus3it/terraform-aws-watchmaker//modules/lx-autoscale?ref=1.15.7" - Name = "${var.name}-${var.environment}" + Name = "${local.stack_name}" OnFailureAction = "" DisableRollback = "true" @@ -336,4 +370,6 @@ module "autoscaling_group" { DesiredCapacity = "${var.desired_capacity}" MinCapacity = "${var.min_capacity}" MaxCapacity = "${var.max_capacity}" + + EnableRepos = "epel" } diff --git a/modules/iam/iam_policy.json b/modules/iam/iam_policy.json index dca26ec..a4bd86a 100644 --- a/modules/iam/iam_policy.json +++ b/modules/iam/iam_policy.json @@ -7,7 +7,7 @@ ], "Effect": "Allow", "Resource": [ - "arn:${partition}:cloudformation:${region}:${account_id}:stack/${name}-${environment}*" + "arn:${partition}:cloudformation:${region}:${account_id}:stack/${stack_name}*" ], "Sid": "CfnActions" }, @@ -63,7 +63,7 @@ ], "Effect": "Allow", "Resource": [ - "arn:${partition}:logs:${region}:${account_id}:log-group:/aws/ec2/lx/${name}-${environment}*" + "arn:${partition}:logs:${region}:${account_id}:log-group:/aws/ec2/lx/${stack_name}*" ], "Sid": "CloudWatchLogActions" }, diff --git a/modules/iam/main.tf b/modules/iam/main.tf index 0c8bcdc..cef603a 100644 --- a/modules/iam/main.tf +++ b/modules/iam/main.tf @@ -1,16 +1,11 @@ ### ### REQUIRED VARIABLES ### -variable "name" { +variable "stack_name" { description = "Name of the stack" type = "string" } -variable "environment" { - description = "Type of environment -- must be one of: 
dev, test, prod" - type = "string" -} - variable "kms_key_id" { description = "Id of an AWS KMS key use for auto unseal operation when vault is intialize" type = "string" @@ -65,8 +60,7 @@ data "template_file" "instance_policy" { region = "${data.aws_region.current.name}" account_id = "${data.aws_caller_identity.current.account_id}" - name = "${var.name}" - environment = "${var.environment}" + stack_name = "${var.stack_name}" key_id = "${var.kms_key_id}" dynamodb_table = "${var.dynamodb_table}" bucket_name = "${var.bucket_name}" diff --git a/outputs.tf b/outputs.tf index b7306de..1a02e21 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1,4 +1,4 @@ output "vault_url" { description = "URL to access Vault UI" - value = "https://${aws_route53_record.this.0.fqdn}" + value = "https://${aws_route53_record.this.fqdn}" } diff --git a/scripts/appscript.sh b/scripts/appscript.sh index 47289b0..c10d334 100644 --- a/scripts/appscript.sh +++ b/scripts/appscript.sh @@ -56,13 +56,16 @@ salt-call --local grains.setval vault \ echo "[appscript]: Update minion config to allow module.run..." printf 'use_superseded:\n - module.run\n' >> /etc/salt/minion -echo "[appscript]: Updating salt states to include custom vault's states..." +echo "[appscript]: Print out salt versions report" +salt-call --local --versions-report + +echo "[appscript]: Updating salt states to include custom vault's states/modules..." salt-call --local saltutil.sync_all -echo "[appscript]: Applying the vault install and configure states..." +echo "[appscript]: Installing vault and configuring service, firewall..." salt-call --local --retcode-passthrough state.sls vault -l info 2>&1 | tee /var/log/salt_vault.log -echo "[appscript]: Initializing the vault..." +echo "[appscript]: Initializing vault..." salt-call --local --retcode-passthrough state.sls vault.initialize -l info 2>&1 | tee /var/log/salt_vault_initialize.log # Applying configurations per specific implementation @@ -88,7 +91,7 @@ then salt-call --local --retcode-passthrough state.sls vault.sync -l info 2>&1 | tee /var/log/salt_vault_sync.log else - echo "[appscript]: No vault configurations provided. Skipping configuration steps..." + echo "[appscript]: No vault configurations provided. Skipping configuration vault step..." fi echo "[appscript]: Retrieving Vault's status" diff --git a/variables.tf b/variables.tf index 222da2d..36772cb 100644 --- a/variables.tf +++ b/variables.tf @@ -11,11 +11,6 @@ variable "environment" { type = "string" } -variable "bucket_name" { - description = "The name of the bucket will be use to store app scripts and vault's salt formula." 
- type = "string" -} - variable "key_pair_name" { description = "Keypair to associate to launched instances" type = "string" @@ -29,11 +24,13 @@ variable "ami_owner" { variable "additional_ips_allow_inbound" { description = "List of ip address that allow to have access to resources" type = "list" + default = [] } variable "ec2_extra_security_group_ids" { description = "List of additional security groups to add to EC2 instances" type = "list" + default = [] } variable "ec2_subnet_ids" { @@ -41,11 +38,6 @@ variable "ec2_subnet_ids" { type = "list" } -variable "lb_certificate_arn" { - type = "string" - description = "Arn of a created certificate to be use for the load balancer" -} - variable "lb_subnet_ids" { description = "List of subnets to associate to the Load Balancer" type = "list" @@ -58,9 +50,16 @@ variable "vault_version" { variable "vault_url" { type = "string" - description = "The DNS address that vault will be accessible at. Example: vault.domain.net" + description = "The DNS address that vault will be accessible at. Stack name will be used as the url when value is set to empty. Example: vault.domain.net" + default = "" } +variable "domain_name" { + type = "string" + description = "The domain name where vault url will be registered to. Example: domain.net" +} + + variable "kms_key_id" { description = "Id of an AWS KMS key use for auto unseal operation when vault is intialize" type = "string" @@ -69,12 +68,9 @@ variable "kms_key_id" { variable "dynamodb_table" { description = "Name of the Dynamodb to be used as storage backend for Vault" type = "string" + default = "" } -variable "route53_zone_id" { - type = "string" - description = "Zone ID for domain" -} ### ### OPTIONAL VARIABLES @@ -192,11 +188,6 @@ variable "toggle_update" { type = "string" } -variable "route53_enabled" { - description = "Creates Route53 DNS entries for Vault automatically" - default = false -} - variable "tags" { description = "(Optional) list of tags to include with resource" type = "map" From b8352c809eaf9e067b853e3829066552de002333 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Mon, 26 Aug 2019 21:15:05 -0400 Subject: [PATCH 08/34] Update readme.md --- README.md | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index d5e6e18..53e21a0 100755 --- a/README.md +++ b/README.md @@ -6,18 +6,18 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy | Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| -| additional\_ips\_allow\_inbound | List of ip address that allow to have access to resources | list | n/a | yes | +| additional\_ips\_allow\_inbound | List of ip address that allow to have access to resources | list | `` | no | | ami\_name\_filter | Will be use to filter out AMI | string | `"spel-minimal-centos-7-hvm-*.x86_64-gp2"` | no | | ami\_name\_regex | Regex to help fine-grain filtering AMI | string | `"spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2"` | no | | ami\_owner | Account id/alias of the AMI owner | string | n/a | yes | -| bucket\_name | The name of the bucket will be use to store app scripts and vault's salt formula. | string | n/a | yes | | cfn\_bootstrap\_utils\_url | (Optional) URL to aws-cfn-bootstrap-latest.tar.gz | string | `"https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz"` | no | | cfn\_endpoint\_url | (Optional) URL to the CloudFormation Endpoint. e.g. 
https://cloudformation.us-east-1.amazonaws.com | string | `"https://cloudformation.us-east-1.amazonaws.com"` | no |
| cloudwatch\_agent\_url | (Optional) S3 URL to CloudWatch Agent installer. Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip | string | `""` | no |
| configs\_path | Path to the directory that contains configuration files for vault | string | `""` | no |
| desired\_capacity | (Optional) Desired number of instances in the Autoscaling Group | string | `"2"` | no |
-| dynamodb\_table | Name of the Dynamodb to be used as storage backend for Vault | string | n/a | yes |
-| ec2\_extra\_security\_group\_ids | List of additional security groups to add to EC2 instances | list | n/a | yes |
+| domain\_name | The domain name where the vault URL will be registered. Example: domain.net | string | n/a | yes |
+| dynamodb\_table | Name of the DynamoDB table to be used as the storage backend for Vault | string | `""` | no |
+| ec2\_extra\_security\_group\_ids | List of additional security groups to add to EC2 instances | list | `` | no |
| ec2\_subnet\_ids | List of subnets where EC2 instances will be launched | list | n/a | yes |
| environment | Type of environment -- must be one of: dev, test, prod | string | n/a | yes |
| ingress\_cidr\_blocks | (Optional) List of CIDR blocks. | list | `` | no |
| instance\_type | Amazon EC2 instance type | string | `"t2.medium"` | no |
| ip\_data\_url | URL used to get the IP address of the current user | string | `"http://ipv4.icanhazip.com"` | no |
| key\_pair\_name | Keypair to associate with launched instances | string | n/a | yes |
| kms\_key\_id | ID of an AWS KMS key used for the auto-unseal operation when Vault is initialized | string | n/a | yes |
-| lb\_certificate\_arn | Arn of a created certificate to be use for the load balancer | string | n/a | yes |
| lb\_internal | Boolean indicating whether the load balancer is internal or external | string | `"false"` | no |
| lb\_ssl\_policy | The name of the SSL Policy for the listener | string | `"ELBSecurityPolicy-FS-2018-06"` | no |
| lb\_subnet\_ids | List of subnets to associate with the Load Balancer | list | n/a | yes |
| max\_capacity | (Optional) Maximum number of instances in the Autoscaling Group | string | `"2"` | no |
| min\_capacity | (Optional) Minimum number of instances in the Autoscaling Group | string | `"1"` | no |
| name | Name of the vault stack; will be used to prefix resources | string | n/a | yes |
| pypi\_index\_url | (Optional) URL to the PyPi Index | string | `"https://pypi.org/simple"` | no |
-| route53\_enabled | Creates Route53 DNS entries for Vault automatically | string | `"false"` | no |
-| route53\_zone\_id | Zone ID for domain | string | n/a | yes |
| tags | (Optional) list of tags to include with resource | map | `` | no |
| toggle\_update | (Optional) Toggle that triggers a stack update by modifying the launch config, resulting in new instances; must be one of: A or B | string | `"A"` | no |
-| vault\_url | The DNS address that vault will be accessible at. Example: vault.domain.net | string | n/a | yes |
+| vault\_url | The DNS address at which vault will be accessible. The stack name will be used as the URL when this value is empty. 
Example: vault.domain.net | string | `""` | no |
| vault\_version | Version of Vault to be installed on servers | string | n/a | yes |
| watchmaker\_admin\_groups | (Optional) Colon-separated list of domain groups that should have admin permissions on the EC2 instance | string | `""` | no |
| watchmaker\_admin\_users | (Optional) Colon-separated list of domain users that should have admin permissions on the EC2 instance | string | `""` | no |
| watchmaker\_config | (Optional) URL to a Watchmaker config file | string | `""` | no |
| watchmaker\_ou\_path | (Optional) DN of the OU to place the instance when joining a domain. If blank and WatchmakerEnvironment enforces a domain join, the instance will be placed in a default container. Leave blank if not joining a domain, or if WatchmakerEnvironment is false | string | `""` | no |

From 13b8ddb265755cf6d8a68733308c432cad597623 Mon Sep 17 00:00:00 2001
From: Triet Le
Date: Mon, 26 Aug 2019 21:15:05 -0400
Subject: [PATCH 09/34] Updates readme.md and adds a cast to the condition in
 the firewalld salt state

---
 README.md               | 13 +++++--------
 salt/vault/firewall.sls |  2 +-
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/README.md b/README.md
index d5e6e18..53e21a0 100755
--- a/README.md
+++ b/README.md
@@ -6,18 +6,18 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy

| Name | Description | Type | Default | Required |
|------|-------------|:----:|:-----:|:-----:|
-| additional\_ips\_allow\_inbound | List of ip address that allow to have access to resources | list | n/a | yes |
+| additional\_ips\_allow\_inbound | List of IP addresses allowed to access resources | list | `` | no |
| ami\_name\_filter | Will be used to filter AMIs | string | `"spel-minimal-centos-7-hvm-*.x86_64-gp2"` | no |
| ami\_name\_regex | Regex to help fine-grain AMI filtering | string | `"spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2"` | no |
| ami\_owner | Account id/alias of the AMI owner | string | n/a | yes |
-| bucket\_name | The name of the bucket will be use to store app scripts and vault's salt formula. | string | n/a | yes |
| cfn\_bootstrap\_utils\_url | (Optional) URL to aws-cfn-bootstrap-latest.tar.gz | string | `"https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz"` | no |
| cfn\_endpoint\_url | (Optional) URL to the CloudFormation Endpoint. e.g. https://cloudformation.us-east-1.amazonaws.com | string | `"https://cloudformation.us-east-1.amazonaws.com"` | no |
| cloudwatch\_agent\_url | (Optional) S3 URL to CloudWatch Agent installer. Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip | string | `""` | no |
| configs\_path | Path to the directory that contains configuration files for vault | string | `""` | no |
| desired\_capacity | (Optional) Desired number of instances in the Autoscaling Group | string | `"2"` | no |
-| dynamodb\_table | Name of the Dynamodb to be used as storage backend for Vault | string | n/a | yes |
-| ec2\_extra\_security\_group\_ids | List of additional security groups to add to EC2 instances | list | n/a | yes |
+| domain\_name | The domain name where the vault URL will be registered. Example: domain.net | string | n/a | yes |
+| dynamodb\_table | Name of the DynamoDB table to be used as the storage backend for Vault | string | `""` | no |
+| ec2\_extra\_security\_group\_ids | List of additional security groups to add to EC2 instances | list | `` | no |
| ec2\_subnet\_ids | List of subnets where EC2 instances will be launched | list | n/a | yes |
| environment | Type of environment -- must be one of: dev, test, prod | string | n/a | yes |
| ingress\_cidr\_blocks | (Optional) List of CIDR blocks. 
| list | `` | no |
| instance\_type | Amazon EC2 instance type | string | `"t2.medium"` | no |
| ip\_data\_url | URL used to get the IP address of the current user | string | `"http://ipv4.icanhazip.com"` | no |
| key\_pair\_name | Keypair to associate with launched instances | string | n/a | yes |
| kms\_key\_id | ID of an AWS KMS key used for the auto-unseal operation when Vault is initialized | string | n/a | yes |
-| lb\_certificate\_arn | Arn of a created certificate to be use for the load balancer | string | n/a | yes |
| lb\_internal | Boolean indicating whether the load balancer is internal or external | string | `"false"` | no |
| lb\_ssl\_policy | The name of the SSL Policy for the listener | string | `"ELBSecurityPolicy-FS-2018-06"` | no |
| lb\_subnet\_ids | List of subnets to associate with the Load Balancer | list | n/a | yes |
| max\_capacity | (Optional) Maximum number of instances in the Autoscaling Group | string | `"2"` | no |
| min\_capacity | (Optional) Minimum number of instances in the Autoscaling Group | string | `"1"` | no |
| name | Name of the vault stack; will be used to prefix resources | string | n/a | yes |
| pypi\_index\_url | (Optional) URL to the PyPi Index | string | `"https://pypi.org/simple"` | no |
-| route53\_enabled | Creates Route53 DNS entries for Vault automatically | string | `"false"` | no |
-| route53\_zone\_id | Zone ID for domain | string | n/a | yes |
| tags | (Optional) list of tags to include with resource | map | `` | no |
| toggle\_update | (Optional) Toggle that triggers a stack update by modifying the launch config, resulting in new instances; must be one of: A or B | string | `"A"` | no |
-| vault\_url | The DNS address that vault will be accessible at. Example: vault.domain.net | string | n/a | yes |
+| vault\_url | The DNS address at which vault will be accessible. The stack name will be used as the URL when this value is empty. 
Example: vault.domain.net | string | `""` | no | | vault\_version | Version of Vault to be installed on servers | string | n/a | yes | | watchmaker\_admin\_groups | (Optional) Colon-separated list of domain groups that should have admin permissions on the EC2 instance | string | `""` | no | | watchmaker\_admin\_users | (Optional) Colon-separated list of domain users that should have admin permissions on the EC2 instance | string | `""` | no | diff --git a/salt/vault/firewall.sls b/salt/vault/firewall.sls index 329a0e2..e707815 100644 --- a/salt/vault/firewall.sls +++ b/salt/vault/firewall.sls @@ -15,7 +15,7 @@ firewalld_vault_zone: - vault - sources: {%- for mac, properties in salt.grains.get('meta-data:network:interfaces:macs', {}).items() %} - {%- if properties['device-number'] == "0" %} + {%- if properties['device-number'] | int == 0 %} {%- for cidr in properties['vpc-ipv4-cidr-blocks'].split('\n') %} - {{ cidr }} {%- endfor %} From 3ebf5af335169d5f097a50aa0ef9e5348c20d46a Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 28 Aug 2019 12:33:32 -0400 Subject: [PATCH 10/34] Updates module test for both py2 and py3 --- tests/base/main.tf | 71 -------------------------- tests/vault-py2/main.tf | 36 +++++++++++++ tests/{base => vault-py2}/variables.tf | 30 ++++++----- tests/vault-py3/main.tf | 38 ++++++++++++++ tests/vault-py3/variables.tf | 65 +++++++++++++++++++++++ 5 files changed, 157 insertions(+), 83 deletions(-) delete mode 100644 tests/base/main.tf create mode 100644 tests/vault-py2/main.tf rename tests/{base => vault-py2}/variables.tf (76%) create mode 100644 tests/vault-py3/main.tf create mode 100644 tests/vault-py3/variables.tf diff --git a/tests/base/main.tf b/tests/base/main.tf deleted file mode 100644 index 080265d..0000000 --- a/tests/base/main.tf +++ /dev/null @@ -1,71 +0,0 @@ -locals { - test_prefix = "tf-vault-${random_string.this.result}" - key_pair_name = "${var.key_pair_name == "" ? aws_key_pair.this.0.id : var.key_pair_name}" - kms_key_id = "${var.kms_key_id == "" ? aws_kms_key.this.0.id : var.kms_key_id}" -} - -resource "random_string" "this" { - length = 6 - upper = false - special = false -} -resource "tls_private_key" "this" { - count = "${var.key_pair_name == "" ? 1 : 0}" - algorithm = "RSA" - rsa_bits = "4096" -} - -resource "aws_key_pair" "this" { - count = "${var.key_pair_name == "" ? 1 : 0}" - key_name = "${local.test_prefix}-rsa" - public_key = "${tls_private_key.this.0.public_key_openssh}" -} -resource "aws_kms_alias" "this" { - count = "${var.key_pair_name == "" ? 1 : 0}" - name = "alias/${local.test_prefix}" - target_key_id = "${aws_kms_key.this.0.key_id}" -} -resource "aws_kms_key" "this" { - count = "${var.kms_key_id == "" ? 
1 : 0}" - description = "KSM Key for vault tests" - deletion_window_in_days = 10 - tags = { - Environment = "${var.environment}" - Name = "${local.test_prefix}" - } -} - -module "vault-py3" { - source = "../../" - environment = "${var.environment}" - desired_capacity = 1 - ami_owner = "${var.ami_owner}" - - name = "${local.test_prefix}-py3" - key_pair_name = "${local.key_pair_name}" - kms_key_id = "${local.kms_key_id}" - ec2_subnet_ids = "${var.ec2_subnet_ids}" - lb_subnet_ids = "${var.lb_subnet_ids}" - - cloudwatch_agent_url = "${var.cloudwatch_agent_url}" - - domain_name = "${var.domain_name}" - vault_version = "${var.vault_version}" - dynamodb_table = "${var.dynamodb_table}" - - watchmaker_config = "${var.watchmaker_config}" - - toggle_update = "B" -} - - -data "aws_route53_zone" "this" { - name = "${var.domain_name}" - private_zone = false -} - -data "aws_acm_certificate" "this" { - domain = "*.${var.domain_name}" - types = ["AMAZON_ISSUED"] - most_recent = true -} diff --git a/tests/vault-py2/main.tf b/tests/vault-py2/main.tf new file mode 100644 index 0000000..3aebafd --- /dev/null +++ b/tests/vault-py2/main.tf @@ -0,0 +1,36 @@ +terraform { + required_version = ">= 0.12" +} + +resource "random_id" "name" { + byte_length = 6 + prefix = "tf-vault-" +} + +module "vault-py2" { + source = "../../" + + environment = var.environment + desired_capacity = 1 + ami_owner = var.ami_owner + + name = "${random_id.name.hex}-py2" + key_pair_name = var.key_pair_name + kms_key_id = var.kms_key_id + ec2_subnet_ids = var.ec2_subnet_ids + lb_subnet_ids = var.lb_subnet_ids + + cloudwatch_agent_url = var.cloudwatch_agent_url + + domain_name = var.domain_name + route53_zone_id = var.route53_zone_id + + # Vault settings + vault_version = var.vault_version + dynamodb_table = var.dynamodb_table + + # Watchmaker settings + watchmaker_config = var.watchmaker_config + + toggle_update = "B" +} diff --git a/tests/base/variables.tf b/tests/vault-py2/variables.tf similarity index 76% rename from tests/base/variables.tf rename to tests/vault-py2/variables.tf index d48bb9f..d878e25 100644 --- a/tests/base/variables.tf +++ b/tests/vault-py2/variables.tf @@ -1,59 +1,65 @@ variable "environment" { description = "Type of environment -- must be one of: dev, test, prod" - type = "string" + type = string default = "test" } + variable "key_pair_name" { description = "Keypair to associate to launched instances" - type = "string" - default = "" + type = string } variable "ami_owner" { description = "Account id/alias of the AMI owner" - type = "string" + type = string } variable "ec2_subnet_ids" { description = "List of subnets where EC2 instances will be launched" - type = "list" + type = list(string) } variable "lb_subnet_ids" { description = "List of subnets to associate to the Load Balancer" - type = "list" + type = list(string) } variable "vault_version" { description = "Version of Vault to be installed on servers" - type = "string" + type = string } variable "kms_key_id" { description = "Id of an AWS KMS key use for auto unseal operation when vault is intialize" - type = "string" + type = string default = "" } variable "domain_name" { - type = "string" + type = string description = "Domain to provision test vault cluster" } +variable "route53_zone_id" { + type = string + description = "Hosted zone ID Route 53 hosted zone" +} + variable "dynamodb_table" { description = "Name of the Dynamodb to be used as storage backend for Vault" - type = "string" + type = string default = "" } variable "cloudwatch_agent_url" { - type = 
"string" + type = string description = "(Optional) S3 URL to CloudWatch Agent installer. Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip" default = "" } variable "watchmaker_config" { - type = "string" + type = string description = "(Optional) URL to a Watchmaker config file" default = "" } + diff --git a/tests/vault-py3/main.tf b/tests/vault-py3/main.tf new file mode 100644 index 0000000..c58092f --- /dev/null +++ b/tests/vault-py3/main.tf @@ -0,0 +1,38 @@ +terraform { + required_version = ">= 0.12" +} + +resource "random_id" "name" { + byte_length = 6 + prefix = "tf-vault-py3-" +} + + +module "vault-py3" { + source = "../../" + + environment = var.environment + desired_capacity = 1 + ami_owner = var.ami_owner + + name = "${random_id.name.hex}-py3" + key_pair_name = var.key_pair_name + kms_key_id = var.kms_key_id + ec2_subnet_ids = var.ec2_subnet_ids + lb_subnet_ids = var.lb_subnet_ids + + cloudwatch_agent_url = var.cloudwatch_agent_url + + domain_name = var.domain_name + route53_zone_id = var.route53_zone_id + + # Vault settings + vault_version = var.vault_version + dynamodb_table = var.dynamodb_table + + # Watchmaker settings + watchmaker_config = var.watchmaker_config + + toggle_update = "B" +} + diff --git a/tests/vault-py3/variables.tf b/tests/vault-py3/variables.tf new file mode 100644 index 0000000..d878e25 --- /dev/null +++ b/tests/vault-py3/variables.tf @@ -0,0 +1,65 @@ +variable "environment" { + description = "Type of environment -- must be one of: dev, test, prod" + type = string + default = "test" +} + +variable "key_pair_name" { + description = "Keypair to associate to launched instances" + type = string +} + +variable "ami_owner" { + description = "Account id/alias of the AMI owner" + type = string +} + +variable "ec2_subnet_ids" { + description = "List of subnets where EC2 instances will be launched" + type = list(string) +} + +variable "lb_subnet_ids" { + description = "List of subnets to associate to the Load Balancer" + type = list(string) +} + +variable "vault_version" { + description = "Version of Vault to be installed on servers" + type = string +} + +variable "kms_key_id" { + description = "Id of an AWS KMS key use for auto unseal operation when vault is intialize" + type = string + default = "" +} + +variable "domain_name" { + type = string + description = "Domain to provision test vault cluster" +} + +variable "route53_zone_id" { + type = string + description = "Hosted zone ID Route 53 hosted zone" +} + +variable "dynamodb_table" { + description = "Name of the Dynamodb to be used as storage backend for Vault" + type = string + default = "" +} + +variable "cloudwatch_agent_url" { + type = string + description = "(Optional) S3 URL to CloudWatch Agent installer. 
Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip" + default = "" +} + +variable "watchmaker_config" { + type = string + description = "(Optional) URL to a Watchmaker config file" + default = "" +} + From 0bee32cdbbbd30a9dd04606ce3a26ae486861674 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 28 Aug 2019 12:34:29 -0400 Subject: [PATCH 11/34] Clean up tf template/scripts per feedback --- README.md | 45 --- main.tf | 304 +++++++++--------- modules/bucket/main.tf | 53 --- modules/iam/iam_policy.json | 2 +- modules/iam/main.tf | 75 ++--- outputs.tf | 1 + .../bucket => policies}/bucket_policy.json | 0 salt/vault/install.sls | 36 ++- salt/vault/maps/osfamilymap.yaml | 1 - scripts/appscript.sh | 74 ++--- variables.tf | 81 +++-- 11 files changed, 280 insertions(+), 392 deletions(-) delete mode 100644 modules/bucket/main.tf rename {modules/bucket => policies}/bucket_policy.json (100%) diff --git a/README.md b/README.md index 53e21a0..0fd88c6 100755 --- a/README.md +++ b/README.md @@ -2,48 +2,3 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA DyanamoDb storage backend. -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|:----:|:-----:|:-----:| -| additional\_ips\_allow\_inbound | List of ip address that allow to have access to resources | list | `` | no | -| ami\_name\_filter | Will be use to filter out AMI | string | `"spel-minimal-centos-7-hvm-*.x86_64-gp2"` | no | -| ami\_name\_regex | Regex to help fine-grain filtering AMI | string | `"spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2"` | no | -| ami\_owner | Account id/alias of the AMI owner | string | n/a | yes | -| cfn\_bootstrap\_utils\_url | (Optional) URL to aws-cfn-bootstrap-latest.tar.gz | string | `"https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz"` | no | -| cfn\_endpoint\_url | (Optional) URL to the CloudFormation Endpoint. e.g. https://cloudformation.us-east-1.amazonaws.com | string | `"https://cloudformation.us-east-1.amazonaws.com"` | no | -| cloudwatch\_agent\_url | (Optional) S3 URL to CloudWatch Agent installer. Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip | string | `""` | no | -| configs\_path | Path to directory that contains configuration files for vault | string | `""` | no | -| desired\_capacity | (Optional) Desired number of instances in the Autoscaling Group | string | `"2"` | no | -| domain\_name | The domain name where vault url will be registered to. Example: domain.net | string | n/a | yes | -| dynamodb\_table | Name of the Dynamodb to be used as storage backend for Vault | string | `""` | no | -| ec2\_extra\_security\_group\_ids | List of additional security groups to add to EC2 instances | list | `` | no | -| ec2\_subnet\_ids | List of subnets where EC2 instances will be launched | list | n/a | yes | -| environment | Type of environment -- must be one of: dev, test, prod | string | n/a | yes | -| ingress\_cidr\_blocks | (Optional) List of CIDR block. 
| list | `` | no | -| instance\_type | Amazon EC2 instance type | string | `"t2.medium"` | no | -| ip\_data\_url | URL to get ip address of the current user | string | `"http://ipv4.icanhazip.com"` | no | -| key\_pair\_name | Keypair to associate to launched instances | string | n/a | yes | -| kms\_key\_id | Id of an AWS KMS key use for auto unseal operation when vault is intialize | string | n/a | yes | -| lb\_internal | Boolean indicating whether the load balancer is internal or external | string | `"false"` | no | -| lb\_ssl\_policy | The name of the SSL Policy for the listener | string | `"ELBSecurityPolicy-FS-2018-06"` | no | -| lb\_subnet\_ids | List of subnets to associate to the Load Balancer | list | n/a | yes | -| max\_capacity | (Optional) Maximum number of instances in the Autoscaling Group | string | `"2"` | no | -| min\_capacity | (Optional) Minimum number of instances in the Autoscaling Group | string | `"1"` | no | -| name | Name of the vault stack, will be use to prefix resources | string | n/a | yes | -| pypi\_index\_url | (Optional) URL to the PyPi Index | string | `"https://pypi.org/simple"` | no | -| tags | (Optional) list of tags to include with resource | map | `` | no | -| toggle\_update | (Optional) Toggle that triggers a stack update by modifying the launch config, resulting in new instances; must be one of: A or B | string | `"A"` | no | -| vault\_url | The DNS address that vault will be accessible at. Stack name will be used as the url when value is set to empty. Example: vault.domain.net | string | `""` | no | -| vault\_version | Version of Vault to be installed on servers | string | n/a | yes | -| watchmaker\_admin\_groups | (Optional) Colon-separated list of domain groups that should have admin permissions on the EC2 instance | string | `""` | no | -| watchmaker\_admin\_users | (Optional) Colon-separated list of domain users that should have admin permissions on the EC2 instance | string | `""` | no | -| watchmaker\_config | (Optional) URL to a Watchmaker config file | string | `""` | no | -| watchmaker\_ou\_path | (Optional) DN of the OU to place the instance when joining a domain. If blank and WatchmakerEnvironment enforces a domain join, the instance will be placed in a default container. Leave blank if not joining a domain, or if WatchmakerEnvironment is false | string | `""` | no | - -## Outputs - -| Name | Description | -|------|-------------| -| vault\_url | URL to access Vault UI | - diff --git a/main.tf b/main.tf index 290c256..5a72775 100644 --- a/main.tf +++ b/main.tf @@ -1,3 +1,4 @@ + terraform { required_version = ">= 0.12" } @@ -7,103 +8,115 @@ terraform { ### locals { - vpc_id = "${data.aws_subnet.lb.0.vpc_id}" - role_name = "${upper(var.name)}-INSTANCE-${data.aws_caller_identity.current.account_id}" - ssm_root_path = "vault/${var.environment}/${data.aws_caller_identity.current.account_id}/${var.name}" - public_ip = "${chomp(data.http.ip.body)}/32" - allow_inbound = "${compact(distinct(concat(list(local.public_ip), var.additional_ips_allow_inbound)))}" - archive_file_name = "salt.zip" - configs_file_name = "configs.zip" - appscript_file_name = "appscript.sh" - archive_dir_path = "${path.module}/.files" - appscript_dir_path = "${path.module}/scripts" - dynamodb_table = "${var.dynamodb_table == "" ? aws_dynamodb_table.this.id : var.dynamodb_table}" - url = "${var.name}.${var.domain_name}" - vault_url = "${var.vault_url == "" ? 
local.url : var.vault_url}" - stack_name = "${var.name}-${var.environment}" - tags = { - Environment = "${var.environment}" - } + vpc_id = element(data.aws_subnet.lb.*.vpc_id, 0) + archive_file_name = "salt.zip" + configs_file_name = "configs.zip" + appscript_file_name = "appscript.sh" + appscript_url = join("/", [module.s3_bucket.id, random_string.this.result, local.appscript_file_name]) + archive_dir_path = join("/", [path.module, ".files"]) + appscript_dir_path = join("/", [path.module, "scripts"]) + role_name = join("-", [upper(var.name), "INSTANCE", data.aws_caller_identity.current.account_id]) + ssm_root_path = join("/", ["vault", var.environment, data.aws_caller_identity.current.account_id, var.name]) + s3_salt_vault_content = join("/", [module.s3_bucket.id, random_string.this.result, local.archive_file_name]) + s3_vault_configuration = var.configs_path == "" ? "n/a" : join("/", [module.s3_bucket.id, random_string.this.result, local.configs_file_name]) + dynamodb_table = var.dynamodb_table == "" ? aws_dynamodb_table.this.id : var.dynamodb_table + kms_key_id = var.kms_key_id == "" ? join("", aws_kms_key.this.*.id) : var.kms_key_id + vault_url = var.vault_url == "" ? join(".", [var.name, var.domain_name]) : var.vault_url + + tags = merge(var.tags, + { + Environment = var.environment + } + ) } ### ### DATA SOURCES ### -data "aws_partition" "current" {} +data "aws_partition" "current" { +} -data "aws_caller_identity" "current" {} +data "aws_caller_identity" "current" { +} -data "aws_region" "current" {} +data "aws_region" "current" { +} data "aws_ami" "this" { most_recent = "true" - owners = ["${var.ami_owner}"] - - name_regex = "${var.ami_name_regex}" - + owners = [var.ami_owner] + name_regex = var.ami_name_regex filter { name = "name" - values = ["${var.ami_name_filter}"] + values = [var.ami_name_filter] } } -data "http" "ip" { - url = "${var.ip_data_url}" -} - data "aws_subnet" "lb" { - count = "${length(var.lb_subnet_ids)}" + count = length(var.lb_subnet_ids) - id = "${var.lb_subnet_ids[count.index]}" -} - -data "aws_kms_key" "this" { - key_id = "${var.kms_key_id}" + id = var.lb_subnet_ids[count.index] } data "archive_file" "salt" { type = "zip" - source_dir = "${path.module}/salt" - output_path = "${local.archive_dir_path}/${local.archive_file_name}" + source_dir = join("/", [path.module, "salt"]) + output_path = join("/", [local.archive_dir_path, local.archive_file_name]) } data "archive_file" "configs" { - count = "${var.configs_path == "" ? 0 : 1}" + count = var.configs_path == "" ? 
0 : 1 type = "zip" - source_dir = "${var.configs_path}" - output_path = "${local.archive_dir_path}/${local.configs_file_name}" -} - -data "aws_route53_zone" "this" { - name = "${var.domain_name}" - private_zone = false + source_dir = var.configs_path + output_path = join("/", [local.archive_dir_path, local.configs_file_name]) } data "aws_acm_certificate" "this" { - domain = "*.${var.domain_name}" + domain = join(".", ["*", var.domain_name]) types = ["AMAZON_ISSUED"] most_recent = true } +data "template_file" "appscript" { + template = file(join("/", [local.appscript_dir_path, local.appscript_file_name])) + + vars = { + salt_content_archive = local.s3_salt_vault_content + vault_config_archive = local.s3_vault_configuration + vault_version = var.vault_version + dynamodb_table = local.dynamodb_table + kms_key_id = local.kms_key_id + ssm_path = local.ssm_root_path + } +} + # Manage S3 bucket module module "s3_bucket" { - source = "./modules/bucket" + source = "terraform-aws-modules/s3-bucket/aws" + + bucket = var.name +} - bucket_name = "${var.name}-appscript" + +resource "aws_s3_bucket_policy" "this" { + bucket = module.s3_bucket.id + policy = templatefile("${path.module}/policies/bucket_policy.json", { bucket_arn = module.s3_bucket.arn }) } # Manage IAM module module "iam" { source = "./modules/iam" - bucket_name = "${module.s3_bucket.bucket_name}" - dynamodb_table = "${local.dynamodb_table}" - kms_key_id = "${data.aws_kms_key.this.key_id}" - stack_name = "${local.stack_name}" - role_name = "${local.role_name}" - ssm_root_path = "${local.ssm_root_path}" + role_name = local.role_name + policy_vars = { + bucket_name = var.name + dynamodb_table = local.dynamodb_table + kms_key_id = local.kms_key_id + stack_name = var.name + ssm_path = local.ssm_root_path + } } # Generate a random id for each deployment @@ -114,46 +127,60 @@ resource "random_string" "this" { # Manage archive and appscript files resource "aws_s3_bucket_object" "salt_zip" { - bucket = "${module.s3_bucket.bucket_name}" - key = "${random_string.this.result}/${local.archive_file_name}" - source = "${local.archive_dir_path}/${local.archive_file_name}" - etag = "${data.archive_file.salt.output_md5}" + bucket = module.s3_bucket.id + key = join("/", [random_string.this.result, local.archive_file_name]) + source = join("/", [local.archive_dir_path, local.archive_file_name]) + etag = data.archive_file.salt.output_md5 } resource "aws_s3_bucket_object" "configs_zip" { - count = "${var.configs_path == "" ? 0 : 1}" - bucket = "${module.s3_bucket.bucket_name}" - key = "${random_string.this.result}/${local.configs_file_name}" - source = "${local.archive_dir_path}/${local.configs_file_name}" - etag = "${data.archive_file.configs.*.output_md5[count.index]}" + count = var.configs_path == "" ? 
0 : 1 + bucket = module.s3_bucket.id + key = join("/", [random_string.this.result, local.configs_file_name]) + source = join("/", [local.archive_dir_path, local.configs_file_name]) + etag = data.archive_file.configs[count.index].output_md5 } resource "aws_s3_bucket_object" "app_script" { - bucket = "${module.s3_bucket.bucket_name}" - key = "${random_string.this.result}/${local.appscript_file_name}" - source = "${local.appscript_dir_path}/${local.appscript_file_name}" - etag = "${filemd5("${local.appscript_dir_path}/${local.appscript_file_name}")}" + bucket = module.s3_bucket.id + key = join("/", [random_string.this.result, local.appscript_file_name]) + content = data.template_file.appscript.rendered + etag = md5(data.template_file.appscript.rendered) } - # Manage domain record resource "aws_route53_record" "this" { - zone_id = "${data.aws_route53_zone.this.zone_id}" - name = "${local.vault_url}" + zone_id = var.route53_zone_id + name = local.vault_url type = "A" alias { - name = "${aws_lb.this.dns_name}" - zone_id = "${aws_lb.this.zone_id}" + name = aws_lb.this.dns_name + zone_id = aws_lb.this.zone_id evaluate_target_health = false } } +# Manage KMS key +resource "aws_kms_alias" "this" { + count = var.kms_key_id == "" ? 1 : 0 + name = "alias/${var.name}" + target_key_id = join("", aws_kms_key.this.*.key_id) +} + +resource "aws_kms_key" "this" { + count = var.kms_key_id == "" ? 1 : 0 + description = "KSM Key for ${var.name}" + deletion_window_in_days = 10 + + tags = merge({ Name = var.name }, local.tags) +} + # Manage load balancer resource "aws_lb" "this" { - name = "${var.name}-lb-${var.environment}" + name = var.name internal = "false" - security_groups = ["${aws_security_group.lb.id}"] - subnets = "${var.lb_subnet_ids}" + security_groups = [aws_security_group.lb.id] + subnets = var.lb_subnet_ids # access_logs { # enabled = true @@ -161,11 +188,11 @@ resource "aws_lb" "this" { # prefix = "logs/lb_access_logs" # } - tags = "${merge(map("Name", "${var.name}-lb"), local.tags)}" + tags = merge({ Name = var.name }, local.tags) } resource "aws_lb_listener" "http" { - load_balancer_arn = "${aws_lb.this.arn}" + load_balancer_arn = aws_lb.this.arn port = "80" protocol = "HTTP" @@ -181,23 +208,23 @@ resource "aws_lb_listener" "http" { } resource "aws_lb_listener" "https" { - load_balancer_arn = "${aws_lb.this.arn}" + load_balancer_arn = aws_lb.this.arn port = "443" protocol = "HTTPS" - ssl_policy = "${var.lb_ssl_policy}" - certificate_arn = "${data.aws_acm_certificate.this.arn}" + ssl_policy = var.lb_ssl_policy + certificate_arn = data.aws_acm_certificate.this.arn default_action { - target_group_arn = "${aws_lb_target_group.this.arn}" + target_group_arn = aws_lb_target_group.this.arn type = "forward" } } resource "aws_lb_target_group" "this" { - name = "${var.name}-tg-${var.environment}" + name = var.name port = "8200" protocol = "HTTP" - vpc_id = "${local.vpc_id}" + vpc_id = local.vpc_id deregistration_delay = "10" @@ -216,27 +243,27 @@ resource "aws_lb_target_group" "this" { unhealthy_threshold = "2" } - tags = "${merge(map("Name", "${var.name}-tg"), local.tags)}" + tags = merge({ Name = var.name }, local.tags) } # Manage security groups resource "aws_security_group" "lb" { - name = "${var.name}-lb-sg-${var.environment}" - description = "Rules required for operation of ${var.name}" - vpc_id = "${local.vpc_id}" + name = "${var.name}-lb" + description = "Allow web traffic to the load balancer" + vpc_id = local.vpc_id ingress { from_port = 80 to_port = 80 protocol = "tcp" - cidr_blocks = 
"${var.ingress_cidr_blocks}" + cidr_blocks = var.ingress_cidr_blocks } ingress { from_port = 443 to_port = 443 protocol = "tcp" - cidr_blocks = "${var.ingress_cidr_blocks}" + cidr_blocks = var.ingress_cidr_blocks } egress { @@ -246,20 +273,20 @@ resource "aws_security_group" "lb" { cidr_blocks = ["0.0.0.0/0"] } - tags = "${merge(map("Name", "${var.name}-lb-${var.environment}"), local.tags)}" + tags = merge({ Name = "${var.name}-lb" }, local.tags) } resource "aws_security_group" "ec2" { - name = "${var.name}-ec2-sg-${var.environment}" - description = "Rules required for operation of ${var.name}" - vpc_id = "${local.vpc_id}" + name = "${var.name}-ec2" + description = "Allow vault traffic between ALB and EC2 instances" + vpc_id = local.vpc_id ingress { from_port = 8200 to_port = 8200 description = "Allows traffics to come to vault" protocol = "tcp" - security_groups = ["${aws_security_group.lb.id}"] + security_groups = [aws_security_group.lb.id] } ingress { @@ -277,99 +304,68 @@ resource "aws_security_group" "ec2" { cidr_blocks = ["0.0.0.0/0"] } - tags = "${merge(map("Name", "${var.name}-ec2-sg-${var.environment}"), local.tags)}" -} - -resource "aws_security_group_rule" "ssh" { - count = "${var.environment == "dev" ? 1 : 0}" - type = "ingress" - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = "${local.allow_inbound}" - - security_group_id = "${aws_security_group.ec2.id}" + tags = merge({ Name = "${var.name}-ec2" }, local.tags) } -# Prepare appscript parameters -locals { - # combine key to configs s3 object, otherwise pass 'n/a' to appscript - s3_configs_key = "${var.configs_path == "" ? "n/a" : "${module.s3_bucket.bucket_name}/${random_string.this.result}/${local.configs_file_name}"}" - params_for_appscript = [ - "${module.s3_bucket.bucket_name}/${random_string.this.result}/${local.archive_file_name}", - "${local.s3_configs_key}", - "${var.vault_version}", - "${local.dynamodb_table}", - "${data.aws_kms_key.this.key_id}", - "${local.ssm_root_path}" - ] - - appscript_url = "s3://${module.s3_bucket.bucket_name}/${random_string.this.result}/${local.appscript_file_name}" - appscript_params = "${join(" ", local.params_for_appscript)}" -} - - # Manage Dynamodb Tables resource "aws_dynamodb_table" "this" { - name = "${var.name}-data" + name = var.name read_capacity = 5 write_capacity = 5 hash_key = "Path" range_key = "Key" + attribute { name = "Path" type = "S" } + attribute { name = "Key" type = "S" } - tags = { - Name = "${var.name}-data" - Environment = "${var.environment}" - } + tags = merge({ Name = var.name }, local.tags) } - -# Manage autoscaling group +# # Manage autoscaling group module "autoscaling_group" { source = "git::https://github.com/plus3it/terraform-aws-watchmaker//modules/lx-autoscale?ref=1.15.7" - Name = "${local.stack_name}" + Name = var.name OnFailureAction = "" DisableRollback = "true" - AmiId = "${data.aws_ami.this.id}" + AmiId = data.aws_ami.this.id AmiDistro = "CentOS" - AppScriptUrl = "${local.appscript_url}" - AppScriptParams = "${local.appscript_params}" - CfnBootstrapUtilsUrl = "${var.cfn_bootstrap_utils_url}" + AppScriptUrl = join("", ["s3://", local.appscript_url]) + CfnBootstrapUtilsUrl = var.cfn_bootstrap_utils_url - CfnEndpointUrl = "${var.cfn_endpoint_url}" - CloudWatchAgentUrl = "${var.cloudwatch_agent_url}" + CfnEndpointUrl = var.cfn_endpoint_url + CloudWatchAgentUrl = var.cloudwatch_agent_url CloudWatchAppLogs = ["/var/log/salt_vault.log", "/var/log/salt_vault_initialize.log", "/var/log/salt_vault_sync.log"] - KeyPairName = 
"${var.key_pair_name}" - InstanceRole = "${module.iam.profile_name}" - InstanceType = "${var.instance_type}" + KeyPairName = var.key_pair_name + InstanceRole = module.iam.profile_name + InstanceType = var.instance_type NoReboot = "true" NoPublicIp = "false" - PypiIndexUrl = "${var.pypi_index_url}" - SecurityGroupIds = "${join(",", compact(concat(list(aws_security_group.ec2.id), var.ec2_extra_security_group_ids)))}" - SubnetIds = "${join(",", var.ec2_subnet_ids)}" - TargetGroupArns = "${aws_lb_target_group.this.arn}" - ToggleNewInstances = "${var.toggle_update}" + PypiIndexUrl = var.pypi_index_url + SecurityGroupIds = join(",", compact(concat([aws_security_group.ec2.id], var.ec2_extra_security_group_ids))) + SubnetIds = join(",", var.ec2_subnet_ids) + TargetGroupArns = aws_lb_target_group.this.arn + ToggleNewInstances = var.toggle_update TimeoutInMinutes = "20" - WatchmakerEnvironment = "${var.environment}" - WatchmakerConfig = "${var.watchmaker_config}" - WatchmakerAdminGroups = "${var.watchmaker_admin_groups}" - WatchmakerAdminUsers = "${var.watchmaker_admin_users}" - WatchmakerOuPath = "${var.watchmaker_ou_path}" + WatchmakerEnvironment = var.environment + WatchmakerConfig = var.watchmaker_config + WatchmakerAdminGroups = var.watchmaker_admin_groups + WatchmakerAdminUsers = var.watchmaker_admin_users + WatchmakerOuPath = var.watchmaker_ou_path - DesiredCapacity = "${var.desired_capacity}" - MinCapacity = "${var.min_capacity}" - MaxCapacity = "${var.max_capacity}" + DesiredCapacity = var.desired_capacity + MinCapacity = var.min_capacity + MaxCapacity = var.max_capacity EnableRepos = "epel" } + diff --git a/modules/bucket/main.tf b/modules/bucket/main.tf deleted file mode 100644 index 92cb8b6..0000000 --- a/modules/bucket/main.tf +++ /dev/null @@ -1,53 +0,0 @@ -### -### VARIABLES -### - -variable "bucket_name" { - description = "The name of the bucket will be use to store app scripts and vault's salt formula." 
- type = "string" - default = "vault-salt-formula" -} - -### -### DATA SOURCES -### - -data "aws_partition" "current" {} - -data "aws_caller_identity" "current" {} - -data "aws_region" "current" {} - -data "template_file" "bucket_policy" { - template = "${file("${path.module}/bucket_policy.json")}" - - vars = { - bucket_arn = "${aws_s3_bucket.this.arn}" - } -} - -### -### RESOURCES -### - -resource "aws_s3_bucket" "this" { - bucket = "${var.bucket_name}" -} - -resource "aws_s3_bucket_policy" "this" { - bucket = "${aws_s3_bucket.this.id}" - policy = "${data.template_file.bucket_policy.rendered}" -} - -### -### OUTPUTS -### -output "bucket_name" { - description = "Name of the S3 bucket" - value = "${aws_s3_bucket.this.id}" -} - -output "bucket_arn" { - description = "ARN of the S3 bucket" - value = "${aws_s3_bucket.this.arn}" -} diff --git a/modules/iam/iam_policy.json b/modules/iam/iam_policy.json index a4bd86a..b8ef8ae 100644 --- a/modules/iam/iam_policy.json +++ b/modules/iam/iam_policy.json @@ -51,7 +51,7 @@ ], "Effect": "Allow", "Resource": [ - "arn:aws:kms:${region}:${account_id}:key/${key_id}" + "arn:aws:kms:${region}:${account_id}:key/${kms_key_id}" ], "Sid": "KMSDescribeKey" }, diff --git a/modules/iam/main.tf b/modules/iam/main.tf index cef603a..f1a11ed 100644 --- a/modules/iam/main.tf +++ b/modules/iam/main.tf @@ -1,71 +1,49 @@ ### ### REQUIRED VARIABLES ### -variable "stack_name" { - description = "Name of the stack" - type = "string" -} - -variable "kms_key_id" { - description = "Id of an AWS KMS key use for auto unseal operation when vault is intialize" - type = "string" -} - -variable "dynamodb_table" { - description = "Name of the Dynamodb to be used as storage backend for Vault" - type = "string" -} - -variable "bucket_name" { - description = "The name of the bucket will be use to store app scripts and vault's salt formula." - type = "string" +variable "policy_vars" { + description = "Variables for interpolation within the template. Must include the following vars: bucket_name, dynamodb_table, kms_key_id, stack_name, ssm_path" + type = map(string) } variable "role_name" { description = "Name of the role to be create for vault" - type = "string" -} - -variable "ssm_root_path" { - description = "SSM parameter path. Initialize scripts will create tokens and store them as parameter at this path." 
- type = "string" + type = string } - ### ### OPTIONAL VARIABLES ### variable "url_suffix" { default = "amazonaws.com" description = "URL suffix associated with the current partition" - type = "string" + type = string } ### ### DATA ### -data "aws_partition" "current" {} +data "aws_partition" "current" { +} -data "aws_caller_identity" "current" {} +data "aws_caller_identity" "current" { +} -data "aws_region" "current" {} +data "aws_region" "current" { +} ### ### RESOURCES ### data "template_file" "instance_policy" { - template = "${file("${path.module}/iam_policy.json")}" + template = file("${path.module}/iam_policy.json") - vars = { - partition = "${data.aws_partition.current.partition}" - region = "${data.aws_region.current.name}" - account_id = "${data.aws_caller_identity.current.account_id}" - - stack_name = "${var.stack_name}" - key_id = "${var.kms_key_id}" - dynamodb_table = "${var.dynamodb_table}" - bucket_name = "${var.bucket_name}" - ssm_path = "${var.ssm_root_path}" - } + vars = merge(var.policy_vars, + { + partition = data.aws_partition.current.partition + region = data.aws_region.current.name + account_id = data.aws_caller_identity.current.account_id + } + ) } data "aws_iam_policy_document" "instance_trust_policy" { @@ -80,19 +58,19 @@ data "aws_iam_policy_document" "instance_trust_policy" { } resource "aws_iam_role" "instance" { - name = "${var.role_name}" - assume_role_policy = "${data.aws_iam_policy_document.instance_trust_policy.json}" + name = var.role_name + assume_role_policy = data.aws_iam_policy_document.instance_trust_policy.json } resource "aws_iam_role_policy" "instance" { name_prefix = "${var.role_name}_" - policy = "${data.template_file.instance_policy.rendered}" - role = "${aws_iam_role.instance.id}" + policy = data.template_file.instance_policy.rendered + role = aws_iam_role.instance.id } resource "aws_iam_instance_profile" "instance" { - name = "${var.role_name}" - role = "${aws_iam_role.instance.name}" + name = var.role_name + role = aws_iam_role.instance.name } ### @@ -100,5 +78,6 @@ resource "aws_iam_instance_profile" "instance" { ### output "profile_name" { - value = "${aws_iam_instance_profile.instance.name}" + value = aws_iam_instance_profile.instance.name } + diff --git a/outputs.tf b/outputs.tf index 1a02e21..ab35b45 100644 --- a/outputs.tf +++ b/outputs.tf @@ -2,3 +2,4 @@ output "vault_url" { description = "URL to access Vault UI" value = "https://${aws_route53_record.this.fqdn}" } + diff --git a/modules/bucket/bucket_policy.json b/policies/bucket_policy.json similarity index 100% rename from modules/bucket/bucket_policy.json rename to policies/bucket_policy.json diff --git a/salt/vault/install.sls b/salt/vault/install.sls index 7d87922..8ea40d7 100644 --- a/salt/vault/install.sls +++ b/salt/vault/install.sls @@ -71,19 +71,39 @@ vault_package_install_cmd_run: - onchanges: - archive: vault_package_install_archive_extracted - install_package_dependencies: pkg.installed: - pkgs: {{ vault.module_dependencies.pkgs | json }} - reload_modules: True -{# install_pip_executable: +# Python2 +{%- if salt.grains.get('pythonversion')[0] | int == 2 %} + +install_pip_module: + pkg.installed: + - name: python2-pip + +install_pip_upgrade: cmd.run: - - name: | - curl -L "https://bootstrap.pypa.io/get-pip.py" > get_pip.py - sudo python get_pip.py pip==19.0.0 - rm get_pip.py - - reload_modules: True #} + - name: python2 -m pip install --ignore-installed --upgrade 'pip==18.0.0' + - unless: python2 -m pip -V | grep '18.0.0' + - require: + - pkg: install_pip_module + - 
reload_modules: True + +install_python_dependencies: + pip.installed: + - pkgs: {{ vault.module_dependencies.pip_deps | json }} + - reload_modules: True + - ignore_installed: True + +{%- endif %} + +# Python3 +{%- if salt.grains.get('pythonversion')[0] | int == 3 %} +install_pip_module: + pkg.installed: + - name: python36-pip install_python_dependencies: pip.installed: @@ -91,3 +111,5 @@ install_python_dependencies: - target: /usr/lib/python3.6/site-packages - reload_modules: True - ignore_installed: True +{%- endif %} + diff --git a/salt/vault/maps/osfamilymap.yaml b/salt/vault/maps/osfamilymap.yaml index 421353d..fc8b25a 100644 --- a/salt/vault/maps/osfamilymap.yaml +++ b/salt/vault/maps/osfamilymap.yaml @@ -11,7 +11,6 @@ RedHat: - libffi-devel - python-devel - openssl-devel - - python36-pip pip_deps: - hvac - testinfra diff --git a/scripts/appscript.sh b/scripts/appscript.sh index c10d334..8497b49 100644 --- a/scripts/appscript.sh +++ b/scripts/appscript.sh @@ -1,47 +1,37 @@ #!/bin/bash set -eu -o pipefail -[[ $# -lt 6 ]] && { - echo "Usage $0 " >&2 - echo " Example: $0 bucket-foo/randomid/salt.zip bucket-foo/randomid/configs.zip 1.2.0 - vault-data-table xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx vault/dev/token /etc/vault/configs" >&2 - exit 1 -} - # Required vars -SALT_ARCHIVE=$1 -CONFIGS_ARCHIVE=$2 -VAULT_VERSION=$3 -DYNAMODB_TABLE=$4 -KMS_KEY_ID=$5 -SSM_PATH=$6 - -# Internal vars -AWS_AZ=$(curl -sSL http://169.254.169.254/latest/meta-data/placement/availability-zone) +SALT_ARCHIVE=${salt_content_archive} +CONFIGS_ARCHIVE=${vault_config_archive} +VAULT_VERSION=${vault_version} +DYNAMODB_TABLE=${dynamodb_table} +KMS_KEY_ID=${kms_key_id} +SSM_PATH=${ssm_path} -# Export standard aws envs -export AWS_DEFAULT_REGION=${AWS_AZ:0:${#AWS_AZ} - 1} +# Standard aws envs +export AWS_DEFAULT_REGION=$(curl -sSL http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/.$//') -# Export Vault local address +# Vault local address export VAULT_ADDR=http://127.0.0.1:8200 -export SALT_DIR="/srv/salt" -export CONFIGURATION_PATH="/etc/vault/configs" -export ARCHIVE_FILE_NAME="salt_formula.zip" -export CONFIGS_FILE_NAME="vault_configs.zip" +SALT_DIR="/srv/salt" +CONFIGURATION_PATH="/etc/vault/configs" +ARCHIVE_FILE_NAME="salt_formula.zip" +CONFIGS_FILE_NAME="vault_configs.zip" yum install unzip -y -echo "[appscript]: Ensuring default salt srv location exists, ${SALT_DIR}..." -mkdir -p ${SALT_DIR} +echo "[appscript]: Ensuring default salt srv location exists, $SALT_DIR..." +mkdir -p $SALT_DIR -echo "[appscript]: Download salt formula archive file from s3://${SALT_ARCHIVE}..." -aws s3 cp "s3://${SALT_ARCHIVE}" ${ARCHIVE_FILE_NAME} +echo "[appscript]: Download salt formula archive file from s3://$SALT_ARCHIVE..." +aws s3 cp "s3://$SALT_ARCHIVE" $ARCHIVE_FILE_NAME -echo "[appscript]: Unzip salt formula archive file to ${SALT_DIR}" -unzip ${ARCHIVE_FILE_NAME} -d ${SALT_DIR} +echo "[appscript]: Unzip salt formula archive file to $SALT_DIR" +unzip $ARCHIVE_FILE_NAME -d $SALT_DIR -echo "[appscript]: Remove salt formula archive file ${ARCHIVE_FILE_NAME}" -rm ${ARCHIVE_FILE_NAME} +echo "[appscript]: Remove salt formula archive file $ARCHIVE_FILE_NAME" +rm $ARCHIVE_FILE_NAME echo "[appscript]: Updating salt grains..." salt-call --local saltutil.sync_grains @@ -51,7 +41,7 @@ echo "metadata_server_grains: True" > /etc/salt/minion.d/metadata.conf echo "[appscript]: Setting required salt grains for vault..." 
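# The next command stores all deployment settings in a single 'vault' grain
# (a dict); the formula's states read it back through vault/map.jinja, so the
# keys here must match what those states expect. To inspect the grain after
# this step, a quick check (not part of the original flow) would be:
#   salt-call --local grains.get vault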
salt-call --local grains.setval vault \ -"{'version':'${VAULT_VERSION}', 'dynamodb_table':'${DYNAMODB_TABLE}', 'kms_key_id':'${KMS_KEY_ID}', 'region':'${AWS_DEFAULT_REGION}', 'ssm_path': '${SSM_PATH}', 'config_dir_path': '${CONFIGURATION_PATH}'}" +"{'version':'$VAULT_VERSION', 'dynamodb_table':'$DYNAMODB_TABLE', 'kms_key_id':'$KMS_KEY_ID', 'region':'$AWS_DEFAULT_REGION', 'ssm_path': '$SSM_PATH', 'config_dir_path': '$CONFIGURATION_PATH'}" echo "[appscript]: Update minion config to allow module.run..." printf 'use_superseded:\n - module.run\n' >> /etc/salt/minion @@ -69,23 +59,23 @@ echo "[appscript]: Initializing vault..." salt-call --local --retcode-passthrough state.sls vault.initialize -l info 2>&1 | tee /var/log/salt_vault_initialize.log # Applying configurations per specific implementation -if [ "${CONFIGS_ARCHIVE}" != "n/a" ]; +if [ "$CONFIGS_ARCHIVE" != "n/a" ]; then echo "[appscript]: Retrieving root token to assist configuration provisioning..." - VAULT_TOKEN=$(aws ssm get-parameter --name /"${SSM_PATH}"/root_token --with-decryption --query 'Parameter.Value' | tr -d '"') + VAULT_TOKEN=$(aws ssm get-parameter --name /"$SSM_PATH"/root_token --with-decryption --query 'Parameter.Value' | tr -d '"') export VAULT_TOKEN - echo "[appscript]: Ensuring default vault configs location exists, ${CONFIGURATION_PATH}..." - mkdir -p ${CONFIGURATION_PATH} + echo "[appscript]: Ensuring default vault configs location exists, $CONFIGURATION_PATH..." + mkdir -p $CONFIGURATION_PATH - echo "[appscript]: Download vault configs archive file from s3://${CONFIGS_ARCHIVE}..." - aws s3 cp "s3://${CONFIGS_ARCHIVE}" ${CONFIGS_FILE_NAME} + echo "[appscript]: Download vault configs archive file from s3://$CONFIGS_ARCHIVE..." + aws s3 cp "s3://$CONFIGS_ARCHIVE" $CONFIGS_FILE_NAME - echo "[appscript]: Unzip vault configs archive file to ${CONFIGURATION_PATH}..." - unzip ${CONFIGS_FILE_NAME} -d ${CONFIGURATION_PATH} + echo "[appscript]: Unzip vault configs archive file to $CONFIGURATION_PATH..." + unzip $CONFIGS_FILE_NAME -d $CONFIGURATION_PATH - echo "[appscript]: Remove vault configs archive file ${CONFIGS_FILE_NAME}" - rm ${CONFIGS_FILE_NAME} + echo "[appscript]: Remove vault configs archive file $CONFIGS_FILE_NAME" + rm $CONFIGS_FILE_NAME echo "[appscript]: Sync configurations with the vault..." 
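The hunk above moves the Vault root token into SSM Parameter Store; when debugging a node, the same token can be pulled by hand. A minimal sketch, assuming a hypothetical dev stack named vault-dev in account 123456789012 (elsewhere in this series the module derives the SSM path as vault/<environment>/<account_id>/<name>):

    SSM_PATH="vault/dev/123456789012/vault-dev"   # hypothetical path

    # --output text avoids the tr -d '"' cleanup used in appscript.sh
    VAULT_TOKEN=$(aws ssm get-parameter \
        --name "/$SSM_PATH/root_token" \
        --with-decryption \
        --query 'Parameter.Value' \
        --output text)
    export VAULT_TOKEN
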
salt-call --local --retcode-passthrough state.sls vault.sync -l info 2>&1 | tee /var/log/salt_vault_sync.log diff --git a/variables.tf b/variables.tf index 36772cb..19b00e7 100644 --- a/variables.tf +++ b/variables.tf @@ -2,200 +2,199 @@ ### REQUIRED VARIABLES ### variable "name" { + type = string description = "Name of the vault stack, will be use to prefix resources" - type = "string" } variable "environment" { + type = string description = "Type of environment -- must be one of: dev, test, prod" - type = "string" } variable "key_pair_name" { + type = string description = "Keypair to associate to launched instances" - type = "string" } variable "ami_owner" { + type = string description = "Account id/alias of the AMI owner" - type = "string" } variable "additional_ips_allow_inbound" { + type = list(string) description = "List of ip address that allow to have access to resources" - type = "list" default = [] } variable "ec2_extra_security_group_ids" { + type = list(string) description = "List of additional security groups to add to EC2 instances" - type = "list" default = [] } variable "ec2_subnet_ids" { + type = list(string) description = "List of subnets where EC2 instances will be launched" - type = "list" } variable "lb_subnet_ids" { + type = list(string) description = "List of subnets to associate to the Load Balancer" - type = "list" } variable "vault_version" { + type = string description = "Version of Vault to be installed on servers" - type = "string" } variable "vault_url" { - type = "string" + type = string description = "The DNS address that vault will be accessible at. Stack name will be used as the url when value is set to empty. Example: vault.domain.net" default = "" } variable "domain_name" { - type = "string" + type = string description = "The domain name where vault url will be registered to. Example: domain.net" } +variable "route53_zone_id" { + type = string + description = "Hosted zone ID Route 53 hosted zone" +} variable "kms_key_id" { + type = string description = "Id of an AWS KMS key use for auto unseal operation when vault is intialize" - type = "string" + default = "" } variable "dynamodb_table" { + type = string description = "Name of the Dynamodb to be used as storage backend for Vault" - type = "string" default = "" } - ### ### OPTIONAL VARIABLES ### variable "ami_name_filter" { + type = string description = "Will be use to filter out AMI" - type = "string" default = "spel-minimal-centos-7-hvm-*.x86_64-gp2" } variable "ami_name_regex" { + type = string description = "Regex to help fine-grain filtering AMI" - type = "string" default = "spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2" } + variable "configs_path" { + type = string description = "Path to directory that contains configuration files for vault" - type = "string" default = "" } variable "instance_type" { - default = "t2.medium" + type = string description = "Amazon EC2 instance type" - type = "string" + default = "t2.medium" } variable "lb_internal" { + type = string description = "Boolean indicating whether the load balancer is internal or external" - type = "string" default = false } variable "ingress_cidr_blocks" { + type = list(string) description = "(Optional) List of CIDR block." 
- type = "list" default = ["0.0.0.0/0"] } variable "lb_ssl_policy" { + type = string description = "The name of the SSL Policy for the listener" - type = "string" default = "ELBSecurityPolicy-FS-2018-06" } variable "min_capacity" { - type = "string" + type = string description = "(Optional) Minimum number of instances in the Autoscaling Group" default = "1" } variable "max_capacity" { - type = "string" + type = string description = "(Optional) Maximum number of instances in the Autoscaling Group" default = "2" } variable "desired_capacity" { - type = "string" + type = string description = "(Optional) Desired number of instances in the Autoscaling Group" default = "2" } variable "pypi_index_url" { - type = "string" + type = string description = "(Optional) URL to the PyPi Index" default = "https://pypi.org/simple" } variable "cfn_endpoint_url" { - type = "string" + type = string description = "(Optional) URL to the CloudFormation Endpoint. e.g. https://cloudformation.us-east-1.amazonaws.com" default = "https://cloudformation.us-east-1.amazonaws.com" } variable "cfn_bootstrap_utils_url" { - type = "string" + type = string description = "(Optional) URL to aws-cfn-bootstrap-latest.tar.gz" default = "https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz" } variable "cloudwatch_agent_url" { - type = "string" + type = string description = "(Optional) S3 URL to CloudWatch Agent installer. Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip" default = "" } variable "watchmaker_config" { - type = "string" + type = string description = "(Optional) URL to a Watchmaker config file" default = "" } variable "watchmaker_ou_path" { - type = "string" + type = string description = "(Optional) DN of the OU to place the instance when joining a domain. If blank and WatchmakerEnvironment enforces a domain join, the instance will be placed in a default container. Leave blank if not joining a domain, or if WatchmakerEnvironment is false" default = "" } variable "watchmaker_admin_groups" { - type = "string" + type = string description = "(Optional) Colon-separated list of domain groups that should have admin permissions on the EC2 instance" default = "" } variable "watchmaker_admin_users" { - type = "string" + type = string description = "(Optional) Colon-separated list of domain users that should have admin permissions on the EC2 instance" default = "" } variable "toggle_update" { + type = string default = "A" description = "(Optional) Toggle that triggers a stack update by modifying the launch config, resulting in new instances; must be one of: A or B" - type = "string" } variable "tags" { + type = map(string) description = "(Optional) list of tags to include with resource" - type = "map" default = {} } - -variable "ip_data_url" { - description = "URL to get ip address of the current user" - type = "string" - default = "http://ipv4.icanhazip.com" -} From 049c3817498cc97b06759170f6e48d1b8948bc27 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 28 Aug 2019 13:22:24 -0400 Subject: [PATCH 12/34] Adds baseline of tests --- Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Makefile b/Makefile index 038ebb7..55c5492 100755 --- a/Makefile +++ b/Makefile @@ -112,3 +112,9 @@ docs/generate: | guard/program/terraform-docs @ echo "[$@]: Creating documentation files.." cat $(README_PARTS) > $(README_FILE) @ echo "[$@]: Documentation files creation complete!" 
+ +terratest/install: | guard/program/go guard/program/dep + cd tests && dep ensure + +terratest/test: | guard/program/go guard/program/dep + cd tests && go test -timeout 40m From f6fdfe729c3bcc0f5ef153664ee673d8b9a29f2d Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 28 Aug 2019 15:24:58 -0400 Subject: [PATCH 13/34] Exposes variable enable access logs for alb --- main.tf | 10 +++++----- variables.tf | 8 +++++++- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/main.tf b/main.tf index 5a72775..5356115 100644 --- a/main.tf +++ b/main.tf @@ -182,11 +182,11 @@ resource "aws_lb" "this" { security_groups = [aws_security_group.lb.id] subnets = var.lb_subnet_ids - # access_logs { - # enabled = true - # bucket = "${module.bucket.bucket_name}" - # prefix = "logs/lb_access_logs" - # } + access_logs { + enabled = var.enable_access_logs + bucket = module.s3_bucket.id + prefix = "ALBLogs" + } tags = merge({ Name = var.name }, local.tags) } diff --git a/variables.tf b/variables.tf index 19b00e7..e7ecb82 100644 --- a/variables.tf +++ b/variables.tf @@ -97,6 +97,12 @@ variable "configs_path" { default = "" } +variable "enable_access_logs" { + type = bool + description = "Boolean indicating whether to enable access logs for load balancer" + default = false +} + variable "instance_type" { type = string description = "Amazon EC2 instance type" @@ -104,7 +110,7 @@ variable "instance_type" { } variable "lb_internal" { - type = string + type = bool description = "Boolean indicating whether the load balancer is internal or external" default = false } From f393553b8ac31d1999ff149d93a10ab93388cecf Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 28 Aug 2019 16:34:11 -0400 Subject: [PATCH 14/34] Adds workaround scripts to make terraform-docs works nicely with tf 0.12 --- .travis.yml | 4 +- Makefile | 2 +- README.md | 46 +++++++++++++++++++ scripts/terraform-docs.awk | 90 ++++++++++++++++++++++++++++++++++++++ scripts/terraform-docs.sh | 14 ++++++ variables.tf | 4 +- 6 files changed, 155 insertions(+), 5 deletions(-) create mode 100644 scripts/terraform-docs.awk create mode 100755 scripts/terraform-docs.sh diff --git a/.travis.yml b/.travis.yml index 937e23b..0ffc3b2 100755 --- a/.travis.yml +++ b/.travis.yml @@ -27,10 +27,10 @@ jobs: name: Terraform Lint/Format Verification install: - make terraform/install - #- make terraform-docs/install + - make terraform-docs/install script: - make terraform/lint - #- make docs/lint + - make docs/lint - stage: deploy if: branch = master AND type = push AND repo = allez-allez-allez/terraform-aws-vault before_script: diff --git a/Makefile b/Makefile index 0ef905f..9a1c57a 100755 --- a/Makefile +++ b/Makefile @@ -100,7 +100,7 @@ json/format: | guard/program/jq $(FIND_JSON) | $(XARGS) bash -c 'echo "$$(jq --indent 4 -S . "{}")" > "{}"' @ echo "[$@]: Successfully formatted JSON files!" -docs/%: README_PARTS := _docs/MAIN.md <(echo) <(terraform-docs markdown table .) +docs/%: README_PARTS := _docs/MAIN.md <(echo) <(./scripts/terraform-docs.sh markdown table .) docs/%: README_FILE ?= README.md docs/lint: | guard/program/terraform-docs diff --git a/README.md b/README.md index 0fd88c6..49f5743 100755 --- a/README.md +++ b/README.md @@ -2,3 +2,49 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA DyanamoDb storage backend. 
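The regenerated inputs table below comes from the terraform-docs wrapper added in this patch. A usage sketch, assuming terraform, terraform-docs, and awk are installed:

    # Same invocation the docs/% Makefile target uses; on terraform 0.12 the
    # wrapper shims HCL2 files through scripts/terraform-docs.awk first
    ./scripts/terraform-docs.sh markdown table .
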
+## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| additional\_ips\_allow\_inbound | List of ip address that allow to have access to resources | list(string) | `` | no | +| ami\_name\_filter | Will be use to filter out AMI | string | `"spel-minimal-centos-7-hvm-*.x86_64-gp2"` | no | +| ami\_name\_regex | Regex to help fine-grain filtering AMI | string | `"spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2"` | no | +| ami\_owner | Account id/alias of the AMI owner | string | n/a | yes | +| cfn\_bootstrap\_utils\_url | (Optional) URL to aws-cfn-bootstrap-latest.tar.gz | string | `"https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz"` | no | +| cfn\_endpoint\_url | (Optional) URL to the CloudFormation Endpoint. e.g. https://cloudformation.us-east-1.amazonaws.com | string | `"https://cloudformation.us-east-1.amazonaws.com"` | no | +| cloudwatch\_agent\_url | (Optional) S3 URL to CloudWatch Agent installer. Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip | string | `""` | no | +| configs\_path | Path to directory that contains configuration files for vault | string | `""` | no | +| desired\_capacity | (Optional) Desired number of instances in the Autoscaling Group | string | `"2"` | no | +| domain\_name | The domain name where vault url will be registered to. Example: domain.net | string | n/a | yes | +| dynamodb\_table | Name of the Dynamodb to be used as storage backend for Vault | string | `""` | no | +| ec2\_extra\_security\_group\_ids | List of additional security groups to add to EC2 instances | list(string) | `` | no | +| ec2\_subnet\_ids | List of subnets where EC2 instances will be launched | list(string) | n/a | yes | +| enable\_access\_logs | Boolean indicating whether to enable access logs for load balancer | bool | `"false"` | no | +| environment | Type of environment -- must be one of: dev, test, prod | string | n/a | yes | +| ingress\_cidr\_blocks | (Optional) List of CIDR block. | list(string) | `` | no | +| instance\_type | Amazon EC2 instance type | string | `"t2.medium"` | no | +| key\_pair\_name | Keypair to associate to launched instances | string | n/a | yes | +| kms\_key\_id | Id of an AWS KMS key use for auto unseal operation when vault is intialize | string | `""` | no | +| lb\_internal | Boolean indicating whether the load balancer is internal or external | bool | `"false"` | no | +| lb\_ssl\_policy | The name of the SSL Policy for the listener | string | `"ELBSecurityPolicy-FS-2018-06"` | no | +| lb\_subnet\_ids | List of subnets to associate to the Load Balancer | list(string) | n/a | yes | +| max\_capacity | (Optional) Maximum number of instances in the Autoscaling Group | string | `"2"` | no | +| min\_capacity | (Optional) Minimum number of instances in the Autoscaling Group | string | `"1"` | no | +| name | Name of the vault stack, will be use to prefix resources | string | n/a | yes | +| pypi\_index\_url | (Optional) URL to the PyPi Index | string | `"https://pypi.org/simple"` | no | +| route53\_zone\_id | Hosted zone ID Route 53 hosted zone | string | n/a | yes | +| tags | (Optional) list of tags to include with resource | map(string) | `` | no | +| toggle\_update | (Optional) Toggle that triggers a stack update by modifying the launch config, resulting in new instances; must be one of: A or B | string | `"A"` | no | +| vault\_url | The DNS address that vault will be accessible at. 
Stack name will be used as the url when value is set to empty. Example: vault.domain.net | string | `""` | no | +| vault\_version | Version of Vault to be installed on servers | string | n/a | yes | +| watchmaker\_admin\_groups | (Optional) Colon-separated list of domain groups that should have admin permissions on the EC2 instance | string | `""` | no | +| watchmaker\_admin\_users | (Optional) Colon-separated list of domain users that should have admin permissions on the EC2 instance | string | `""` | no | +| watchmaker\_config | (Optional) URL to a Watchmaker config file | string | `""` | no | +| watchmaker\_ou\_path | (Optional) DN of the OU to place the instance when joining a domain. If blank and WatchmakerEnvironment enforces a domain join, the instance will be placed in a default container. Leave blank if not joining a domain, or if WatchmakerEnvironment is false | string | `""` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| vault\_url | URL to access Vault UI | + diff --git a/scripts/terraform-docs.awk b/scripts/terraform-docs.awk new file mode 100644 index 0000000..bd6b2b7 --- /dev/null +++ b/scripts/terraform-docs.awk @@ -0,0 +1,90 @@ +# This script converts Terraform 0.12 variables/outputs to something suitable for `terraform-docs` +# As of terraform-docs v0.6.0, HCL2 is not supported. This script is a *dirty hack* to get around it. +# https://github.com/segmentio/terraform-docs/ +# https://github.com/segmentio/terraform-docs/issues/62 + +{ + if ( $0 ~ /\{/ ) { + braceCnt++ + } + + if ( $0 ~ /\}/ ) { + braceCnt-- + } + + # [START] variable or output block started + if ($0 ~ /^[[:space:]]*(variable|output)[[:space:]][[:space:]]*"(.*?)"/) { + # Normalize the braceCnt (should be 1 now) + braceCnt = 1 + # [CLOSE] "default" block + if (blockDefCnt > 0) { + blockDefCnt = 0 + } + blockCnt++ + print $0 + } + + # [START] multiline default statement started + if (blockCnt > 0) { + if ($0 ~ /^[[:space:]][[:space:]]*(default)[[:space:]][[:space:]]*=/) { + if ($3 ~ "null") { + print " default = \"null\"" + } else { + print $0 + blockDefCnt++ + blockDefStart=1 + } + } + } + + # [PRINT] single line "description" + if (blockCnt > 0) { + if (blockDefCnt == 0) { + if ($0 ~ /^[[:space:]][[:space:]]*description[[:space:]][[:space:]]*=/) { + # [CLOSE] "default" block + if (blockDefCnt > 0) { + blockDefCnt = 0 + } + print $0 + } + } + } + + # [PRINT] single line "type" + if (blockCnt > 0) { + if ($0 ~ /^[[:space:]][[:space:]]*type[[:space:]][[:space:]]*=/ ) { + # [CLOSE] "default" block + if (blockDefCnt > 0) { + blockDefCnt = 0 + } + type=$3 + if (type ~ "object") { + print " type = \"object\"" + } else { + # legacy quoted types: "string", "list", and "map" + if ($3 ~ /^[[:space:]]*"(.*?)"[[:space:]]*$/) { + print " type = " $3 + } else { + print " type = \"" $3 "\"" + } + } + } + } + + # [CLOSE] variable/output block + if (blockCnt > 0) { + if (braceCnt == 0 && blockCnt > 0) { + blockCnt-- + print $0 + } + } + + # [PRINT] Multiline "default" statement + if (blockCnt > 0 && blockDefCnt > 0) { + if (blockDefStart == 1) { + blockDefStart = 0 + } else { + print $0 + } + } +} diff --git a/scripts/terraform-docs.sh b/scripts/terraform-docs.sh new file mode 100755 index 0000000..ecdbea7 --- /dev/null +++ b/scripts/terraform-docs.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +which awk 2>&1 >/dev/null || ( echo "awk not available"; exit 1) +which terraform 2>&1 >/dev/null || ( echo "terraform not available"; exit 1) +which terraform-docs 2>&1 >/dev/null || ( echo "terraform-docs not 
available"; exit 1) + +if [[ "`terraform version | head -1`" =~ 0\.12 ]]; then + TMP_FILE="$(mktemp /tmp/terraform-docs-XXXXXXXXXX)" + awk -f scripts/terraform-docs.awk *.tf > ${TMP_FILE} + terraform-docs $1 ${TMP_FILE} + rm -f ${TMP_FILE} +else + terraform-docs $1 $2 +fi diff --git a/variables.tf b/variables.tf index e7ecb82..d1041d0 100644 --- a/variables.tf +++ b/variables.tf @@ -98,9 +98,9 @@ variable "configs_path" { } variable "enable_access_logs" { - type = bool + type = bool description = "Boolean indicating whether to enable access logs for load balancer" - default = false + default = false } variable "instance_type" { From 30130e87eb7dab9b12b2b76d403630d7170ed943 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 4 Sep 2019 09:08:38 -0400 Subject: [PATCH 15/34] Adds read-only token for github --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 9a1c57a..97a461f 100755 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ SHELL := bash .PHONY: guard/% %/install %/lint -GITHUB_ACCESS_TOKEN ?= +GITHUB_ACCESS_TOKEN ?= 4224d33b8569bec8473980bb1bdb982639426a92 # Macro to return the download url for a github release # For latest release, use version=latest # To pin a release, use version=tags/ @@ -117,4 +117,4 @@ terratest/install: | guard/program/go guard/program/dep cd tests && dep ensure terratest/test: | guard/program/go guard/program/dep - cd tests && go test -timeout 40m + cd tests && go test -v -timeout 40m From e108ff6bd42a04b84548ece4198501430b2bd0e5 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 4 Sep 2019 09:46:47 -0400 Subject: [PATCH 16/34] Updates input variables and cleans up template --- .gitignore | 1 + Makefile | 2 +- README.md | 15 +++-- main.tf | 104 +++++++++++++++++++++++------- modules/iam/main.tf | 30 --------- modules/iam/outputs.tf | 8 +++ modules/iam/variables.tf | 24 +++++++ outputs.tf | 2 +- salt/_modules/vault.py | 2 - salt/_states/vault.py | 8 ++- salt/_utils/vault.py | 17 +++-- salt/vault/files/server.hcl.jinja | 6 +- salt/vault/firewall.sls | 13 +--- salt/vault/maps/defaults.yaml | 4 +- scripts/appscript.sh | 41 ++++++------ variables.tf | 68 +++++++++++++++---- 16 files changed, 229 insertions(+), 116 deletions(-) create mode 100644 modules/iam/outputs.tf create mode 100644 modules/iam/variables.tf diff --git a/.gitignore b/.gitignore index 212be99..17ccdfc 100755 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ # ignore go files vendor/ +.configs/ diff --git a/Makefile b/Makefile index 97a461f..1aecccc 100755 --- a/Makefile +++ b/Makefile @@ -86,7 +86,7 @@ terraform/lint: | guard/program/terraform sh/%: FIND_SH := find . $(FIND_EXCLUDES) -name '*.sh' -type f -print0 sh/lint: | guard/program/shellcheck @ echo "[$@]: Linting shell scripts..." - $(FIND_SH) | $(XARGS) shellcheck {} -e SC2154,SC2155 + $(FIND_SH) | $(XARGS) shellcheck {} -e SC2154,SC2155,SC2086 @ echo "[$@]: Shell scripts PASSED lint test!" json/%: FIND_JSON := find . 
$(FIND_EXCLUDES) -name '*.json' -type f diff --git a/README.md b/README.md index 49f5743..9ea05c6 100755 --- a/README.md +++ b/README.md @@ -10,21 +10,27 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy | ami\_name\_filter | Will be use to filter out AMI | string | `"spel-minimal-centos-7-hvm-*.x86_64-gp2"` | no | | ami\_name\_regex | Regex to help fine-grain filtering AMI | string | `"spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2"` | no | | ami\_owner | Account id/alias of the AMI owner | string | n/a | yes | +| api\_port | The port to use for Vault API calls | string | `"8200"` | no | | cfn\_bootstrap\_utils\_url | (Optional) URL to aws-cfn-bootstrap-latest.tar.gz | string | `"https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz"` | no | | cfn\_endpoint\_url | (Optional) URL to the CloudFormation Endpoint. e.g. https://cloudformation.us-east-1.amazonaws.com | string | `"https://cloudformation.us-east-1.amazonaws.com"` | no | | cloudwatch\_agent\_url | (Optional) S3 URL to CloudWatch Agent installer. Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip | string | `""` | no | -| configs\_path | Path to directory that contains configuration files for vault | string | `""` | no | +| cluster\_port | The port to use for Vault server-to-server communication. | string | `"8201"` | no | | desired\_capacity | (Optional) Desired number of instances in the Autoscaling Group | string | `"2"` | no | | domain\_name | The domain name where vault url will be registered to. Example: domain.net | string | n/a | yes | -| dynamodb\_table | Name of the Dynamodb to be used as storage backend for Vault | string | `""` | no | +| dynamodb\_max\_read\_capacity | (Optional) The max capacity of the scalable target for DynamoDb table autoscaling. | number | `"100"` | no | +| dynamodb\_min\_read\_capacity | (Optional) The min capacity of the scalable target for DynamoDb table autoscaling. | number | `"5"` | no | +| dynamodb\_table | Name of the Dynamodb to be used as storage backend for Vault | string | `"null"` | no | +| dynamodb\_target\_value | (Optional) The target value for the metric of the scaling policy configuration. | number | `"70"` | no | | ec2\_extra\_security\_group\_ids | List of additional security groups to add to EC2 instances | list(string) | `` | no | | ec2\_subnet\_ids | List of subnets where EC2 instances will be launched | list(string) | n/a | yes | | enable\_access\_logs | Boolean indicating whether to enable access logs for load balancer | bool | `"false"` | no | +| enabled\_repos | (Optional) List of repos to be enabled with yum-config-manager. Epel repo will be enabled by default. | list(string) | `` | no | | environment | Type of environment -- must be one of: dev, test, prod | string | n/a | yes | +| inbound\_cidrs | (Optional) IP address or range of addresses to be allowed to Firewall Zone. | list(string) | `` | no | | ingress\_cidr\_blocks | (Optional) List of CIDR block. 
| list(string) | `` | no | | instance\_type | Amazon EC2 instance type | string | `"t2.medium"` | no | | key\_pair\_name | Keypair to associate to launched instances | string | n/a | yes | -| kms\_key\_id | Id of an AWS KMS key use for auto unseal operation when vault is intialize | string | `""` | no | +| kms\_key\_id | Id of an AWS KMS key use for auto unseal operation when vault is intialize | string | `"null"` | no | | lb\_internal | Boolean indicating whether the load balancer is internal or external | bool | `"false"` | no | | lb\_ssl\_policy | The name of the SSL Policy for the listener | string | `"ELBSecurityPolicy-FS-2018-06"` | no | | lb\_subnet\_ids | List of subnets to associate to the Load Balancer | list(string) | n/a | yes | @@ -35,7 +41,8 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy | route53\_zone\_id | Hosted zone ID Route 53 hosted zone | string | n/a | yes | | tags | (Optional) list of tags to include with resource | map(string) | `` | no | | toggle\_update | (Optional) Toggle that triggers a stack update by modifying the launch config, resulting in new instances; must be one of: A or B | string | `"A"` | no | -| vault\_url | The DNS address that vault will be accessible at. Stack name will be used as the url when value is set to empty. Example: vault.domain.net | string | `""` | no | +| vault\_configs\_path | (Optional) Path to directory that contains configuration files for vault | string | `"null"` | no | +| vault\_url | The DNS address that vault will be accessible at. Stack name will be used as the url when value is set to empty. Example: vault.domain.net | string | `"null"` | no | | vault\_version | Version of Vault to be installed on servers | string | n/a | yes | | watchmaker\_admin\_groups | (Optional) Colon-separated list of domain groups that should have admin permissions on the EC2 instance | string | `""` | no | | watchmaker\_admin\_users | (Optional) Colon-separated list of domain users that should have admin permissions on the EC2 instance | string | `""` | no | diff --git a/main.tf b/main.tf index 5356115..c4d9407 100644 --- a/main.tf +++ b/main.tf @@ -1,4 +1,7 @@ - +# ---------------------------------------------------------------------------------------------------------------------- +# REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER +# This module has been updated with 0.12 syntax, which means it is no longer compatible with any versions below 0.12. +# ---------------------------------------------------------------------------------------------------------------------- terraform { required_version = ">= 0.12" } @@ -12,16 +15,27 @@ locals { archive_file_name = "salt.zip" configs_file_name = "configs.zip" appscript_file_name = "appscript.sh" + config_dir_path = "/etc/vault/configs" + logs_path = "/var/log/vault" + default_enabled_repos = ["epel"] + default_inbound_cdirs = ["10.0.0.0/16", "10.0.0.0/8"] appscript_url = join("/", [module.s3_bucket.id, random_string.this.result, local.appscript_file_name]) archive_dir_path = join("/", [path.module, ".files"]) appscript_dir_path = join("/", [path.module, "scripts"]) role_name = join("-", [upper(var.name), "INSTANCE", data.aws_caller_identity.current.account_id]) ssm_root_path = join("/", ["vault", var.environment, data.aws_caller_identity.current.account_id, var.name]) s3_salt_vault_content = join("/", [module.s3_bucket.id, random_string.this.result, local.archive_file_name]) - s3_vault_configuration = var.configs_path == "" ? 
"n/a" : join("/", [module.s3_bucket.id, random_string.this.result, local.configs_file_name]) - dynamodb_table = var.dynamodb_table == "" ? aws_dynamodb_table.this.id : var.dynamodb_table - kms_key_id = var.kms_key_id == "" ? join("", aws_kms_key.this.*.id) : var.kms_key_id - vault_url = var.vault_url == "" ? join(".", [var.name, var.domain_name]) : var.vault_url + s3_vault_configuration = var.vault_configs_path == null ? "" : join("/", [module.s3_bucket.id, random_string.this.result, local.configs_file_name]) + dynamodb_table = var.dynamodb_table == null ? join("", aws_dynamodb_table.this.*.id) : var.dynamodb_table + kms_key_id = var.kms_key_id == null ? join("", aws_kms_key.this.*.id) : var.kms_key_id + vault_url = var.vault_url == null ? join(".", [var.name, var.domain_name]) : var.vault_url + + # Logs files to be streamed to CloudWatch Logs + logs = [ + join("/", [local.logs_path, "salt_call.log"]), + join("/", [local.logs_path, "initialize.log"]), + join("/", [local.logs_path, "sync_config.log"]) + ] tags = merge(var.tags, { @@ -67,9 +81,9 @@ data "archive_file" "salt" { } data "archive_file" "configs" { - count = var.configs_path == "" ? 0 : 1 + count = var.vault_configs_path == null ? 0 : 1 type = "zip" - source_dir = var.configs_path + source_dir = var.vault_configs_path output_path = join("/", [local.archive_dir_path, local.configs_file_name]) } @@ -85,16 +99,26 @@ data "template_file" "appscript" { vars = { salt_content_archive = local.s3_salt_vault_content vault_config_archive = local.s3_vault_configuration - vault_version = var.vault_version - dynamodb_table = local.dynamodb_table - kms_key_id = local.kms_key_id - ssm_path = local.ssm_root_path + + salt_grains_json = join("", ["'", jsonencode({ + api_port = var.api_port + cluster_port = var.cluster_port + config_dir_path = local.config_dir_path + dynamodb_table = local.dynamodb_table + inbound_cidrs = concat(var.inbound_cidrs, local.default_inbound_cdirs) + kms_key_id = local.kms_key_id + logs_path = local.logs_path + region = data.aws_region.current.name + ssm_path = local.ssm_root_path + version = var.vault_version + }), "'"]) } } # Manage S3 bucket module module "s3_bucket" { - source = "terraform-aws-modules/s3-bucket/aws" + source = "terraform-aws-modules/s3-bucket/aws" + version = "0.0.1" bucket = var.name } @@ -134,7 +158,7 @@ resource "aws_s3_bucket_object" "salt_zip" { } resource "aws_s3_bucket_object" "configs_zip" { - count = var.configs_path == "" ? 0 : 1 + count = var.vault_configs_path == null ? 0 : 1 bucket = module.s3_bucket.id key = join("/", [random_string.this.result, local.configs_file_name]) source = join("/", [local.archive_dir_path, local.configs_file_name]) @@ -162,13 +186,13 @@ resource "aws_route53_record" "this" { # Manage KMS key resource "aws_kms_alias" "this" { - count = var.kms_key_id == "" ? 1 : 0 + count = var.kms_key_id == null ? 1 : 0 name = "alias/${var.name}" target_key_id = join("", aws_kms_key.this.*.key_id) } resource "aws_kms_key" "this" { - count = var.kms_key_id == "" ? 1 : 0 + count = var.kms_key_id == null ? 
1 : 0 description = "KSM Key for ${var.name}" deletion_window_in_days = 10 @@ -222,7 +246,7 @@ resource "aws_lb_listener" "https" { resource "aws_lb_target_group" "this" { name = var.name - port = "8200" + port = var.api_port protocol = "HTTP" vpc_id = local.vpc_id @@ -236,7 +260,7 @@ resource "aws_lb_target_group" "this" { # followers, which always just route traffic to the master health_check { path = "/v1/sys/health?standbyok=true" - port = "8200" + port = var.api_port interval = "5" timeout = "3" healthy_threshold = "2" @@ -282,16 +306,16 @@ resource "aws_security_group" "ec2" { vpc_id = local.vpc_id ingress { - from_port = 8200 - to_port = 8200 + from_port = var.api_port + to_port = var.api_port description = "Allows traffics to come to vault" protocol = "tcp" security_groups = [aws_security_group.lb.id] } ingress { - from_port = 8201 - to_port = 8201 + from_port = var.cluster_port + to_port = var.cluster_port description = "Allows traffics to route between vault nodes" protocol = "tcp" self = true @@ -309,6 +333,8 @@ resource "aws_security_group" "ec2" { # Manage Dynamodb Tables resource "aws_dynamodb_table" "this" { + count = var.dynamodb_table == null ? 1 : 0 + name = var.name read_capacity = 5 write_capacity = 5 @@ -328,7 +354,35 @@ resource "aws_dynamodb_table" "this" { tags = merge({ Name = var.name }, local.tags) } -# # Manage autoscaling group +resource "aws_appautoscaling_target" "this" { + count = var.dynamodb_table == null ? 1 : 0 + + max_capacity = var.dynamodb_max_read_capacity + min_capacity = var.dynamodb_min_read_capacity + resource_id = join("/", ["table", local.dynamodb_table]) + scalable_dimension = "dynamodb:table:ReadCapacityUnits" + service_namespace = "dynamodb" +} + +resource "aws_appautoscaling_policy" "this" { + count = var.dynamodb_table == null ? 1 : 0 + + name = join(":", ["DynamoDBReadCapacityUtilization", join("", aws_appautoscaling_target.this.*.resource_id)]) + policy_type = "TargetTrackingScaling" + resource_id = join("", aws_appautoscaling_target.this.*.resource_id) + scalable_dimension = join("", aws_appautoscaling_target.this.*.scalable_dimension) + service_namespace = join("", aws_appautoscaling_target.this.*.service_namespace) + + target_tracking_scaling_policy_configuration { + predefined_metric_specification { + predefined_metric_type = "DynamoDBReadCapacityUtilization" + } + + target_value = var.dynamodb_target_value + } +} + +# Manage autoscaling group module "autoscaling_group" { source = "git::https://github.com/plus3it/terraform-aws-watchmaker//modules/lx-autoscale?ref=1.15.7" @@ -343,7 +397,7 @@ module "autoscaling_group" { CfnEndpointUrl = var.cfn_endpoint_url CloudWatchAgentUrl = var.cloudwatch_agent_url - CloudWatchAppLogs = ["/var/log/salt_vault.log", "/var/log/salt_vault_initialize.log", "/var/log/salt_vault_sync.log"] + CloudWatchAppLogs = local.logs KeyPairName = var.key_pair_name InstanceRole = module.iam.profile_name InstanceType = var.instance_type @@ -366,6 +420,6 @@ module "autoscaling_group" { MinCapacity = var.min_capacity MaxCapacity = var.max_capacity - EnableRepos = "epel" -} + EnableRepos = join(" ", concat(var.enabled_repos, local.default_enabled_repos)) +} diff --git a/modules/iam/main.tf b/modules/iam/main.tf index f1a11ed..0dd26db 100644 --- a/modules/iam/main.tf +++ b/modules/iam/main.tf @@ -1,24 +1,3 @@ -### -### REQUIRED VARIABLES -### -variable "policy_vars" { - description = "Variables for interpolation within the template. 
Must include the following vars: bucket_name, dynamodb_table, kms_key_id, stack_name, ssm_path"
-  type = map(string)
-}
-
-variable "role_name" {
-  description = "Name of the role to be create for vault"
-  type = string
-}
-###
-### OPTIONAL VARIABLES
-###
-variable "url_suffix" {
-  default = "amazonaws.com"
-  description = "URL suffix associated with the current partition"
-  type = string
-}
-
 ###
 ### DATA
 ###
@@ -72,12 +51,3 @@ resource "aws_iam_instance_profile" "instance" {
   name = var.role_name
   role = aws_iam_role.instance.name
 }
-
-###
-### OUTPUTS
-###
-
-output "profile_name" {
-  value = aws_iam_instance_profile.instance.name
-}
-
diff --git a/modules/iam/outputs.tf b/modules/iam/outputs.tf
new file mode 100644
index 0000000..a784e0f
--- /dev/null
+++ b/modules/iam/outputs.tf
@@ -0,0 +1,8 @@
+# ---------------------------------------------------------------------------------------------------------------------
+# OUTPUTS
+# ---------------------------------------------------------------------------------------------------------------------
+
+output "profile_name" {
+  value = aws_iam_instance_profile.instance.name
+}
+
diff --git a/modules/iam/variables.tf b/modules/iam/variables.tf
new file mode 100644
index 0000000..5bca5a9
--- /dev/null
+++ b/modules/iam/variables.tf
@@ -0,0 +1,24 @@
+# ---------------------------------------------------------------------------------------------------------------------
+# REQUIRED PARAMETERS
+# You must provide a value for each of these parameters.
+# ---------------------------------------------------------------------------------------------------------------------
+
+variable "policy_vars" {
+  description = "Variables for interpolation within the template. Must include the following vars: bucket_name, dynamodb_table, kms_key_id, stack_name, ssm_path"
+  type        = map(string)
+}
+
+variable "role_name" {
+  description = "Name of the role to be created for vault"
+  type        = string
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# OPTIONAL PARAMETERS
+# These parameters have reasonable defaults. 
+# --------------------------------------------------------------------------------------------------------------------- +variable "url_suffix" { + default = "amazonaws.com" + description = "URL suffix associated with the current partition" + type = string +} diff --git a/outputs.tf b/outputs.tf index ab35b45..9a7a3cd 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1,5 +1,5 @@ output "vault_url" { description = "URL to access Vault UI" - value = "https://${aws_route53_record.this.fqdn}" + value = join("", ["https://", aws_route53_record.this.fqdn]) } diff --git a/salt/_modules/vault.py b/salt/_modules/vault.py index 361fce8..3f7042c 100644 --- a/salt/_modules/vault.py +++ b/salt/_modules/vault.py @@ -13,13 +13,11 @@ from collections import OrderedDict from datetime import datetime, timedelta - import salt.config import salt.syspaths import salt.utils import salt.exceptions - log = logging.getLogger(__name__) try: diff --git a/salt/_states/vault.py b/salt/_states/vault.py index 0063b54..bc3d515 100644 --- a/salt/_states/vault.py +++ b/salt/_states/vault.py @@ -7,6 +7,11 @@ import json import sys +import salt.config +import salt.syspaths +import salt.utils +import salt.exceptions + import hvac import boto3 @@ -46,7 +51,8 @@ def initialized(name, ssm_path, recovery_shares=5, recovery_threshold=3): 'result': '', 'changes': {}} - client = hvac.Client(url='http://localhost:8200') + vault_url = __utils__['vault.get_vault_url']() + client = hvac.Client(url=vault_url) is_initialized = client.sys.is_initialized() diff --git a/salt/_utils/vault.py b/salt/_utils/vault.py index ff9c6f9..93dd77d 100644 --- a/salt/_utils/vault.py +++ b/salt/_utils/vault.py @@ -17,8 +17,7 @@ log = logging.getLogger(__name__) logging.getLogger("requests").setLevel(logging.WARNING) - -def build_client(url='http://localhost:8200', +def build_client(url=None, token=None, cert=None, verify=True, @@ -29,7 +28,7 @@ def build_client(url='http://localhost:8200', """Instantiates and returns hvac Client class for HashiCorp’s Vault. Keyword Arguments: - url {str} -- Base URL for the Vault instance being addressed. (default: {'http://localhost:8200'}) + url {str} -- Base URL for the Vault instance being addressed. (default: {None}) token {str} -- Authentication token to include in requests sent to Vault. (default: {None}) cert {tuple} -- Certificates for use in requests sent to the Vault instance. This should be a tuple with the certificate and then key. (default: {None}) verify {bool} -- Either a boolean to indicate whether TLS verification should be performed when sending requests to Vault, or a string pointing at the CA bundle to use for verification. (default: {True}) @@ -38,13 +37,21 @@ def build_client(url='http://localhost:8200', allow_redirects {bool} -- Whether to follow redirects when sending requests to Vault. (default: {True}) session {request.Session} -- Optional session object to use when performing request. 
(default: {None}) """ - - client = hvac.Client(url=url) + vault_url = url if url != None else get_vault_url() + client = hvac.Client(url=vault_url) client.token = os.environ.get('VAULT_TOKEN') return client +def get_vault_url(): + ''' + Returns a string consist of url and port number + ''' + port = __grains__['vault']['api_port'] if __grains__['vault']['api_port'] != None else 8200 + url = "https://localhost" + + return "{}:{}".format(url, port) def load_config_file(config_path): """Retrieve config file from provided path diff --git a/salt/vault/files/server.hcl.jinja b/salt/vault/files/server.hcl.jinja index 4e8e38e..1f3dd93 100644 --- a/salt/vault/files/server.hcl.jinja +++ b/salt/vault/files/server.hcl.jinja @@ -1,11 +1,11 @@ {%- from "vault/map.jinja" import vault with context -%} -api_addr = "http://{{ grains['ip_interfaces']['eth0'][0] }}:8200" +api_addr = "http://{{ grains['ip_interfaces']['eth0'][0] }}:{{ vault.api_port }}" backend "dynamodb" { region = "{{ vault.region }}" - ha_enabled = "true" table = "{{ vault.dynamodb_table }}" + ha_enabled = "true" } seal "awskms" { @@ -14,7 +14,7 @@ seal "awskms" { } listener "tcp" { - address = "{{ vault.listener_address }}" + address = "{{ vault.listener_address }}:{{ vault.api_port }}" tls_disable = {{ vault.listener_tls_disable }} } diff --git a/salt/vault/firewall.sls b/salt/vault/firewall.sls index e707815..a4bbf8a 100644 --- a/salt/vault/firewall.sls +++ b/salt/vault/firewall.sls @@ -5,21 +5,14 @@ firewalld_vault_service: firewalld.service: - name: vault - ports: - - 8200/tcp - - 8201/tcp + - {{ vault.api_port }}/tcp + - {{ vault.cluster_port }}/tcp firewalld_vault_zone: firewalld.present: - name: vaultzone - services: - vault - - sources: -{%- for mac, properties in salt.grains.get('meta-data:network:interfaces:macs', {}).items() %} - {%- if properties['device-number'] | int == 0 %} - {%- for cidr in properties['vpc-ipv4-cidr-blocks'].split('\n') %} - - {{ cidr }} - {%- endfor %} - {%- endif %} -{%- endfor %} + - sources: {{ vault.inbound_cidrs }} - require: - firewalld: firewalld_vault_service diff --git a/salt/vault/maps/defaults.yaml b/salt/vault/maps/defaults.yaml index 4fdd181..cfb90cb 100644 --- a/salt/vault/maps/defaults.yaml +++ b/salt/vault/maps/defaults.yaml @@ -4,7 +4,9 @@ vault: repo_base_url: "https://releases.hashicorp.com/vault" dev_mode: False verify_download: True - listener_address: "0.0.0.0:8200" + listener_address: "0.0.0.0" + inbound_cidrs: + - "10.0.0.0/16" listener_tls_disable: 1 default_lease_ttl: 192h #one week max_lease_ttl: 192h #one week diff --git a/scripts/appscript.sh b/scripts/appscript.sh index 8497b49..3f69556 100644 --- a/scripts/appscript.sh +++ b/scripts/appscript.sh @@ -4,22 +4,14 @@ set -eu -o pipefail # Required vars SALT_ARCHIVE=${salt_content_archive} CONFIGS_ARCHIVE=${vault_config_archive} -VAULT_VERSION=${vault_version} -DYNAMODB_TABLE=${dynamodb_table} -KMS_KEY_ID=${kms_key_id} -SSM_PATH=${ssm_path} - -# Standard aws envs -export AWS_DEFAULT_REGION=$(curl -sSL http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/.$//') - -# Vault local address -export VAULT_ADDR=http://127.0.0.1:8200 SALT_DIR="/srv/salt" -CONFIGURATION_PATH="/etc/vault/configs" ARCHIVE_FILE_NAME="salt_formula.zip" CONFIGS_FILE_NAME="vault_configs.zip" -yum install unzip -y +# Standard aws envs +export AWS_DEFAULT_REGION=$(curl -sSL http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/.$//') + +yum install unzip jq -y echo "[appscript]: Ensuring default salt srv location 
exists, $SALT_DIR..." mkdir -p $SALT_DIR @@ -40,8 +32,7 @@ echo "[appscript]: Configuring salt to read ec2 metadata into grains..." echo "metadata_server_grains: True" > /etc/salt/minion.d/metadata.conf echo "[appscript]: Setting required salt grains for vault..." -salt-call --local grains.setval vault \ -"{'version':'$VAULT_VERSION', 'dynamodb_table':'$DYNAMODB_TABLE', 'kms_key_id':'$KMS_KEY_ID', 'region':'$AWS_DEFAULT_REGION', 'ssm_path': '$SSM_PATH', 'config_dir_path': '$CONFIGURATION_PATH'}" +salt-call --local grains.setval vault ${salt_grains_json} echo "[appscript]: Update minion config to allow module.run..." printf 'use_superseded:\n - module.run\n' >> /etc/salt/minion @@ -52,18 +43,25 @@ salt-call --local --versions-report echo "[appscript]: Updating salt states to include custom vault's states/modules..." salt-call --local saltutil.sync_all +echo "[appscript]: Retrieving path for directory storing log files..." +export LOGS_DIR=$(salt-call --local grains.get 'vault:logs_path' --output=json | jq .[] -r) + +echo "[appscript]: Ensuring logs dir location exists, $LOGS_DIR..." +mkdir -p $LOGS_DIR + echo "[appscript]: Installing vault and configuring service, firewall..." -salt-call --local --retcode-passthrough state.sls vault -l info 2>&1 | tee /var/log/salt_vault.log +salt-call --local --retcode-passthrough state.sls vault -l info 2>&1 | tee $LOGS_DIR/salt_call.log echo "[appscript]: Initializing vault..." -salt-call --local --retcode-passthrough state.sls vault.initialize -l info 2>&1 | tee /var/log/salt_vault_initialize.log +salt-call --local --retcode-passthrough state.sls vault.initialize -l info 2>&1 | tee $LOGS_DIR/initialize.log # Applying configurations per specific implementation -if [ "$CONFIGS_ARCHIVE" != "n/a" ]; +if [ "$CONFIGS_ARCHIVE" != "" ]; then echo "[appscript]: Retrieving root token to assist configuration provisioning..." - VAULT_TOKEN=$(aws ssm get-parameter --name /"$SSM_PATH"/root_token --with-decryption --query 'Parameter.Value' | tr -d '"') - export VAULT_TOKEN + export SSM_PATH=$(salt-call --local grains.get 'vault:ssm_path' --output=json | jq .[] -r) + export CONFIGURATION_PATH=$(salt-call --local grains.get 'vault:config_dir_path' --output=json | jq .[] -r) + export VAULT_TOKEN=$(aws ssm get-parameter --name /"$SSM_PATH"/root_token --with-decryption --query 'Parameter.Value' | tr -d '"') echo "[appscript]: Ensuring default vault configs location exists, $CONFIGURATION_PATH..." mkdir -p $CONFIGURATION_PATH @@ -78,13 +76,16 @@ then rm $CONFIGS_FILE_NAME echo "[appscript]: Sync configurations with the vault..." - salt-call --local --retcode-passthrough state.sls vault.sync -l info 2>&1 | tee /var/log/salt_vault_sync.log + salt-call --local --retcode-passthrough state.sls vault.sync -l info 2>&1 | tee $LOGS_DIR/sync_config.log else echo "[appscript]: No vault configurations provided. Skipping configuration vault step..." fi echo "[appscript]: Retrieving Vault's status" +# Vault local address +export API_PORT=$(salt-call --local grains.get 'vault:api_port' --output=json | jq .[]) +export VAULT_ADDR=http://127.0.0.1:$API_PORT vault status echo "[appscript]: Completed appscript vault successfully!" diff --git a/variables.tf b/variables.tf index d1041d0..bfccffa 100644 --- a/variables.tf +++ b/variables.tf @@ -1,6 +1,8 @@ -### -### REQUIRED VARIABLES -### +# --------------------------------------------------------------------------------------------------------------------- +# REQUIRED PARAMETERS +# You must provide a value for each of these parameters. 
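The appscript above derives VAULT_ADDR from the api_port grain before running vault status; the same health endpoint the ALB target group polls can also be hit directly on a node. A sketch, assuming the default api_port of 8200:

    # standbyok=true lets standby nodes answer 200 as well as the active node
    curl -s "http://127.0.0.1:8200/v1/sys/health?standbyok=true" | jq .
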
+# --------------------------------------------------------------------------------------------------------------------- + variable "name" { type = string description = "Name of the vault stack, will be use to prefix resources" @@ -51,7 +53,7 @@ variable "vault_version" { variable "vault_url" { type = string description = "The DNS address that vault will be accessible at. Stack name will be used as the url when value is set to empty. Example: vault.domain.net" - default = "" + default = null } variable "domain_name" { @@ -63,22 +65,22 @@ variable "route53_zone_id" { type = string description = "Hosted zone ID Route 53 hosted zone" } - +# --------------------------------------------------------------------------------------------------------------------- +# OPTIONAL PARAMETERS +# These parameters have reasonable defaults. +# --------------------------------------------------------------------------------------------------------------------- variable "kms_key_id" { type = string description = "Id of an AWS KMS key use for auto unseal operation when vault is intialize" - default = "" + default = null } variable "dynamodb_table" { type = string description = "Name of the Dynamodb to be used as storage backend for Vault" - default = "" + default = null } -### -### OPTIONAL VARIABLES -### variable "ami_name_filter" { type = string description = "Will be use to filter out AMI" @@ -91,10 +93,10 @@ variable "ami_name_regex" { default = "spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2" } -variable "configs_path" { +variable "vault_configs_path" { type = string - description = "Path to directory that contains configuration files for vault" - default = "" + description = "(Optional) Path to directory that contains configuration files for vault" + default = null } variable "enable_access_logs" { @@ -115,6 +117,12 @@ variable "lb_internal" { default = false } +variable "inbound_cidrs" { + type = list(string) + description = "(Optional) IP address or range of addresses to be allowed to Firewall Zone." + default = [] +} + variable "ingress_cidr_blocks" { type = list(string) description = "(Optional) List of CIDR block." @@ -127,6 +135,16 @@ variable "lb_ssl_policy" { default = "ELBSecurityPolicy-FS-2018-06" } +variable "api_port" { + description = "The port to use for Vault API calls" + default = 8200 +} + +variable "cluster_port" { + description = "The port to use for Vault server-to-server communication." + default = 8201 +} + variable "min_capacity" { type = string description = "(Optional) Minimum number of instances in the Autoscaling Group" @@ -145,6 +163,30 @@ variable "desired_capacity" { default = "2" } +variable "dynamodb_max_read_capacity" { + type = number + description = "(Optional) The max capacity of the scalable target for DynamoDb table autoscaling." + default = 100 +} + +variable "dynamodb_min_read_capacity" { + type = number + description = "(Optional) The min capacity of the scalable target for DynamoDb table autoscaling." + default = 5 +} + +variable "dynamodb_target_value" { + type = number + description = "(Optional) The target value for the metric of the scaling policy configuration." + default = 70 +} + +variable "enabled_repos" { + type = list(string) + description = "(Optional) List of repos to be enabled with yum-config-manager. Epel repo will be enabled by default." 
+ default = [] +} + variable "pypi_index_url" { type = string description = "(Optional) URL to the PyPi Index" From 27316a68f9bd4a91444d3f544d4f9728209524ac Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 4 Sep 2019 14:27:54 -0400 Subject: [PATCH 17/34] Updates tests to validate Vault's URL is accessible --- tests/Gopkg.lock | 199 ++++++++++++++++++++++++++++++++++- tests/module_test.go | 53 ++++++++++ tests/vault-py2/main.tf | 11 +- tests/vault-py2/variables.tf | 4 +- tests/vault-py3/main.tf | 13 ++- tests/vault-py3/variables.tf | 17 --- 6 files changed, 266 insertions(+), 31 deletions(-) diff --git a/tests/Gopkg.lock b/tests/Gopkg.lock index 99dac1b..20009c3 100644 --- a/tests/Gopkg.lock +++ b/tests/Gopkg.lock @@ -9,6 +9,14 @@ revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" version = "v1.1.1" +[[projects]] + digest = "1:e4f5819333ac698d294fe04dbf640f84719658d5c7ce195b10060cc37292ce79" + name = "github.com/golang/snappy" + packages = ["."] + pruneopts = "UT" + revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a" + version = "v0.0.1" + [[projects]] digest = "1:9a125bb28b817431abc860b051cf10f41febe7830749e6d460826c4e253994be" name = "github.com/gruntwork-io/terratest" @@ -26,6 +34,123 @@ revision = "367843c5fa8429d84d2e9b78402546316b54ee91" version = "v0.17.6" +[[projects]] + digest = "1:0ade334594e69404d80d9d323445d2297ff8161637f9b2d347cc6973d2d6f05b" + name = "github.com/hashicorp/errwrap" + packages = ["."] + pruneopts = "UT" + revision = "8a6fb523712970c966eefc6b39ed2c5e74880354" + version = "v1.0.0" + +[[projects]] + digest = "1:af105c7c5dc0b4ae41991f122cae860b9600f7d226072c2a83127048c991660c" + name = "github.com/hashicorp/go-cleanhttp" + packages = ["."] + pruneopts = "UT" + revision = "eda1e5db218aad1db63ca4642c8906b26bcf2744" + version = "v0.5.1" + +[[projects]] + digest = "1:cf6b61e1b4c26b0c7526cee4a0cee6d8302b17798af4b2a56a90eedac0aef11a" + name = "github.com/hashicorp/go-hclog" + packages = ["."] + pruneopts = "UT" + revision = "5ccdce08c75b6c7b37af61159f13f6a4f5e2e928" + version = "v0.9.2" + +[[projects]] + digest = "1:f668349b83f7d779567c880550534addeca7ebadfdcf44b0b9c39be61864b4b7" + name = "github.com/hashicorp/go-multierror" + packages = ["."] + pruneopts = "UT" + revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1" + version = "v1.0.0" + +[[projects]] + digest = "1:bc4393e7d030ef4a548d9643997e2ae9064ed93d7ed140569b27336ee3b77464" + name = "github.com/hashicorp/go-retryablehttp" + packages = ["."] + pruneopts = "UT" + revision = "a83ad44d6a5fc343d7c4babf601092b3c189f402" + version = "v0.6.2" + +[[projects]] + digest = "1:7b893c9e1181e224506c523777dea0d16f4bd20a7627b100cc800e14229f405c" + name = "github.com/hashicorp/go-rootcerts" + packages = ["."] + pruneopts = "UT" + revision = "df8e78a645e18d56ed7bb9ae10ffb8174ab892e2" + version = "v1.0.1" + +[[projects]] + digest = "1:8abc57884881876d02f467bb7d4ed7ce3a58dac3f8f7ba60579ce4ffc6afd7e1" + name = "github.com/hashicorp/go-sockaddr" + packages = ["."] + pruneopts = "UT" + revision = "c7188e74f6acae5a989bdc959aa779f8b9f42faf" + version = "v1.0.2" + +[[projects]] + digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8" + name = "github.com/hashicorp/hcl" + packages = [ + ".", + "hcl/ast", + "hcl/parser", + "hcl/scanner", + "hcl/strconv", + "hcl/token", + "json/parser", + "json/scanner", + "json/token", + ] + pruneopts = "UT" + revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" + version = "v1.0.0" + +[[projects]] + digest = "1:d22f47ec7404382d5d99152ae979ca0cc7a740839b1520c645747acb0b4c5391" 
+ name = "github.com/hashicorp/vault" + packages = [ + "api", + "sdk/helper/compressutil", + "sdk/helper/consts", + "sdk/helper/hclutil", + "sdk/helper/jsonutil", + "sdk/helper/parseutil", + "sdk/helper/strutil", + ] + pruneopts = "UT" + revision = "a1a5f0d798d4181778259403fae0802fff46915a" + version = "v1.2.2" + +[[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" + name = "github.com/mitchellh/mapstructure" + packages = ["."] + pruneopts = "UT" + revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" + version = "v1.1.2" + +[[projects]] + digest = "1:b5e6db1f0095a7427712ba7f5890d6c0cb6b61d6f9d76bc5a4e3d2344461e652" + name = "github.com/pierrec/lz4" + packages = [ + ".", + "internal/xxh32", + ] + pruneopts = "UT" + revision = "8ef35db8296124c4969aab929c16c91c3cb2c8a0" + version = "v2.2.6" + [[projects]] digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" name = "github.com/pmezard/go-difflib" @@ -34,6 +159,14 @@ revision = "792786c7400a136282c1664665ae0a8db921c6c2" version = "v1.0.0" +[[projects]] + digest = "1:6baa565fe16f8657cf93469b2b8a6c61a277827734400d27e44d589547297279" + name = "github.com/ryanuber/go-glob" + packages = ["."] + pruneopts = "UT" + revision = "51a8f68e6c24dc43f1e371749c89a267de4ebc53" + version = "v1.0.0" + [[projects]] digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" name = "github.com/stretchr/testify" @@ -47,7 +180,7 @@ [[projects]] branch = "master" - digest = "1:4d40045409ff4ad024dc87c28ea31ff4207b574764e0b8f6ee6dcbb3b420285a" + digest = "1:c4c38e643ce0e70332dd5e53265ba2eb5193173f3df83a418708af5b0f478a1b" name = "golang.org/x/crypto" packages = [ "curve25519", @@ -55,6 +188,7 @@ "ed25519/internal/edwards25519", "internal/chacha20", "internal/subtle", + "pbkdf2", "poly1305", "ssh", "ssh/agent", @@ -64,9 +198,15 @@ [[projects]] branch = "master" - digest = "1:76ee51c3f468493aff39dbacc401e8831fbb765104cbf613b89bef01cf4bad70" + digest = "1:d32feaee571ce6344189c3b3ff66c0619ecb3f2d7e1869164a6ea9e5a80a025f" name = "golang.org/x/net" - packages = ["context"] + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + ] pruneopts = "UT" revision = "74dc4d7220e7acc4e100824340f3e66577424772" @@ -78,6 +218,52 @@ pruneopts = "UT" revision = "fde4db37ae7ad8191b03d30d27f258b5291ae4e3" +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" + name = "golang.org/x/time" + packages = ["rate"] + pruneopts = "UT" + revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef" + +[[projects]] + digest = "1:9593bab40e981b1f90b7e07faeab0d09b75fe338880d08880f986a9d3283c53f" + name = "gopkg.in/square/go-jose.v2" + packages = [ 
+ ".", + "cipher", + "json", + "jwt", + ] + pruneopts = "UT" + revision = "730df5f748271903322feb182be83b43ebbbe27d" + version = "v2.3.1" + [[projects]] digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" name = "gopkg.in/yaml.v2" @@ -89,6 +275,11 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - input-imports = ["github.com/gruntwork-io/terratest/modules/terraform"] + input-imports = [ + "github.com/gruntwork-io/terratest/modules/logger", + "github.com/gruntwork-io/terratest/modules/retry", + "github.com/gruntwork-io/terratest/modules/terraform", + "github.com/hashicorp/vault/api", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/tests/module_test.go b/tests/module_test.go index ca18252..ab00874 100644 --- a/tests/module_test.go +++ b/tests/module_test.go @@ -1,11 +1,18 @@ package testing import ( + "errors" + "fmt" "io/ioutil" "log" + "net/http" "testing" + "time" + "github.com/gruntwork-io/terratest/modules/logger" + "github.com/gruntwork-io/terratest/modules/retry" "github.com/gruntwork-io/terratest/modules/terraform" + "github.com/hashicorp/vault/api" ) func TestModule(t *testing.T) { @@ -64,4 +71,50 @@ func runTerraform(t *testing.T, directory string) { // This will run `terraform init` and `terraform apply` and fail the test if there are any errors terraform.InitAndApply(t, terraformOptions) + + testVaultViaAlb(t, terraformOptions) +} + +// Use the Vault client to connect to the Vault via the ALB, via the route53 record, and make sure it works without +// Vault or TLS errors +func testVaultViaAlb(t *testing.T, terraformOptions *terraform.Options) { + clusterURL := terraform.Output(t, terraformOptions, "cluster_url") + description := fmt.Sprintf("Testing Vault via ALB at cluster URL %s", clusterURL) + logger.Logf(t, description) + + maxRetries := 30 + sleepBetweenRetries := 10 * time.Second + + vaultClient := createVaultClient(t, clusterURL) + + out := retry.DoWithRetry(t, description, maxRetries, sleepBetweenRetries, func() (string, error) { + isInitialized, err := vaultClient.Sys().InitStatus() + if err != nil { + return "", err + } + if isInitialized { + return "Successfully verified that Vault cluster is initialized.!", nil + } else { + return "", errors.New("Expected Vault cluster to be initialized, but ALB reports it is not.") + } + }) + + logger.Logf(t, out) +} + +// Create a Vault client configured to talk to Vault running at the given domain name +func createVaultClient(t *testing.T, clusterURL string) *api.Client { + config := api.DefaultConfig() + config.Address = fmt.Sprintf("%s", clusterURL) + + // The TLS cert we are using in this test does not have the ELB DNS name in it, so disable the TLS check + clientTLSConfig := config.HttpClient.Transport.(*http.Transport).TLSClientConfig + clientTLSConfig.InsecureSkipVerify = true + + client, err := api.NewClient(config) + if err != nil { + t.Fatalf("Failed to create Vault client: %v", err) + } + + return client } diff --git a/tests/vault-py2/main.tf b/tests/vault-py2/main.tf index 3aebafd..4099804 100644 --- a/tests/vault-py2/main.tf +++ b/tests/vault-py2/main.tf @@ -7,7 +7,7 @@ resource "random_id" "name" { prefix = "tf-vault-" } -module "vault-py2" { +module "base" { source = "../../" environment = var.environment @@ -26,11 +26,16 @@ module "vault-py2" { route53_zone_id = var.route53_zone_id # Vault settings - vault_version = var.vault_version - dynamodb_table = var.dynamodb_table + vault_version = var.vault_version + vault_configs_path = "${path.module}/.configs" + 
dynamodb_table = var.dynamodb_table # Watchmaker settings watchmaker_config = var.watchmaker_config toggle_update = "B" } + +output "cluster_url" { + value = module.base.vault_url +} diff --git a/tests/vault-py2/variables.tf b/tests/vault-py2/variables.tf index d878e25..a50b40c 100644 --- a/tests/vault-py2/variables.tf +++ b/tests/vault-py2/variables.tf @@ -32,7 +32,7 @@ variable "vault_version" { variable "kms_key_id" { description = "Id of an AWS KMS key use for auto unseal operation when vault is intialize" type = string - default = "" + default = null } variable "domain_name" { @@ -48,7 +48,7 @@ variable "route53_zone_id" { variable "dynamodb_table" { description = "Name of the Dynamodb to be used as storage backend for Vault" type = string - default = "" + default = null } variable "cloudwatch_agent_url" { diff --git a/tests/vault-py3/main.tf b/tests/vault-py3/main.tf index c58092f..ac9309a 100644 --- a/tests/vault-py3/main.tf +++ b/tests/vault-py3/main.tf @@ -4,7 +4,7 @@ terraform { resource "random_id" "name" { byte_length = 6 - prefix = "tf-vault-py3-" + prefix = "tf-vault" } @@ -12,12 +12,12 @@ module "vault-py3" { source = "../../" environment = var.environment - desired_capacity = 1 + desired_capacity = 2 ami_owner = var.ami_owner name = "${random_id.name.hex}-py3" key_pair_name = var.key_pair_name - kms_key_id = var.kms_key_id + kms_key_id = null ec2_subnet_ids = var.ec2_subnet_ids lb_subnet_ids = var.lb_subnet_ids @@ -27,8 +27,8 @@ module "vault-py3" { route53_zone_id = var.route53_zone_id # Vault settings - vault_version = var.vault_version - dynamodb_table = var.dynamodb_table + vault_version = "1.2.0" + dynamodb_table = null # Watchmaker settings watchmaker_config = var.watchmaker_config @@ -36,3 +36,6 @@ module "vault-py3" { toggle_update = "B" } +output "cluster_url" { + value = module.vault-py3.vault_url +} diff --git a/tests/vault-py3/variables.tf b/tests/vault-py3/variables.tf index d878e25..f11047e 100644 --- a/tests/vault-py3/variables.tf +++ b/tests/vault-py3/variables.tf @@ -24,17 +24,6 @@ variable "lb_subnet_ids" { type = list(string) } -variable "vault_version" { - description = "Version of Vault to be installed on servers" - type = string -} - -variable "kms_key_id" { - description = "Id of an AWS KMS key use for auto unseal operation when vault is intialize" - type = string - default = "" -} - variable "domain_name" { type = string description = "Domain to provision test vault cluster" @@ -45,12 +34,6 @@ variable "route53_zone_id" { description = "Hosted zone ID Route 53 hosted zone" } -variable "dynamodb_table" { - description = "Name of the Dynamodb to be used as storage backend for Vault" - type = string - default = "" -} - variable "cloudwatch_agent_url" { type = string description = "(Optional) S3 URL to CloudWatch Agent installer. 
Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip"

From ab6f6f41918f11564a31a71326b3f5fade3f089a Mon Sep 17 00:00:00 2001
From: Triet Le
Date: Tue, 10 Sep 2019 08:19:31 -0400
Subject: [PATCH 18/34] Updates vault synced states to allow configs to be
 written within the salt states

---
 main.tf                |    2 -
 salt/_modules/vault.py | 1113 +++++++++++++++++++---------------------
 salt/_states/vault.py  |  237 ++++++++-
 salt/_utils/vault.py   |  105 +---
 salt/vault/sync.sls    |   71 ++-
 scripts/appscript.sh   |   37 +-
 6 files changed, 824 insertions(+), 741 deletions(-)

diff --git a/main.tf b/main.tf
index c4d9407..c8cff50 100644
--- a/main.tf
+++ b/main.tf
@@ -98,12 +98,10 @@ data "template_file" "appscript" {

   vars = {
     salt_content_archive = local.s3_salt_vault_content
-    vault_config_archive = local.s3_vault_configuration

     salt_grains_json = join("", ["'", jsonencode({
       api_port         = var.api_port
       cluster_port     = var.cluster_port
-      config_dir_path  = local.config_dir_path
       dynamodb_table   = local.dynamodb_table
       inbound_cidrs    = concat(var.inbound_cidrs, local.default_inbound_cdirs)
       kms_key_id       = local.kms_key_id
diff --git a/salt/_modules/vault.py b/salt/_modules/vault.py
index 3f7042c..b4849b7 100644
--- a/salt/_modules/vault.py
+++ b/salt/_modules/vault.py
@@ -11,20 +11,15 @@ import os
 import glob
 from collections import OrderedDict

-from datetime import datetime, timedelta
-
-import salt.config
-import salt.syspaths
-import salt.utils
 import salt.exceptions

 log = logging.getLogger(__name__)

 try:
     import hvac
     DEPS_INSTALLED = True
-except:
+except ImportError as e:
     log.debug('Unable to import the dependencies...')
+    log.exception(e)
     DEPS_INSTALLED = False


@@ -36,10 +31,47 @@ def __virtual__():
     return DEPS_INSTALLED


-class VaultAuthMethod:
+def get_policies_manager():
+    """
+    Retrieve an object containing helper methods for the policy manager
+
+    Returns:
+        [VaultPolicyManager] -- Policy Manager
+    """
+    return VaultPolicyManager()
+
+
+def get_secret_engines_manager():
     """
-    Vault authentication method container
+    Retrieve an object containing helper methods for the secrets engines manager
+
+    Returns:
+        [VaultSecretsManager] -- Secrets Engines Manager
+    """
+    return VaultSecretsManager()
+
+
+def get_auth_methods_manager():
+    """
+    Retrieve an object containing helper methods for the auth methods manager
+
+    Returns:
+        [VaultAuthManager] -- Auth Methods Manager
+    """
+    return VaultAuthManager()
+
+
+def get_audit_device_manager():
+    """
+    Retrieve an object containing helper methods for the audit device manager
+
+    Returns:
+        [VaultAuditManager] -- Audit Device Manager
     """
+    return VaultAuditManager()
+
+
+class VaultAuthMethod:
     type = None
     path = None
     description = None
@@ -49,6 +81,8 @@ class VaultAuthMethod:

     def __init__(self, type, path, description, config=None, auth_config=None, extra_config=None):
         """
+        Instantiate class
+
         :param type: Authentication type
         :type type: str
         :param path: Authentication mount point
@@ -98,7 +132,7 @@ def __eq__(self, other):
     def __repr__(self):
         return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" %
                 (self.path, self.type, self.description, str(self.config),
-                    self.get_unique_id()))
+                 self.get_unique_id()))


 class VaultSecretEngine:
@@ -114,6 +148,8 @@ class VaultSecretEngine:

     def __init__(self, type, path, description, config=None, secret_config=None, extra_config=None):
         """
+        Instantiate class
+
         :param type: Secret type
         :type type: str
         :param path: Secret mount point
@@ -154,7 +190,7 @@ def __eq__(self, other):

     def __repr__(self):
         return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" %
                 (self.path, self.type, self.description, str(self.config),
-                    self.get_unique_id()))
+                 self.get_unique_id()))


 class VaultAuditDevice:
@@ -164,14 +200,6 @@ class VaultAuditDevice:
     options = None

     def __init__(self, type, path, description, options):
-        """initialize class
-
-        Arguments:
-            type {str} -- Specifies the type of the audit device.
-            path {str} -- Specifies the path in which to enable the audit device. This is part of the request URL.
-            description {str} -- Human-friendly description of the audit device.
-            options {str} -- Configuration options to pass to the audit device itself. This is dependent on the audit device type.
-        """
         self.type = type
         self.path = path.replace("/", "")
         self.description = (description if description else "")
@@ -179,7 +207,7 @@ def __init__(self, type, path, description, options):

     def get_device_unique_id(self):
         unique_str = str(self.type + self.path +
-                          self.description + str(self.options))
+                         self.description + str(self.options))
         sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest()
         return sha256_hash

@@ -189,361 +217,349 @@ def __eq__(self, other):

     def __repr__(self):
         return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" %
                 (self.path, self.type, self.description, str(self.options),
-                    self.get_device_unique_id()))
+                 self.get_device_unique_id()))


 class VaultPolicyManager():
     """
     Module for managing policies within Vault
     """
-    client = None
-    local_policies = []
-    remote_policies = []
-    policies_folder = ''
-    ret = {}
-
-    def __init__(self, policies_dir_path):
-        """
-        Arguments:
-            policies_dir_path {str} -- Specify path to the directory contains all policies
-        """
+
+    def __init__(self):
         log.info("Initializing Vault Policy Manager...")
-        self.policies_folder = policies_dir_path

-    def get_remote_policies(self):
+    def get_remote_policies(self, client, ret):
         """
-        Reading policies from configs folder
+        Retrieve policies from the Vault server
         """
         log.info('Retrieving policies from vault...')
+        policies = []

         try:
-            policies_resp = self.client.sys.list_policies()
+            policies_resp = client.sys.list_policies()

             for policy in policies_resp['data']['policies']:
                 if not (policy == 'root' or policy == 'default'):
-                    self.remote_policies.append(policy)
+                    policies.append(policy)

-            log.debug('Current configured policies: %s' %
-                      ', '.join(self.remote_policies))
+            log.debug('Current policies: %s' %
+                      ', '.join(policies))
+            log.info('Finished retrieving policies from vault.')
         except Exception as e:
+            ret['result'] = False
             log.exception(e)

-        log.info('Finished retrieving policies from vault.')
+        return policies

-    def get_local_policies(self):
+    def load_local_policies(self, policy_dir, ret):
         """
-        Reading policies from configs folder
+        Load policies from the local configs folder
         """
         log.info('Loading policies from local config folder...')
-        for policy_file in glob.iglob(os.path.join(self.policies_folder, "*.hcl")):
-            name = os.path.splitext(os.path.basename(policy_file))[0]
-            prefix = policy_file.split(os.sep)[-2]
-            log.debug("Local policy %s - prefix: %s - name: %s found"
-                      % (policy_file, prefix, name))
-
-            with open(policy_file, 'r') as fd:
-                self.local_policies.append({
-                    "name": name,
-                    "content": fd.read()
-                })
-        log.info('Finished loading policies local config folder.')
-
-    def push_policies(self):
+        policies = []
+        try:
+            for policy_file in glob.iglob(os.path.join(policy_dir, "*.hcl")):
+                name = os.path.splitext(os.path.basename(policy_file))[0]
+                prefix = policy_file.split(os.sep)[-2]
+                log.debug("Local policy %s - prefix: %s - name: %s found"
+                          % (policy_file, prefix, name))
+
+                with open(policy_file, 'r') as fd:
+                    policies.append({
+                        "name": name,
+                        "content": fd.read()
+                    })
+
+            log.info('Finished loading policies from local config folder.')
+        except Exception:
+            raise
+
+        return policies
+
+    def push_policies(self, client, remote_policies, local_policies, ret):
         """
         Sync policies from configs folder to vault
         """
         log.info('Pushing policies from local config folder to vault...')
         new_policies = []
-        for policy in self.local_policies:
-            self.client.sys.create_or_update_policy(
-                name=policy['name'],
-                policy=policy['content']
-            )
-            if policy['name'] in self.remote_policies:
-                log.debug('Policy "%s" has been updated.', policy["name"])
-            else:
-                new_policies.append(policy["name"])
-                log.debug('Policy "%s" has been created.', policy["name"])
-
-        log.info('Finished pushing policies local config folder to vault.')
+        try:
+            for policy in local_policies:
+                client.sys.create_or_update_policy(
+                    name=policy['name'],
+                    policy=policy['content']
+                )
+                if policy['name'] in remote_policies:
+                    log.debug('Policy "%s" has been updated.', policy["name"])
+                else:
+                    new_policies.append(policy["name"])
+                    log.debug('Policy "%s" has been created.', policy["name"])

-        # Build return object
+            log.info('Finished pushing policies from local config folder to vault.')

-        self.ret['old'] = self.remote_policies
-        if len(new_policies) > 0:
-            self.ret['new'] = json.loads(json.dumps(new_policies))
-        else:
-            self.ret['new'] = "No changes"
+            # Build return object
+            ret['changes']['old'] = remote_policies
+            if len(new_policies) > 0:
+                ret['changes']['new'] = json.loads(json.dumps(new_policies))
+            else:
+                ret['changes']['new'] = "No changes"
+        except Exception as e:
+            ret['result'] = False
+            log.exception(e)

-    def cleanup_policies(self):
+    def cleanup_policies(self, client, remote_policies, local_policies, ret):
         """
         Cleaning up policies
         """
         log.info('Cleaning up vault policies...')
         has_change = False
-        for policy in self.remote_policies:
-            if policy not in [pol['name'] for pol in self.local_policies]:
-                log.debug(
-                    '"%s" is not found in configs folder. Removing it from vault...', policy)
-                has_change = True
-                self.client.sys.delete_policy(name=policy)
-                log.debug('"%s" is removed.', policy)
+        try:
+            for policy in remote_policies:
+                if policy not in [pol['name'] for pol in local_policies]:
+                    log.debug(
+                        '"%s" is not found in configs folder. Removing it from vault...', policy)
+                    has_change = True
+                    client.sys.delete_policy(name=policy)
+                    log.debug('"%s" is removed.', policy)
+
+            if has_change:
+                ret['changes']['new'] = json.loads(json.dumps(
+                    [ob['name'] for ob in local_policies]))
+
+            log.info('Finished cleaning up vault policies.')
+        except Exception as e:
+            ret['result'] = False
+            log.exception(e)

-        if has_change:
-            self.ret['new'] = json.loads(json.dumps(
-                [ob['name'] for ob in self.local_policies]))
+    def sync(self, client, policy_dir, ret):

-        log.info('Finished cleaning up vault policies.')
+        log.info('-------------------------------------')

-    def run(self):
-        """
+        remote_policies = []
+        local_policies = []

-        Returns:
-            dict -- results of the execution
-        """
-        log.info('-------------------------------------')
-        self.client = __utils__['vault.build_client']()
-        self.get_remote_policies()
-        self.get_local_policies()
-        self.push_policies()
-        self.cleanup_policies()
+        if client is None:
+            client = __utils__['vault.build_client']()
+
+        try:
+            remote_policies = self.get_remote_policies(client, ret)
+            local_policies = self.load_local_policies(policy_dir, ret)
+            self.push_policies(client, remote_policies, local_policies, ret)
+            self.cleanup_policies(client, remote_policies, local_policies, ret)
+
+            ret['result'] = True
+        except Exception as e:
+            ret['result'] = False
+            log.exception(e)

         log.info('-------------------------------------')
-        return self.ret
+        return ret


 class VaultAuthManager():
     """
     Module for managing Vault Authentication Methods
     """
-    auth_methods_remote = []
-    auth_methods_local = []
-    ldap_groups = []
-    config_path = ''
-    ret = {}
-    client = None
-
-    def __init__(self, config_path):
-        """
-        Arguments:
-            config_path {str} -- Path of the yaml file that contains configuration options for authentication methods
-        """
+    def __init__(self):
         log.info("Initializing Vault Auth Manager...")
-        self.config_path = config_path

-    def get_remote_auth_methods(self):
+    def get_remote_auth_methods(self, client, ret):
         """
         Retrieve auth methods from vault
         """
         log.info('Retrieving auth methods from Vault...')
-        auth_resp = self.client.sys.list_auth_methods()
-        log.debug('Current auth methods from Vault: %s',
-                  ', '.join(auth_resp['data'].keys()))
-
-        for auth_method in auth_resp['data']:
-            self.auth_methods_remote.append(
-                VaultAuthMethod(
-                    type=auth_resp[auth_method]['type'],
-                    path=(auth_resp[auth_method]["path"]
-                          if 'path' in auth_resp[auth_method] else auth_method),
-                    description=auth_resp[auth_method]["description"],
-                    config=OrderedDict(
-                        sorted(auth_resp[auth_method]["config"].items()))
+        auth_resp = client.sys.list_auth_methods()
+
+        auth_methods = []
+        try:
+            for auth_method in auth_resp['data']:
+                auth_methods.append(
+                    VaultAuthMethod(
+                        type=auth_resp[auth_method]['type'],
+                        path=(auth_resp[auth_method]["path"]
+                              if 'path' in auth_resp[auth_method] else auth_method),
+                        description=auth_resp[auth_method]["description"],
+                        config=OrderedDict(
+                            sorted(auth_resp[auth_method]["config"].items()))
+                    )
                 )
-            )
+        except Exception:
+            raise

         log.info('Finished retrieving auth methods from vault.')
+        return auth_methods

-    def get_local_auth_methods(self):
-        log.info('Loading auth methods form local config file: %s',
-                 self.config_path)
-        config = __utils__['vault.load_config_file'](
-            config_path=self.config_path)
-
-        for auth_method in config["auth-methods"]:
-            auth_config = None
-            extra_config = None
-
-            if "auth_config" in auth_method:
-                auth_config = OrderedDict(
-                    sorted(auth_method["auth_config"].items()))
-
-            if "extra_config" 
in auth_method: - extra_config = OrderedDict( - sorted(auth_method["extra_config"].items())) - - self.auth_methods_local.append( - VaultAuthMethod( - type=auth_method["type"], - path=auth_method["path"], - description=auth_method["description"], - config=OrderedDict(sorted(auth_method["config"].items())), - auth_config=auth_config, - extra_config=extra_config + def populate_local_auth_methods(self, configs, ret): + log.info('Populating local auth methods...') + + auth_methods = [] + try: + for auth_method in configs: + auth_config = None + extra_config = None + + if "auth_config" in auth_method: + auth_config = OrderedDict( + sorted(auth_method["auth_config"].items())) + + if "extra_config" in auth_method: + extra_config = OrderedDict( + sorted(auth_method["extra_config"].items())) + + auth_methods.append( + VaultAuthMethod( + type=auth_method["type"], + path=auth_method["path"], + description=auth_method["description"], + config=OrderedDict( + sorted(auth_method["config"].items())), + auth_config=auth_config, + extra_config=extra_config + ) ) - ) - log.info('Finished loading auth methods from local config file.') + log.info('Finished populating local auth methods.') + except Exception: + raise + + return auth_methods - def configure_auth_methods(self): + def configure_auth_methods(self, client, remote_methods, local_methods, ret): log.info('Processing and configuring auth methods...') + new_auth_methods = [] - for auth_method in self.auth_methods_local: - log.debug('Checking if auth method "%s" is enabled...', - auth_method.path) - if auth_method in self.auth_methods_remote: - log.debug( - 'Auth method "%s" is already enabled. Tuning...', auth_method.path) - self.client.sys.tune_auth_method( - path=auth_method.path, - description=auth_method.description, - default_lease_ttl=auth_method.config["default_lease_ttl"], - max_lease_ttl=auth_method.config["max_lease_ttl"] - ) - log.debug('Auth method "%s" is tuned.', auth_method.type) - else: - log.debug( - 'Auth method "%s" is not enabled. Enabling now...', auth_method.path) - self.client.sys.enable_auth_method( - method_type=auth_method.type, - path=auth_method.path, - description=auth_method.description, - config=auth_method.config - ) - log.debug('Auth method "%s" is enabled.', auth_method.type) - new_auth_methods.append(auth_method.type) - - # Provision config for specific auth method - if auth_method.auth_config: - if auth_method.type == "ldap": - log.debug('Provisioning configuration for LDAP...') - self.client.auth.ldap.configure(**auth_method.auth_config) - log.debug('Configuration for LDAP is provisioned.') - else: - log.debug( - 'Auth method "%s" does not contain any specific configurations.', auth_method.type) - - if auth_method.extra_config: - log.debug( - 'Provisioning extra configurations for auth method "%s"', auth_method.type) - # Get LDAP group mapping from vault - try: - ldap_list_group_response = self.client.auth.ldap.list_groups() + ldap_groups = [] + + try: + for auth_method in local_methods: + log.debug('Checking if auth method "%s" is enabled...', + auth_method.path) + if auth_method in remote_methods: + log.debug( + 'Auth method "%s" is already enabled. Tuning...', auth_method.path) + client.sys.tune_auth_method( + path=auth_method.path, + description=auth_method.description, + default_lease_ttl=auth_method.config["default_lease_ttl"], + max_lease_ttl=auth_method.config["max_lease_ttl"] + ) + log.debug('Auth method "%s" is tuned.', auth_method.type) + else: + log.debug( + 'Auth method "%s" is not enabled. 
Enabling now...', auth_method.path) + client.sys.enable_auth_method( + method_type=auth_method.type, + path=auth_method.path, + description=auth_method.description, + config=auth_method.config + ) + log.debug('Auth method "%s" is enabled.', auth_method.type) + new_auth_methods.append(auth_method.type) + + # Provision config for specific auth method + if auth_method.auth_config: + if auth_method.type == "ldap": + log.debug('Provisioning configuration for LDAP...') + client.auth.ldap.configure(**auth_method.auth_config) + log.debug('Configuration for LDAP is provisioned.') + else: + log.debug( + 'Auth method "%s" does not contain any specific configurations.', auth_method.type) + + if auth_method.extra_config: + log.debug( + 'Provisioning extra configurations for auth method "%s"', auth_method.type) + # Get LDAP group mapping from vault + ldap_list_group_response = client.auth.ldap.list_groups() if ldap_list_group_response != None: - self.ldap_groups = ldap_list_group_response["data"]["keys"] - - except Exception as e: - log.exception(e) - - log.debug("LDAP groups from vault: %s", str(self.ldap_groups)) - - # Update LDAP group mapping - log.debug( - 'Writing LDAP group -> Policy mappings for "%s"', str(auth_method.path)) - local_config_groups = auth_method.extra_config["group_policy_map"] - for key in local_config_groups: - log.debug('LDAP Group ["%s"] -> Policies %s', - str(key), local_config_groups[key]) - try: - self.client.auth.ldap.create_or_update_group( + ldap_groups = ldap_list_group_response["data"]["keys"] + + log.debug("LDAP groups from vault: %s", str(ldap_groups)) + + # Update LDAP group mapping + log.debug( + 'Writing LDAP group -> Policy mappings for "%s"', str(auth_method.path)) + local_config_groups = auth_method.extra_config["group_policy_map"] + for key in local_config_groups: + log.debug('LDAP Group ["%s"] -> Policies %s', + str(key), local_config_groups[key]) + + client.auth.ldap.create_or_update_group( name=key, policies=local_config_groups[key] ) - except Exception as e: - log.exception(e) - - # Clean up LDAP group mapping - if self.ldap_groups != None: - for group in self.ldap_groups: - if group in {k.lower(): v for k, v in local_config_groups.items()}: - log.debug( - 'LDAP group mapping ["%s"] exists in configuration, no cleanup necessary', group) - else: - log.info( - 'LDAP group mapping ["%s"] does not exists in configuration, deleting...', group) - self.client.auth.ldap.delete_group( - name=group - ) - log.info( - 'LDAP group mapping ["%s"] deleted.', group) - else: - log.debug( - 'Auth method "%s" does not contain any extra configurations.', auth_method.type - ) - log.info('Finished processing and configuring auth methods...') - # Build return object - self.ret['old'] = json.loads(json.dumps( - [ob.type for ob in self.auth_methods_remote])) + # Clean up LDAP group mapping + if ldap_groups != None: + for group in ldap_groups: + if group in {k.lower(): v for k, v in local_config_groups.items()}: + log.debug( + 'LDAP group mapping ["%s"] exists in configuration, no cleanup necessary', group) + else: + log.info( + 'LDAP group mapping ["%s"] does not exists in configuration, deleting...', group) + client.auth.ldap.delete_group( + name=group + ) + log.info( + 'LDAP group mapping ["%s"] deleted.', group) + else: + log.debug( + 'Auth method "%s" does not contain any extra configurations.', auth_method.type + ) + # Build return object + ret['changes']['old'] = json.loads(json.dumps( + [ob.type for ob in remote_methods])) - if len(new_auth_methods) > 0: - self.ret['new'] = 
json.loads(json.dumps(new_auth_methods)) - else: - self.ret['new'] = "No changes" + if len(new_auth_methods) > 0: + ret['changes']['new'] = json.loads( + json.dumps(new_auth_methods)) + else: + ret['changes']['new'] = "No changes" + + log.info('Finished processing and configuring auth methods...') + except Exception: + raise - def cleanup_auth_methods(self): + def cleanup_auth_methods(self, client, remote_methods, local_methods, ret): log.info('Cleaning up auth methods...') has_change = False - for auth_method in self.auth_methods_remote: - if auth_method not in self.auth_methods_local: - has_change = True - log.info( - 'Auth method "%s" does not exist in configuration. Disabling...', auth_method.type) - self.client.sys.disable_auth_method( - path=auth_method.path - ) - log.info('Auth method "%s" is disabled.', auth_method.type) - log.info('Finished cleaning up auth methods.') - - if has_change: - self.ret['new'] = json.loads(json.dumps( - [ob.type for ob in self.auth_methods_local])) - - def run(self): - """ - - Returns: - dict -- results of the execution - """ - log.info('-------------------------------------') - self.client = __utils__['vault.build_client']() - self.get_remote_auth_methods() - self.get_local_auth_methods() - self.configure_auth_methods() - self.cleanup_auth_methods() - log.info('-------------------------------------') + try: + for auth_method in remote_methods: + if auth_method not in local_methods: + has_change = True + log.info( + 'Auth method "%s" does not exist in configuration. Disabling...', auth_method.type) + client.sys.disable_auth_method( + path=auth_method.path + ) + log.info('Auth method "%s" is disabled.', auth_method.type) - return self.ret + log.info('Finished cleaning up auth methods.') + if has_change: + ret['changes']['new'] = json.loads(json.dumps( + [ob.type for ob in local_methods])) + except Exception: + raise class VaultSecretsManager(): """ Module for handling Vault secret engines """ - client = None - config_path = '' - remote_secret_engines = [] - local_secret_engines = [] - ret = {} - - def __init__(self, config_path): - """ - Arguments: - config_path {str} -- Path of the yaml file that contains configuration options for secrets engines - """ + def __init__(self): log.info("Initializing Vault Secret Manager...") - self.config_path = config_path - def get_remote_secrets_engines(self): + def get_remote_secrets_engines(self, client, ret): """ Retrieve secret engines from vault server """ - log.info('Retrieving secrets engines from vault') + log.info('Retrieving secrets engines from Vault') + remote_secret_engines = [] try: - secrets_engines_resp = self.client.sys.list_mounted_secrets_engines() + log.info(client) + secrets_engines_resp = client.sys.list_mounted_secrets_engines() for engine in secrets_engines_resp['data']: - self.remote_secret_engines.append( + remote_secret_engines.append( VaultSecretEngine( type=secrets_engines_resp[engine]['type'], path=(secrets_engines_resp[engine]["path"] @@ -553,225 +569,218 @@ def get_remote_secrets_engines(self): sorted(secrets_engines_resp[engine]["config"].items())) ) ) - self.remote_secret_engines.sort(key=lambda x: x.type) - except Exception as e: - log.exception(e) + remote_secret_engines.sort(key=lambda x: x.type) + except Exception: + raise + log.info('Finished retrieving secrets engines from vault.') + return remote_secret_engines - def get_local_secrets_engines(self): + def populate_local_secrets_engines(self, configs, ret): """ Retrieving secret engines from local config file """ - 
log.debug('Reding secret engines from config file...') + log.info('Populating local secret engines...') + local_secret_engines = [] try: - config = __utils__['vault.load_config_file']( - config_path=self.config_path) - - for secret_engine in config['secrets-engines']: + for secret_engine in configs: + config = None secret_config = None extra_config = None + if 'secret_config' in secret_engine: - secret_config = OrderedDict( - sorted(secret_engine["secret_config"].items())) + if secret_engine["secret_config"] != None: + secret_config = OrderedDict( + sorted(secret_engine["secret_config"].items())) if 'extra_config' in secret_engine: - extra_config = OrderedDict( - sorted(secret_engine["extra_config"].items())) + if secret_engine["extra_config"] != None: + extra_config = OrderedDict( + sorted(secret_engine["extra_config"].items())) + + if 'config' in secret_engine: + if secret_engine["config"] != None: + config = OrderedDict( + sorted(secret_engine["config"].items())) + + local_secret_engines.append(VaultSecretEngine( + type=secret_engine["type"], + path=secret_engine["path"], + description=secret_engine["description"], + config=config, + secret_config=secret_config, + extra_config=extra_config + )) - self.local_secret_engines.append( - VaultSecretEngine( - type=secret_engine["type"], - path=secret_engine["path"], - description=secret_engine["description"], - config=OrderedDict( - sorted(secret_engine["config"].items())), - secret_config=secret_config, - extra_config=extra_config - ) - ) - self.local_secret_engines.sort(key=lambda x: x.type) - except Exception as e: - log.exception(e) - log.debug('Finished reading secrets engines from config file.') + local_secret_engines.sort(key=lambda x: x.type) + except Exception: + raise + + log.info('Finished populating local secret engines.') + return local_secret_engines - def configure_secrets_engines(self): + def configure_secrets_engines(self, client, remote_engines, local_engines, ret): log.info('Processing and configuring secrets engines...') new_secrets_engines = [] - for secret_engine in self.local_secret_engines: - log.debug('Checking if secret engine "%s" at path "%s" is enabled...', - secret_engine.type, - secret_engine.path) - if secret_engine in self.remote_secret_engines: - log.debug( - 'Secret engine "%s" at path "%s" is already enabled. Tuning...', - secret_engine.type, - secret_engine.path) - - self.client.sys.tune_mount_configuration( - path=secret_engine.path, - description=secret_engine.description, - default_lease_ttl=secret_engine.config["default_lease_ttl"], - max_lease_ttl=secret_engine.config["max_lease_ttl"] - ) - log.debug('Secret engine "%s" at path "%s" is tuned.', - secret_engine.type, secret_engine.path) - else: - log.debug( - 'Secret engine "%s" at path "%s" is not enabled. 
Enabling now...',
-                    secret_engine.type,
-                    secret_engine.path)
-                new_secrets_engines.append(secret_engine.type)
-                self.client.sys.enable_secrets_engine(
-                    backend_type=secret_engine.type,
-                    path=secret_engine.path,
-                    description=secret_engine.description,
-                    config=secret_engine.config
-                )
-                log.debug('Secret engine " % s" at path " % s" is enabled.',
-                          secret_engine.type, secret_engine.path)
-
-            if secret_engine.secret_config != None:
-                log.info(
-                    'Provisioning specific configurations for "%s" secrets engine...', secret_engine.type)
-
-                if secret_engine.type == 'ad':
-                    self.client.secrets.activedirectory.configure(
-                        **secret_engine.secret_config
-                    )
-                if secret_engine.type == 'database':
-                    self.client.secrets.database.configure(
-                        **secret_engine.secret_config
-                    )
-
-                log.info(
-                    'Finished provisioning specific configurations for "%s" secrets engine...', secret_engine.type)
-
-            if secret_engine.extra_config != None:
-                log.info(
-                    'Provisioning extra conifgurations for for "%s" secrets engine...', secret_engine.type)
-
-                if secret_engine.type == 'ad':
-                    # Get roles from vault
-                    existing_roles = None
-                    try:
-                        existing_roles = self.client.secrets.activedirectory.list_roles()
-                        log.debug(existing_roles)
-                    except Exception as e:
-                        log.exception(e)
-
-                    # Add new roles
-                    local_roles = secret_engine.extra_config['roles']
-                    for key in local_roles:
-                        log.debug('AD Role ["%s"] -> Role %s',
-                                  str(key), local_roles[key])
-                        try:
-                            self.client.secrets.activedirectory.create_or_update_role(
-                                name=key,
-                                service_account_name=local_roles[key]['service_account_name'],
-                                ttl=local_roles[key]['ttl']
-                            )
-                        except Exception as e:
-                            log.exception(e)
-                            raise salt.exceptions.SaltInvocationError(e)
-
-                    # Remove missing roles
-                    if existing_roles != None:
-                        for role in existing_roles:
-                            if role in {k.lower(): v for k, v in local_roles.items()}:
-                                log.debug(
-                                    'AD role ["%s"] exists in configuration, no cleanup necessary', role)
-                            else:
-                                log.info(
-                                    'Ad role ["%s"] does not exists in configuration, deleting...', role)
-                                self.client.secrets.activedirectory.delete_role(
-                                    name=role
-                                )
-                                log.info(
-                                    'AD role has been ["%s"] deleted.', role)
-            else:
-                log.debug(
-                    'Secret engine "%s" does not contain any extra configurations.', secret_engine.type
-                )
+        try:
+            for secret_engine in local_engines:
+                log.debug('Checking if secret engine "%s" at path "%s" is enabled...',
+                          secret_engine.type,
+                          secret_engine.path)
+                if secret_engine in remote_engines:
+                    log.debug(
+                        'Secret engine "%s" at path "%s" is already enabled. Tuning...',
+                        secret_engine.type,
+                        secret_engine.path)
+
+                    client.sys.tune_mount_configuration(
+                        path=secret_engine.path,
+                        description=secret_engine.description,
+                        default_lease_ttl=secret_engine.config["default_lease_ttl"],
+                        max_lease_ttl=secret_engine.config["max_lease_ttl"]
+                    )
+                    log.debug('Secret engine "%s" at path "%s" is tuned.',
+                              secret_engine.type, secret_engine.path)
+                else:
+                    log.debug(
+                        'Secret engine "%s" at path "%s" is not enabled. Enabling now...',
+                        secret_engine.type,
+                        secret_engine.path)
+                    new_secrets_engines.append(secret_engine.type)
+                    client.sys.enable_secrets_engine(
+                        backend_type=secret_engine.type,
+                        path=secret_engine.path,
+                        description=secret_engine.description,
+                        config=secret_engine.config
+                    )
+                    log.debug('Secret engine "%s" at path "%s" is enabled.',
+                              secret_engine.type, secret_engine.path)
+
+                if secret_engine.secret_config is not None:
+                    log.info(
+                        'Provisioning specific configurations for "%s" secrets engine...', secret_engine.type)
+
+                    if secret_engine.type == 'ad':
+                        client.secrets.activedirectory.configure(
+                            **secret_engine.secret_config
+                        )
+                    if secret_engine.type == 'database':
+                        client.secrets.database.configure(
+                            **secret_engine.secret_config
+                        )
+
+                    log.info(
+                        'Finished provisioning specific configurations for "%s" secrets engine...', secret_engine.type)
+
+                if secret_engine.extra_config is not None:
+                    log.info(
+                        'Provisioning extra configurations for "%s" secrets engine...', secret_engine.type)
+
+                    if secret_engine.type == 'ad':
+                        # Get roles from vault
+                        existing_roles = None
+                        try:
+                            existing_roles = client.secrets.activedirectory.list_roles()
+                            log.debug(existing_roles)
+                        except Exception as e:
+                            log.exception(e)
+
+                        # Add new roles
+                        local_roles = secret_engine.extra_config['roles']
+                        for key in local_roles:
+                            log.debug('AD Role ["%s"] -> Role %s',
+                                      str(key), local_roles[key])
+                            try:
+                                client.secrets.activedirectory.create_or_update_role(
+                                    name=key,
+                                    service_account_name=local_roles[key]['service_account_name'],
+                                    ttl=local_roles[key]['ttl']
+                                )
+                            except Exception as e:
+                                log.exception(e)
+                                raise salt.exceptions.SaltInvocationError(e)
+
+                        # Remove missing roles
+                        if existing_roles is not None:
+                            for role in existing_roles:
+                                if role in {k.lower(): v for k, v in local_roles.items()}:
+                                    log.debug(
+                                        'AD role ["%s"] exists in configuration, no cleanup necessary', role)
+                                else:
+                                    log.info(
+                                        'AD role ["%s"] does not exist in configuration, deleting...', role)
+                                    client.secrets.activedirectory.delete_role(
+                                        name=role
+                                    )
+                                    log.info(
+                                        'AD role ["%s"] has been deleted.', role)
+                else:
+                    log.debug(
+                        'Secret engine "%s" does not contain any extra configurations.', secret_engine.type
+                    )
+        except Exception:
+            raise

         log.info('Finished processing and configuring secrets engines.')

         # Build return object
-        self.ret['old'] = json.loads(json.dumps([
-            "Type: {} - Path: {}".format(ob.type, ob.path) for ob in self.remote_secret_engines]))
+        ret['changes']['old'] = json.loads(json.dumps([
+            "Type: {} - Path: {}".format(ob.type, ob.path) for ob in remote_engines]))

         if len(new_secrets_engines) > 0:
-            self.ret['new'] = json.loads(json.dumps(new_secrets_engines))
+            ret['changes']['new'] = json.loads(
+                json.dumps(new_secrets_engines))
         else:
-            self.ret['new'] = "No changes"
+            ret['changes']['new'] = "No changes"

-    def cleanup_secrets_engines(self):
+    def cleanup_secrets_engines(self, client, remote_engines, local_engines, ret):
         log.info('Cleaning up secrets engines...')
         has_changes = False
-        for secret_engine in self.remote_secret_engines:
-            if not (secret_engine.type == "system" or
-                    secret_engine.type == "cubbyhole" or
-                    secret_engine.type == "identity" or
-                    secret_engine.type == "generic"):
-                if secret_engine in self.local_secret_engines:
-                    log.debug('Secrets engine "%s" at path "%s" exists in configuration, no cleanup necessary.',
-                              secret_engine.type, secret_engine.path)
-                else:
-                    log.debug('Secrets engine "%s" at path "%s" does not exist in configuration. 
Disabling...',
-                              secret_engine.type, secret_engine.path)
-                    has_changes = True
-                    self.client.sys.disable_secrets_engine(
-                        path=secret_engine.path
-                    )
-                    log.info('Secrets engine "%s" at path "%s" is disabled.',
-                             secret_engine.type, secret_engine.type)
-        log.info('Finished cleaning up secrets engines.')
-
-        if has_changes:
-            self.ret['new'] = json.loads(json.dumps([
-                "Type: {} - Path: {}".format(ob.type, ob.path) for ob in self.local_secret_engines]))

-    def run(self):
-        """
+        try:
+            for secret_engine in remote_engines:
+                if not (secret_engine.type == "system" or
+                        secret_engine.type == "cubbyhole" or
+                        secret_engine.type == "identity" or
+                        secret_engine.type == "generic"):
+                    if secret_engine in local_engines:
+                        log.debug('Secrets engine "%s" at path "%s" exists in configuration, no cleanup necessary.',
+                                  secret_engine.type, secret_engine.path)
+                    else:
+                        log.debug('Secrets engine "%s" at path "%s" does not exist in configuration. Disabling...',
+                                  secret_engine.type, secret_engine.path)
+                        has_changes = True
+                        client.sys.disable_secrets_engine(
+                            path=secret_engine.path
+                        )
+                        log.info('Secrets engine "%s" at path "%s" is disabled.',
+                                 secret_engine.type, secret_engine.path)
+        except Exception:
+            raise

-        Returns:
-            dict -- results of the execution
-        """
-        log.info('-------------------------------------')
-        self.client = __utils__['vault.build_client']()
-        self.get_remote_secrets_engines()
-        self.get_local_secrets_engines()
-        self.configure_secrets_engines()
-        self.cleanup_secrets_engines()
-        log.info('-------------------------------------')
+        log.info('Finished cleaning up secrets engines.')

-        return self.ret
+        if has_changes:
+            ret['changes']['new'] = json.loads(json.dumps([
+                "Type: {} - Path: {}".format(ob.type, ob.path) for ob in local_engines]))


 class VaultAuditManager():
     """
     Module for handling Vault audit devices
     """
-    client = None
-    remote_audit_devices = []
-    local_audit_devices = []
-    config_path = ''
-    ret = {}
-
-    def __init__(self, config_path):
-        """
-        Arguments:
-            config_path {str} -- Path of the yaml file that contains configuration options for audit devices
-        """
+    def __init__(self):
         log.info("Initializing Vault Audit Manager...")
-        self.config_path = config_path

-    def get_remote_audit_devices(self):
+    def get_remote_audit_devices(self, client, ret):
         log.info("Retrieving audit devices from vault...")
+        devices = []
         try:
-            audit_devices_resp = self.client.sys.list_enabled_audit_devices()
+            audit_devices_resp = client.sys.list_enabled_audit_devices()

             for device in audit_devices_resp['data']:
                 audit_device = audit_devices_resp[device]
-                self.remote_audit_devices.append(
+                devices.append(
                     VaultAuditDevice(
                         type=audit_device['type'],
                         path=(audit_device["path"]
@@ -780,136 +789,92 @@ def get_remote_audit_devices(self):
                         options=json.dumps(audit_device["options"])
                     )
                 )
-        except Exception as e:
-            log.exception(e)
-        log.info('Finished retrieving audit devices from vault.')

-    def get_local_audit_devices(self):
-        log.info("Loading audit devices from local config...")
-        config = __utils__['vault.load_config_file'](
-            config_path=self.config_path)
+            log.info('Finished retrieving audit devices from vault.')
+        except Exception:
+            raise

-        if config:
-            for audit_device in config["audit-devices"]:
-                if 'options' in audit_device:
-                    options = json.dumps(audit_device["options"])
-                    log.debug(options)
+        return devices

-                self.local_audit_devices.append(
-                    VaultAuditDevice(
-                        type=audit_device["type"],
-                        path=audit_device["path"],
-                        description=audit_device["description"],
-                        options=options
+    def get_local_audit_devices(self, configs, ret):
+        log.info("Loading audit devices from local config...")
+        devices = []
+        if configs:
+            try:
+                for audit_device in configs:
+                    # Default to None so devices without options do not raise a NameError
+                    options = None
+                    if 'options' in audit_device:
+                        options = json.dumps(audit_device["options"])
+                        log.debug(options)
+
+                    devices.append(
+                        VaultAuditDevice(
+                            type=audit_device["type"],
+                            path=audit_device["path"],
+                            description=audit_device["description"],
+                            options=options
+                        )
                     )
-                )
-        log.info('Finished loading audit devices from local config.')

-    def configure_audit_devices(self):
+                log.info('Finished loading audit devices from local config.')
+            except Exception:
+                raise
+
+        return devices
+
+    def configure_audit_devices(self, client, remote_devices, local_devices, ret):
         log.info('Processing and configuring audit devices...')
         new_audit_devices = []
-        for audit_device in self.local_audit_devices:
-            log.debug('Checking if audit device "%s" at path "%s" is enabled...',
-                      audit_device.type, audit_device.path)
-
-            if audit_device in self.remote_audit_devices:
-                log.debug('Audit device "%s" at path "%s" is already enabled.',
-                          audit_device.type, audit_device.path)
-            else:
-                log.debug(
-                    'Audit device "%s" at path "%s" is not enabled. Enabling now...', audit_device.type, audit_device.path)
-                new_audit_devices.append(audit_device.type)
-                self.client.sys.enable_audit_device(
-                    device_type=audit_device.type,
-                    path=audit_device.path,
-                    description=audit_device.description,
-                    options=json.loads(audit_device.options)
-                )
-                log.debug('Audit device "%s" at path "%s" is enabled.',
-                          audit_device.type, audit_device.path)
-        log.info('Finished processing audit devices.')
-        # Build return object
-        self.ret['old'] = json.loads(json.dumps(
-            [ob.type for ob in self.remote_audit_devices]))
-
-        if len(new_audit_devices) > 0:
-            self.ret['new'] = json.loads(json.dumps(new_audit_devices))
-        else:
-            self.ret['new'] = "No changes"
-
-    def cleanup_audit_devices(self):
-        log.info('Cleaning up audit devices...')
-        has_changes = False
-        for audit_device in self.remote_audit_devices:
-            if audit_device not in self.local_audit_devices:
-                log.info('Disabling audit device "%s" at path "%s"...',
-                         audit_device.type, audit_device.path)
-                has_changes = True
-                self.client.sys.disable_audit_device(
-                    path=audit_device.path
-                )
-        log.info('Finished cleaning up audit devices.')
-
-        if has_changes:
-            self.ret['new'] = json.loads(json.dumps(
-                [ob.type for ob in self.local_audit_devices]))
-
-    def run(self):
-        """
-
-        Returns:
-            dict -- results of the execution
-        """
-        log.info('-------------------------------------')
-        self.client = __utils__['vault.build_client']()
-        self.get_remote_audit_devices()
-        self.get_local_audit_devices()
-        self.configure_audit_devices()
-        self.cleanup_audit_devices()
-        log.info('-------------------------------------')
-        return self.ret
-
-
-def auth_methods_synced(config_path):
-    """
-    Ensure all auth method defined in the config file are synced with vault
-
-    :param config_path: path to configuration file for auth methods
-    :returns: Result of the execution
-    :rtype: dict
-    """
-    return VaultAuthManager(config_path).run()
-
-
-def policies_synced(policies_dir_path):
-    """
-    Ensure all policies defined are synced with vault
-
-    :param policies_dir_path: path to directory contains all policies
-    :returns: Result of the execution
-    :rtype: dict
-    """
-    return VaultPolicyManager(policies_dir_path).run()
-

-def secrets_engines_synced(config_path):
-    """
-    Ensure all secrets engines defined in the config file are synced with vault
+        try:
+            for audit_device in local_devices:
+                log.debug('Checking if audit device "%s" at path "%s" is enabled...',
+                          audit_device.type, audit_device.path)

-    :param config_path: path to configuration file for secrets engines
-    :returns: Result of the execution
-    :rtype: dict
-    """
-    return VaultSecretsManager(config_path).run()
-
-
-def audit_devices_synced(config_path):
-    """
-    Ensure all audit devices defined in the config file are synced with vault
-
-    :param config_path: path to configuration file for audit devices
-    :returns: Result of the execution
-    :rtype: dict
-    """
-    return VaultAuditManager(config_path).run()
+                if audit_device in remote_devices:
+                    
log.debug('Audit device "%s" at path "%s" is already enabled.', + audit_device.type, audit_device.path) + else: + log.debug( + 'Audit device "%s" at path "%s" is not enabled. Enabling now...', audit_device.type, audit_device.path) + new_audit_devices.append(audit_device.type) + client.sys.enable_audit_device( + device_type=audit_device.type, + path=audit_device.path, + description=audit_device.description, + options=json.loads(audit_device.options) + ) + log.debug('Audit device "%s" at path "%s" is enabled.', + audit_device.type, audit_device.path) -def secrets_engines_synced(config_path): - """ - Ensure all secrets engines defined in the config file are synced with vault + log.info('Finished processing audit devices.') + # Build return object + ret['changes']['old'] = json.loads(json.dumps( + [ob.type for ob in remote_devices])) - :param config_path: path to configuration file for secrets engines - :returns: Result of the execution - :rtype: dict - """ - return VaultSecretsManager(config_path).run() + if len(new_audit_devices) > 0: + ret['changes']['new'] = json.loads( + json.dumps(new_audit_devices)) + else: + ret['changes']['new'] = "No changes" + except Exception: + raise -def audit_devices_synced(config_path): - """ - Ensure all audit devices defined in the config file are synced with vault + def cleanup_audit_devices(self, client, remote_devices, local_devices, ret): + log.info('Cleaning up audit devices...') + has_changes = False + try: + for audit_device in remote_devices: + if audit_device not in local_devices: + log.info('Disabling audit device "%s" at path "%s"...', + audit_device.type, audit_device.path) + has_changes = True + client.sys.disable_audit_device( + path=audit_device.path + ) + log.info('Finished cleaning up audit devices.') - :param config_path: path to configuration file for audit devices - :returns: Result of the execution - :rtype: dict - """ - return VaultAuditManager(config_path).run() + if has_changes: + ret['changes']['new'] = json.loads(json.dumps( + [ob.type for ob in local_devices])) + except Exception: + raise diff --git a/salt/_states/vault.py b/salt/_states/vault.py index bc3d515..8aad145 100644 --- a/salt/_states/vault.py +++ b/salt/_states/vault.py @@ -1,19 +1,7 @@ -# -*- coding: utf-8 -*- - from __future__ import absolute_import import logging -import os import json -import sys - -import salt.config -import salt.syspaths -import salt.utils -import salt.exceptions - -import hvac -import boto3 log = logging.getLogger(__name__) @@ -21,8 +9,9 @@ import hvac import boto3 DEPS_INSTALLED = True -except ImportError: +except ImportError as e: log.debug('Unable to import the libraries.') + log.exception(e) DEPS_INSTALLED = False __all__ = ['initialize'] @@ -35,24 +24,25 @@ def __virtual__(): def initialized(name, ssm_path, recovery_shares=5, recovery_threshold=3): """ Ensure that the vault instance has been initialized and run the - initialization if it has not. + initialization if it has not. 
The root token is stored in the configured SSM parameter.

-    :param name: The id used for the state definition
-    :param recovery_shares: The number of recovery shares to use for the
-        initialization key
-    :param recovery_threshold: The number of recovery keys required to unseal the vault
-    :param ssm_path: The path to store root token in SSM Parameter store
+    Arguments:
+        name {string} -- The id used for the state definition
+        ssm_path {string} -- The path to the SSM parameter that will store the root token

-    :returns: Result of the execution
-    :rtype: dict
+    Keyword Arguments:
+        recovery_shares {int} -- Specifies the number of shares to split the recovery key into. (default: {5})
+        recovery_threshold {int} -- Specifies the number of shares required to reconstruct the recovery key. This must be less than or equal to recovery_shares. (default: {3})
+
+    Returns:
+        ret {dict} -- Result of the execution
     """
     ret = {'name': name,
-        'comment': '',
-        'result': '',
-        'changes': {}}
+           'comment': '',
+           'result': '',
+           'changes': {}}

-    vault_url = __utils__['vault.get_vault_url']()
-    client = hvac.Client(url=vault_url)
+    client = __utils__['vault.build_client']()

     is_initialized = client.sys.is_initialized()

@@ -101,3 +91,198 @@ def initialized(name, ssm_path, recovery_shares=5, recovery_threshold=3):
         ret['comment'] = 'Vault has {}initialized'.format(
             '' if is_success else 'failed to be ')
     return ret
+
+
+def secret_engines_synced(name, configs=[]):
+    """
+    Ensure secrets engines are synced with Vault
+
+    Arguments:
+        name {string} -- The id used for the state definition
+
+    Keyword Arguments:
+        configs {list} -- A list of configuration rules that define the secrets engines (default: [])
+
+    Returns:
+        ret {dict} -- Result of the execution
+    """
+
+    client = __utils__['vault.build_client']()
+    remote_secret_engines = []
+    local_secret_engines = []
+    ret = {
+        'name': name,
+        'comment': '',
+        'result': '',
+        'changes': {}
+    }
+
+    log.debug(json.dumps(configs))
+
+    secretsManager = __salt__['vault.get_secret_engines_manager']()
+
+    try:
+        remote_secret_engines = secretsManager.get_remote_secrets_engines(
+            client, ret)
+
+        local_secret_engines = secretsManager.populate_local_secrets_engines(
+            configs, ret)
+
+        secretsManager.configure_secrets_engines(
+            client,
+            remote_secret_engines,
+            local_secret_engines,
+            ret
+        )
+
+        secretsManager.cleanup_secrets_engines(
+            client,
+            remote_secret_engines,
+            local_secret_engines,
+            ret
+        )
+
+        ret['result'] = True
+    except Exception as e:
+        ret['result'] = False
+        log.exception(e)
+
+    return ret
+
+
+def auth_methods_synced(name, configs=[]):
+    """
+    Ensure authentication methods are synced with Vault
+
+    Arguments:
+        name {string} -- The id used for the state definition
+
+    Keyword Arguments:
+        configs {list} -- A list of configuration rules that define the authentication methods (default: [])
+
+    Returns:
+        ret {dict} -- Result of the execution
+    """
+
+    client = __utils__['vault.build_client']()
+    remote_auth_methods = []
+    local_auth_methods = []
+    ret = {
+        'name': name,
+        'comment': '',
+        'result': '',
+        'changes': {}
+    }
+
+    authsManager = __salt__['vault.get_auth_methods_manager']()
+
+    try:
+
+        remote_auth_methods = authsManager.get_remote_auth_methods(client, ret)
+        local_auth_methods = authsManager.populate_local_auth_methods(
+            configs, ret)
+
+        authsManager.configure_auth_methods(
+            client,
+            remote_auth_methods,
+            local_auth_methods,
+            ret
+        )
+
+        authsManager.cleanup_auth_methods(
+            client,
+            remote_auth_methods,
+            local_auth_methods,
+            ret
+        )
+
+        ret['result'] = True
+    except Exception as e:
+        ret['result'] = False
+        log.exception(e)
+
+    return ret
+
+
+def policies_synced(name, policies=[]):
+    """
+    Ensure policies are synced with Vault
+
+    Arguments:
+        name {string} -- The id used for the state definition
+
+    Keyword Arguments:
+        policies {list} -- A list of policies to be synced with Vault (default: [])
+
+    Returns:
+        ret {dict} -- Result of the execution
+    """
+
+    client = __utils__['vault.build_client']()
+    remote_policies = []
+    local_policies = []
+    ret = {
+        'name': name,
+        'comment': '',
+        'result': '',
+        'changes': {}
+    }
+
+    policiesManager = __salt__['vault.get_policies_manager']()
+
+    try:
+        remote_policies = policiesManager.get_remote_policies(client, ret)
+        local_policies = json.loads(json.dumps(policies))
+        policiesManager.push_policies(
+            client, remote_policies, local_policies, ret)
+        policiesManager.cleanup_policies(
+            client, remote_policies, local_policies, ret)
+
+        ret['result'] = True
+    except Exception as e:
+        ret['result'] = False
+        log.exception(e)
+    return ret
+
+
+def audit_devices_synced(name, configs=[]):
+    """
+    Ensures audit devices are synced with Vault
+
+    Arguments:
+        name {string} -- The id used for the state definition
+
+    Keyword Arguments:
+        configs {list} -- A list of configuration rules that define the audit devices (default: [])
+
+    Returns:
+        ret {dict} -- Result of the execution
+    """
+
+    client = __utils__['vault.build_client']()
+    remote_devices = []
+    local_devices = []
+    ret = {
+        'name': name,
+        'comment': '',
+        'result': '',
+        'changes': {}
+    }
+
+    auditDevicesManager = __salt__['vault.get_audit_device_manager']()
+    try:
+
+        remote_devices = auditDevicesManager.get_remote_audit_devices(
+            client, ret)
+
+        local_devices = auditDevicesManager.get_local_audit_devices(
+            configs, ret)
+
+        auditDevicesManager.configure_audit_devices(
+            client, remote_devices, local_devices, ret)
+
+        auditDevicesManager.cleanup_audit_devices(
+            client, remote_devices, local_devices, ret)
+
+        ret['result'] = True
+    except Exception as e:
+        ret['result'] = False
+        log.exception(e)
+    return ret
diff --git a/salt/_utils/vault.py b/salt/_utils/vault.py
index 93dd77d..a74ce72 100644
--- a/salt/_utils/vault.py
+++ b/salt/_utils/vault.py
@@ -1,15 +1,11 @@
-# -*- coding: utf-8 -*-
 from __future__ import absolute_import, print_function, unicode_literals

-import base64
 import logging
 import os
 import requests
 import json
-import time
 import yaml
 import hvac
-import hashlib

 from collections import OrderedDict
 from functools import wraps
@@ -17,107 +13,38 @@ log = logging.getLogger(__name__)
 logging.getLogger("requests").setLevel(logging.WARNING)


-def build_client(url=None,
-                 token=None,
-                 cert=None,
-                 verify=True,
-                 timeout=30,
-                 proxies=None,
-                 allow_redirects=True,
-                 session=None):
-    """Instantiates and returns hvac Client class for HashiCorp's Vault.
-
-    Keyword Arguments:
-        url {str} -- Base URL for the Vault instance being addressed. (default: {None})
-        token {str} -- Authentication token to include in requests sent to Vault. (default: {None})
-        cert {tuple} -- Certificates for use in requests sent to the Vault instance. This should be a tuple with the certificate and then key. (default: {None})
-        verify {bool} -- Either a boolean to indicate whether TLS verification should be performed when sending requests to Vault, or a string pointing at the CA bundle to use for verification. (default: {True})
-        timeout {int} -- The timeout value for requests sent to Vault. 
(default: {30})
-        proxies {dict} -- Proxies to use when performing requests (default: {None})
-        allow_redirects {bool} -- Whether to follow redirects when sending requests to Vault. (default: {True})
-        session {request.Session} -- Optional session object to use when performing request. (default: {None})
-    """
+def build_client(url=None, token=None):
+    """Return an hvac client for the local Vault API, falling back to the
+    VAULT_TOKEN environment variable when no token is supplied."""

-    vault_url = url if url != None else get_vault_url()
-    client = hvac.Client(url=vault_url)
+    vault_url = url if url is not None else get_vault_url()

+    client = hvac.Client(
+        url=vault_url,
+        token=token
+    )

-    client.token = os.environ.get('VAULT_TOKEN')
+    if token is None:
+        client.token = os.environ.get('VAULT_TOKEN')

     return client


 def get_vault_url():
     '''
-    Returns a string consist of url and port number
+    Returns a string consisting of the url and port number
     '''
-    port = __grains__['vault']['api_port'] if __grains__['vault']['api_port'] != None else 8200
-    url = "https://localhost"
+    port = __grains__['vault']['api_port'] if __grains__[
+        'vault']['api_port'] is not None else 8200
+    url = "http://localhost"

     return "{}:{}".format(url, port)


-def load_config_file(config_path):
-    """Retrieve config file from provided path
-
-    Arguments:
-        config_path {str} -- path to config file
-
-    Returns:
-        [obj] -- parsed object of the config
-    """
-    config = None
+def load_config_file(config_path):
+    configs = None
     with open(os.path.join(config_path), 'r') as fd:
         try:
-            config = yaml.load(fd)
-
+            # safe_load avoids executing arbitrary tags from untrusted YAML
+            configs = yaml.safe_load(fd)
         except yaml.YAMLError as e:
             log.critical("Unable to load conf file: " + str(e))
             return False
-    return config
-
-
-class VaultError(Exception):
-    def __init__(self, message=None, errors=None):
-        if errors:
-            message = ', '.join(errors)
-
-        self.errors = errors
-
-        super(VaultError, self).__init__(message)
-
-
-class InvalidRequest(VaultError):
-    pass
-
-
-class Unauthorized(VaultError):
-    pass
-
-
-class Forbidden(VaultError):
-    pass
-
-
-class InvalidPath(VaultError):
-    pass
-
-
-class RateLimitExceeded(VaultError):
-    pass
-
-
-class InternalServerError(VaultError):
-    pass
-
-
-class VaultNotInitialized(VaultError):
-    pass
-
-
-class VaultDown(VaultError):
-    pass
-
-
-class UnexpectedError(VaultError):
-    pass
-
-
-def vault_error():
-    return VaultError
+    return configs
diff --git a/salt/vault/sync.sls b/salt/vault/sync.sls
index 2b13e7b..9b428a9 100644
--- a/salt/vault/sync.sls
+++ b/salt/vault/sync.sls
@@ -1,23 +1,58 @@
 {% from "vault/map.jinja" import vault with context %}

-Sync Vault Policies:
-  module.run:
-    - vault.policies_synced:
-      - policies_dir_path: "{{ vault.config_dir_path }}/policies"
+vault_logs_dir:
+  file.directory:
+    - name: /etc/vault/logs
+    - user: vault
+    - group: vault
+    - mode: '0755'

-Sync Vault Authentication Methods:
-  module.run:
-    - vault.auth_methods_synced:
-      - config_path: "{{ vault.config_dir_path }}/auth_config.yml"
-    - required:
-      - module.run: Sync Vault Policies
+sync_secrets_engines:
+  vault.secret_engines_synced:
+    - configs:
+      - type: kv
+        path: services
+        description: Service-specific folders
+        config:
+          default_lease_ttl: 1800
+          max_lease_ttl: 1800
+      - type: database
+        path: db1
+        description: database secrets mount
+        config:
+          default_lease_ttl: 30m
+          max_lease_ttl: 60m

-Sync Vault Secrets Engines:
-  module.run:
-    - vault.secrets_engines_synced:
-      - config_path: "{{ vault.config_dir_path }}/secrets_config.yml"
+sync_authentication_methods:
+  vault.auth_methods_synced:
+    - configs:
+      - type: token
+        path: token
+        description: token-based credentials
+        config:
+          default_lease_ttl: 0
+          max_lease_ttl: 0

-Sync Vault Audit Devices:
-  module.run:
-    - vault.audit_devices_synced:
-      - config_path: "{{ vault.config_dir_path 
diff --git a/salt/vault/sync.sls b/salt/vault/sync.sls
index 2b13e7b..9b428a9 100644
--- a/salt/vault/sync.sls
+++ b/salt/vault/sync.sls
@@ -1,23 +1,58 @@
 {% from "vault/map.jinja" import vault with context %}

-Sync Vault Policies:
-  module.run:
-    - vault.policies_synced:
-      - policies_dir_path: "{{ vault.config_dir_path }}/policies"
+vault_logs_dir:
+  file.directory:
+    - name: /etc/vault/logs
+    - user: vault
+    - group: vault
+    - mode: '0755'

-Sync Vault Authentication Methods:
-  module.run:
-    - vault.auth_methods_synced:
-      - config_path: "{{ vault.config_dir_path }}/auth_config.yml"
-    - required:
-      - module.run: Sync Vault Policies
+sync_secrets_engines:
+  vault.secret_engines_synced:
+    - configs:
+      - type: kv
+        path: services
+        description: Service-specific folders
+        config:
+          default_lease_ttl: 1800
+          max_lease_ttl: 1800
+      - type: database
+        path: db1
+        description: database secrets mount
+        config:
+          default_lease_ttl: 30m
+          max_lease_ttl: 60m

-Sync Vault Secrets Engines:
-  module.run:
-    - vault.secrets_engines_synced:
-      - config_path: "{{ vault.config_dir_path }}/secrets_config.yml"
+sync_authentication_methods:
+  vault.auth_methods_synced:
+    - configs:
+      - type: token
+        path: token
+        description: token-based credentials
+        config:
+          default_lease_ttl: 0
+          max_lease_ttl: 0

-Sync Vault Audit Devices:
-  module.run:
-    - vault.audit_devices_synced:
-      - config_path: "{{ vault.config_dir_path }}/audit_config.yml"
+sync_audit_devices:
+  vault.audit_devices_synced:
+    - configs:
+      - type: file
+        path: file_log
+        description: first audit device
+        options:
+          file_path: /etc/vault/logs/audit.log
+
+sync_policies:
+  vault.policies_synced:
+    - policies:
+      - name: xyz_admin
+        content:
+          path:
+            '*': {capabilities: [read, create]}
+            'stage/*': {capabilities: [read, create, update, delete, list]}
+
+      - name: abc_admin
+        content:
+          path:
+            '*': {capabilities: [read, create]}
+            'stage/*': {capabilities: [read, create]}
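[editor's note] For orientation, the sync_secrets_engines and sync_audit_devices states above roughly boil down to hvac calls like the following. This is a simplified sketch with placeholder URL and token; the real execution module first diffs the desired configs against what Vault already has enabled:

import hvac

client = hvac.Client(url='http://localhost:8200', token='<root-token>')

# Equivalent of the kv entry under sync_secrets_engines
client.sys.enable_secrets_engine(
    backend_type='kv',
    path='services',
    description='Service-specific folders',
    config={'default_lease_ttl': 1800, 'max_lease_ttl': 1800},
)

# Equivalent of the file entry under sync_audit_devices
client.sys.enable_audit_device(
    device_type='file',
    path='file_log',
    description='first audit device',
    options={'file_path': '/etc/vault/logs/audit.log'},
)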
diff --git a/scripts/appscript.sh b/scripts/appscript.sh
index 3f69556..ceaa2ed 100644
--- a/scripts/appscript.sh
+++ b/scripts/appscript.sh
@@ -3,10 +3,8 @@ set -eu -o pipefail

 # Required vars
 SALT_ARCHIVE=${salt_content_archive}
-CONFIGS_ARCHIVE=${vault_config_archive}
 SALT_DIR="/srv/salt"
 ARCHIVE_FILE_NAME="salt_formula.zip"
-CONFIGS_FILE_NAME="vault_configs.zip"

 # Standard aws envs
 export AWS_DEFAULT_REGION=$(curl -sSL http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/.$//')
@@ -25,9 +23,6 @@ unzip $ARCHIVE_FILE_NAME -d $SALT_DIR
 echo "[appscript]: Remove salt formula archive file $ARCHIVE_FILE_NAME"
 rm $ARCHIVE_FILE_NAME

-echo "[appscript]: Updating salt grains..."
-salt-call --local saltutil.sync_grains
-
 echo "[appscript]: Configuring salt to read ec2 metadata into grains..."
 echo "metadata_server_grains: True" > /etc/salt/minion.d/metadata.conf

@@ -40,7 +35,7 @@ printf 'use_superseded:\n  - module.run\n' >> /etc/salt/minion
 echo "[appscript]: Print out salt versions report"
 salt-call --local --versions-report

-echo "[appscript]: Updating salt states to include custom vault's states/modules..."
+echo "[appscript]: Updating salt states/modules/utils/grains..."
 salt-call --local saltutil.sync_all

 echo "[appscript]: Retrieving path for directory storing log files..."
@@ -55,32 +50,10 @@ salt-call --local --retcode-passthrough state.sls vault -l info 2>&1 | tee $LOGS
 echo "[appscript]: Initializing vault..."
 salt-call --local --retcode-passthrough state.sls vault.initialize -l info 2>&1 | tee $LOGS_DIR/initialize.log

-# Applying configurations per specific implementation
-if [ "$CONFIGS_ARCHIVE" != "" ];
-then
-    echo "[appscript]: Retrieving root token to assist configuration provisioning..."
-    export SSM_PATH=$(salt-call --local grains.get 'vault:ssm_path' --output=json | jq .[] -r)
-    export CONFIGURATION_PATH=$(salt-call --local grains.get 'vault:config_dir_path' --output=json | jq .[] -r)
-    export VAULT_TOKEN=$(aws ssm get-parameter --name /"$SSM_PATH"/root_token --with-decryption --query 'Parameter.Value' | tr -d '"')
-
-    echo "[appscript]: Ensuring default vault configs location exists, $CONFIGURATION_PATH..."
-    mkdir -p $CONFIGURATION_PATH
-
-    echo "[appscript]: Download vault configs archive file from s3://$CONFIGS_ARCHIVE..."
-    aws s3 cp "s3://$CONFIGS_ARCHIVE" $CONFIGS_FILE_NAME
-
-    echo "[appscript]: Unzip vault configs archive file to $CONFIGURATION_PATH..."
-    unzip $CONFIGS_FILE_NAME -d $CONFIGURATION_PATH
-
-    echo "[appscript]: Remove vault configs archive file $CONFIGS_FILE_NAME"
-    rm $CONFIGS_FILE_NAME
-
-    echo "[appscript]: Sync configurations with the vault..."
-    salt-call --local --retcode-passthrough state.sls vault.sync -l info 2>&1 | tee $LOGS_DIR/sync_config.log
-
-else
-    echo "[appscript]: No vault configurations provided. Skipping configuration vault step..."
-fi
+echo "[appscript]: Sync configurations with the vault..."
+export SSM_PATH=$(salt-call --local grains.get 'vault:ssm_path' --output=json | jq .[] -r)
+export VAULT_TOKEN=$(aws ssm get-parameter --name /"$SSM_PATH"/root_token --with-decryption --query 'Parameter.Value' | tr -d '"')
+salt-call --local --retcode-passthrough state.sls vault.sync -l info 2>&1 | tee $LOGS_DIR/sync_config.log

 echo "[appscript]: Retrieving Vault's status"
 # Vault local address

From b5095ecc446ab6db65358850471d9cc83836d9ac Mon Sep 17 00:00:00 2001
From: Triet Le
Date: Tue, 10 Sep 2019 08:21:02 -0400
Subject: [PATCH 19/34] Updates states to leverage jinja templating engine

---
 salt/vault/configure.sls             | 11 ++++++
 salt/vault/files/server.hcl.jinja    | 22 ++++++------
 salt/vault/files/vault.conf.jinja    |  9 +----
 salt/vault/files/vault.service.jinja |  3 +-
 salt/vault/install.sls               | 51 +++++-----------------------
 salt/vault/service.sls               |  9 +++--
 6 files changed, 38 insertions(+), 67 deletions(-)

diff --git a/salt/vault/configure.sls b/salt/vault/configure.sls
index 13b8730..a1943a5 100644
--- a/salt/vault/configure.sls
+++ b/salt/vault/configure.sls
@@ -9,6 +9,17 @@ vault_configure_service_file:
     - source: salt://vault/files/server.hcl.jinja
     - name: /etc/vault/conf.d/server.hcl
     - template: jinja
+    - defaults:
+        ip_address: {{ grains.ip_interfaces.eth0.0 }}
+        api_port: {{ vault.api_port }}
+        cluster_port: {{ vault.cluster_port }}
+        region: {{ vault.region }}
+        dynamodb_table: {{ vault.dynamodb_table }}
+        kms_key_id: {{ vault.kms_key_id }}
+        listener_address: {{ vault.listener_address }}
+        listener_tls_disable: {{ vault.listener_tls_disable }}
+        default_lease_ttl: {{ vault.default_lease_ttl }}
+        max_lease_ttl: {{ vault.max_lease_ttl }}
     - user: root
     - group: root
    - mode: '0755'
diff --git a/salt/vault/files/server.hcl.jinja b/salt/vault/files/server.hcl.jinja
index 1f3dd93..9d74566 100644
--- a/salt/vault/files/server.hcl.jinja
+++ b/salt/vault/files/server.hcl.jinja
@@ -1,24 +1,24 @@
-{%- from "vault/map.jinja" import vault with context -%}
-
-api_addr = "http://{{ grains['ip_interfaces']['eth0'][0] }}:{{ vault.api_port }}"
+api_addr = "http://{{ ip_address }}:{{ api_port }}"
+cluster_addr = "http://{{ ip_address }}:{{ cluster_port }}"

 backend "dynamodb" {
-  region = "{{ vault.region }}"
-  table = "{{ vault.dynamodb_table }}"
+  region = "{{ region }}"
+  table = "{{ dynamodb_table }}"
   ha_enabled = "true"
 }

 seal "awskms" {
-  region = "{{ vault.region }}"
-  kms_key_id = "{{ vault.kms_key_id }}"
+  region = "{{ region }}"
+  kms_key_id = "{{ kms_key_id }}"
 }

 listener "tcp" {
-  address = "{{ vault.listener_address }}:{{ vault.api_port }}"
-  tls_disable = {{ vault.listener_tls_disable }}
+  address = "{{ listener_address }}:{{ api_port }}"
+  cluster_address = "{{ listener_address }}:{{ cluster_port }}"
+  tls_disable = {{ listener_tls_disable }}
 }

-default_lease_ttl = "{{ vault.default_lease_ttl }}"
-max_lease_ttl = "{{ vault.max_lease_ttl }}"
+default_lease_ttl = "{{ default_lease_ttl }}"
+max_lease_ttl = "{{ max_lease_ttl }}"

 ui = true
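[editor's note] With this patch the template no longer imports map.jinja itself; the state passes explicit context through defaults. A quick way to eyeball the rendered server.hcl outside of Salt, using jinja2 directly (all values below are made-up examples; file.managed does this rendering for real):

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('salt/vault/files'))
template = env.get_template('server.hcl.jinja')

print(template.render(
    ip_address='10.0.0.10',
    api_port=8200,
    cluster_port=8201,
    region='us-east-1',
    dynamodb_table='vault-backend',
    kms_key_id='0000-example-kms-id',
    listener_address='0.0.0.0',
    listener_tls_disable=1,
    default_lease_ttl='768h',
    max_lease_ttl='768h',
))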
-config="/etc/vault/conf.d/server.hcl" \ -{% endif -%} - >>/var/log/vault.log 2>&1 + exec /usr/local/bin/vault server {{ config }} >>/var/log/vault.log 2>&1 end script diff --git a/salt/vault/files/vault.service.jinja b/salt/vault/files/vault.service.jinja index 940d895..b1fc94b 100644 --- a/salt/vault/files/vault.service.jinja +++ b/salt/vault/files/vault.service.jinja @@ -1,4 +1,3 @@ -{%- from "vault/map.jinja" import vault with context -%} [Unit] Description=Vault secret management tool Requires=network-online.target @@ -8,7 +7,7 @@ After=network-online.target User=vault Group=vault PIDFile=/var/run/vault/vault.pid -ExecStart=/usr/local/bin/vault server {% if vault.dev_mode %} -dev {% else %} -config=/etc/vault/conf.d {% endif %} +ExecStart=/usr/local/bin/vault server {{ config }} ExecReload=/bin/kill -HUP $MAINPID KillMode=process KillSignal=SIGTERM diff --git a/salt/vault/install.sls b/salt/vault/install.sls index 8ea40d7..c063847 100644 --- a/salt/vault/install.sls +++ b/salt/vault/install.sls @@ -21,55 +21,20 @@ vault_data_dir: - group: vault - mode: '0755' -vault_logs_dir: - file.directory: - - name: /etc/vault/logs - - user: vault - - group: vault - - mode: '0755' - -vault_package_install_file_directory: - file.directory: - - name: /opt/vault/bin - - makedirs: True - -vault_package_install_file_managed: - file.managed: - - name: /opt/vault/{{ vault.version }}_SHA256SUMS - - source: {{ vault.repo_base_url }}/{{ vault.version }}/vault_{{ vault.version }}_SHA256SUMS - - skip_verify: True - - makedirs: True - -vault_package_install_archive_extracted: +install_vault_binary: archive.extracted: - - name: /opt/vault/bin + - name: /usr/local/bin/ - source: {{ vault.repo_base_url }}/{{ vault.version }}/vault_{{ vault.version }}_{{ vault.platform }}.zip - source_hash: {{ vault.repo_base_url }}/{{ vault.version }}/vault_{{ vault.version }}_SHA256SUMS - - source_hash_name: vault_{{ vault.version }}_{{ vault.platform }}.zip - archive_format: zip + - if_missing: /usr/local/bin/vault + - source_hash_update: True - enforce_toplevel: False - - overwrite: True - - onchanges: - - file: vault_package_install_file_managed - -vault_package_install_service_dead: - service.dead: - - name: vault - - onchanges: - - file: vault_package_install_file_managed - - onlyif: test -f /etc/systemd/system/vault.service - -vault_package_install_file_symlink: - file.symlink: + file.managed: - name: /usr/local/bin/vault - - target: /opt/vault/bin/vault - - force: true - -vault_package_install_cmd_run: - cmd.run: - - name: setcap cap_ipc_lock=+ep /opt/vault/bin/vault - - onchanges: - - archive: vault_package_install_archive_extracted + - mode: '0755' + - require: + - archive: install_vault_binary install_package_dependencies: pkg.installed: diff --git a/salt/vault/service.sls b/salt/vault/service.sls index d386160..99fe944 100644 --- a/salt/vault/service.sls +++ b/salt/vault/service.sls @@ -9,6 +9,12 @@ vault_service_init_file_managed: - name: {{ vault.service.path }} - source: {{ vault.service.source }} - template: jinja + - defaults: +{%- if vault.dev_mode %} + config: -dev -dev-root-token-id=root -config /srv/salt/vault/files/server.dev.hcl +{% else %} + config: -config=/etc/vault/conf.d +{% endif -%} vault_service_running: service.running: @@ -17,6 +23,3 @@ vault_service_running: - reload: True - require: - selinux: manage_selinux_mode - - watch: - - archive: vault_package_install_archive_extracted - - file: vault_configure_service_file From 361668608f3b78e84789538dec3ffb077ceba007 Mon Sep 17 00:00:00 2001 From: Triet 
Le Date: Tue, 10 Sep 2019 08:56:09 -0400 Subject: [PATCH 20/34] Fixes format to conform with editorconfig --- .editorconfig | 4 +- main.tf | 18 +- modules/iam/outputs.tf | 1 - outputs.tf | 1 - salt/_modules/vault.py | 1630 +++++++++++++++++----------------- salt/_states/vault.py | 494 +++++------ salt/_utils/vault.py | 46 +- salt/vault/install.sls | 1 - tests/vault-py2/variables.tf | 1 - tests/vault-py3/variables.tf | 1 - 10 files changed, 1096 insertions(+), 1101 deletions(-) diff --git a/.editorconfig b/.editorconfig index a2f7c68..5134e9c 100755 --- a/.editorconfig +++ b/.editorconfig @@ -13,8 +13,8 @@ charset = utf-8 trim_trailing_whitespace = false [*.py] -indent_style = space -indent_size = 2 +indent_style = tab +indent_size = 4 [*.go] indent_style = tab diff --git a/main.tf b/main.tf index c8cff50..2f64fe2 100644 --- a/main.tf +++ b/main.tf @@ -100,15 +100,15 @@ data "template_file" "appscript" { salt_content_archive = local.s3_salt_vault_content salt_grains_json = join("", ["'", jsonencode({ - api_port = var.api_port - cluster_port = var.cluster_port - dynamodb_table = local.dynamodb_table - inbound_cidrs = concat(var.inbound_cidrs, local.default_inbound_cdirs) - kms_key_id = local.kms_key_id - logs_path = local.logs_path - region = data.aws_region.current.name - ssm_path = local.ssm_root_path - version = var.vault_version + api_port = var.api_port + cluster_port = var.cluster_port + dynamodb_table = local.dynamodb_table + inbound_cidrs = concat(var.inbound_cidrs, local.default_inbound_cdirs) + kms_key_id = local.kms_key_id + logs_path = local.logs_path + region = data.aws_region.current.name + ssm_path = local.ssm_root_path + version = var.vault_version }), "'"]) } } diff --git a/modules/iam/outputs.tf b/modules/iam/outputs.tf index a784e0f..e25ecdd 100644 --- a/modules/iam/outputs.tf +++ b/modules/iam/outputs.tf @@ -5,4 +5,3 @@ output "profile_name" { value = aws_iam_instance_profile.instance.name } - diff --git a/outputs.tf b/outputs.tf index 9a7a3cd..c3bd94c 100644 --- a/outputs.tf +++ b/outputs.tf @@ -2,4 +2,3 @@ output "vault_url" { description = "URL to access Vault UI" value = join("", ["https://", aws_route53_record.this.fqdn]) } - diff --git a/salt/_modules/vault.py b/salt/_modules/vault.py index b4849b7..9bdbaf0 100644 --- a/salt/_modules/vault.py +++ b/salt/_modules/vault.py @@ -15,866 +15,866 @@ log = logging.getLogger(__name__) try: - import hvac - DEPS_INSTALLED = True + import hvac + DEPS_INSTALLED = True except ImportError as e: - log.debug('Unable to import the dependencies...') - log.exception(e) - DEPS_INSTALLED = False + log.debug('Unable to import the dependencies...') + log.exception(e) + DEPS_INSTALLED = False class InsufficientParameters(Exception): - pass + pass def __virtual__(): - return DEPS_INSTALLED + return DEPS_INSTALLED def get_policies_manager(): - """ - Retrieve an object containing helper methods for the policy manager + """ + Retrieve an object containing helper methods for the policy manager - Returns: - [VaultPolicyManager] -- Policy Manager - """ - return VaultPolicyManager() + Returns: + [VaultPolicyManager] -- Policy Manager + """ + return VaultPolicyManager() def get_secret_engines_manager(): - """ - Retrieve an object containing helper methods for the secrets engines manager + """ + Retrieve an object containing helper methods for the secrets engines manager - Returns: - [VaultSecretsManager] -- Secrets Engines Manager - """ - return VaultSecretsManager() + Returns: + [VaultSecretsManager] -- Secrets Engines Manager + """ + return 
VaultSecretsManager() def get_auth_methods_manager(): - """[summary] - Retrieve an object containing helper methods for the auth methods manager + """[summary] + Retrieve an object containing helper methods for the auth methods manager - Returns: - [VaultAuthManager] -- Auth Methods Manager - """ - return VaultAuthManager() + Returns: + [VaultAuthManager] -- Auth Methods Manager + """ + return VaultAuthManager() def get_audit_device_manager(): - """[summary] - Retrieve an object containing helper methods for the audit device manager + """[summary] + Retrieve an object containing helper methods for the audit device manager - Returns: - [VaultAuditManager] -- Audit Device Manager - """ - return VaultAuditManager() + Returns: + [VaultAuditManager] -- Audit Device Manager + """ + return VaultAuditManager() class VaultAuthMethod: - type = None - path = None - description = None - config = None - auth_config = None - extra_config = None - - def __init__(self, type, path, description, config=None, auth_config=None, extra_config=None): - """ - Instanciate class - - :param type: Authentication type - :type type: str - :param path: Authentication mount point - :type path: str - :param description: Authentication description - :type description: str - :param config: Authentication config - :type config: dict - :param auth_config: Authentification specific configuration - :type auth_config: dict - :param extra_config: Extra Authentification configurations - :type extra_config: dict - """ - self.type = type - self.path = path.replace("/", "") - self.description = (description if description else "") - self.config = {} - for elem in config: - if config[elem] != "": - self.config[elem] = config[elem] - self.auth_config = auth_config - self.extra_config = extra_config - - def get_unique_id(self): - """ - Return a unique hash by auth method only using the type and path - - :return: str - """ - unique_str = str(self.type + self.path) - sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() - return sha256_hash - - def get_tuning_hash(self): - """ - Return a unique ID per tuning configuration - - :return: str - """ - conf_str = self.description + str(self.config) - sha256_hash = hashlib.sha256(conf_str.encode()).hexdigest() - return sha256_hash - - def __eq__(self, other): - return self.get_unique_id() == other.get_unique_id() - - def __repr__(self): - return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % - (self.path, self.type, self.description, str(self.config), - self.get_unique_id())) + type = None + path = None + description = None + config = None + auth_config = None + extra_config = None + + def __init__(self, type, path, description, config=None, auth_config=None, extra_config=None): + """ + Instanciate class + + :param type: Authentication type + :type type: str + :param path: Authentication mount point + :type path: str + :param description: Authentication description + :type description: str + :param config: Authentication config + :type config: dict + :param auth_config: Authentification specific configuration + :type auth_config: dict + :param extra_config: Extra Authentification configurations + :type extra_config: dict + """ + self.type = type + self.path = path.replace("/", "") + self.description = (description if description else "") + self.config = {} + for elem in config: + if config[elem] != "": + self.config[elem] = config[elem] + self.auth_config = auth_config + self.extra_config = extra_config + + def get_unique_id(self): + """ + Return a unique hash by auth 
method only using the type and path + + :return: str + """ + unique_str = str(self.type + self.path) + sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() + return sha256_hash + + def get_tuning_hash(self): + """ + Return a unique ID per tuning configuration + + :return: str + """ + conf_str = self.description + str(self.config) + sha256_hash = hashlib.sha256(conf_str.encode()).hexdigest() + return sha256_hash + + def __eq__(self, other): + return self.get_unique_id() == other.get_unique_id() + + def __repr__(self): + return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % + (self.path, self.type, self.description, str(self.config), + self.get_unique_id())) class VaultSecretEngine: - """ - Vault secrete engine container - """ - type = None - path = None - description = None - config = None - secret_config = None - extra_config = None - - def __init__(self, type, path, description, config=None, secret_config=None, extra_config=None): - """ - Instantiate Class - - :param type: Secret type - :type type: str - :param path: Secret mount point - :type path: str - :param description: Secret description - :type description: str - :param config: Secret basic config - :type config: dict - :param secret_config: Secret specific configuration - :type secret_config: dict - :param extra_config: Secret extra configuration - :type extra_config: dict - """ - self.type = type - self.path = path.replace("/", "") - self.description = (description if description else "") - self.config = dict() - self.config["force_no_cache"] = False - for elem in config: - if config[elem] != "": - self.config[elem] = config[elem] - self.secret_config = secret_config - self.extra_config = extra_config - - def get_unique_id(self): - """ - Return a unique hash by secret engine only using the type and path - - :return: str - """ - unique_str = str(self.type + self.path) - sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() - return sha256_hash - - def __eq__(self, other): - return self.get_unique_id() == other.get_unique_id() - - def __repr__(self): - return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % - (self.path, self.type, self.description, str(self.config), - self.get_unique_id())) + """ + Vault secrete engine container + """ + type = None + path = None + description = None + config = None + secret_config = None + extra_config = None + + def __init__(self, type, path, description, config=None, secret_config=None, extra_config=None): + """ + Instantiate Class + + :param type: Secret type + :type type: str + :param path: Secret mount point + :type path: str + :param description: Secret description + :type description: str + :param config: Secret basic config + :type config: dict + :param secret_config: Secret specific configuration + :type secret_config: dict + :param extra_config: Secret extra configuration + :type extra_config: dict + """ + self.type = type + self.path = path.replace("/", "") + self.description = (description if description else "") + self.config = dict() + self.config["force_no_cache"] = False + for elem in config: + if config[elem] != "": + self.config[elem] = config[elem] + self.secret_config = secret_config + self.extra_config = extra_config + + def get_unique_id(self): + """ + Return a unique hash by secret engine only using the type and path + + :return: str + """ + unique_str = str(self.type + self.path) + sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() + return sha256_hash + + def __eq__(self, other): + return self.get_unique_id() == 
other.get_unique_id() + + def __repr__(self): + return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % + (self.path, self.type, self.description, str(self.config), + self.get_unique_id())) class VaultAuditDevice: - type = None - path = None - description = None - options = None + type = None + path = None + description = None + options = None - def __init__(self, type, path, description, options): - self.type = type - self.path = path.replace("/", "") - self.description = (description if description else "") - self.options = options + def __init__(self, type, path, description, options): + self.type = type + self.path = path.replace("/", "") + self.description = (description if description else "") + self.options = options - def get_device_unique_id(self): - unique_str = str(self.type + self.path + - self.description + str(self.options)) - sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() - return sha256_hash + def get_device_unique_id(self): + unique_str = str(self.type + self.path + + self.description + str(self.options)) + sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() + return sha256_hash - def __eq__(self, other): - return self.get_device_unique_id() == other.get_device_unique_id() + def __eq__(self, other): + return self.get_device_unique_id() == other.get_device_unique_id() - def __repr__(self): - return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % - (self.path, self.type, self.description, str(self.options), - self.get_device_unique_id())) + def __repr__(self): + return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % + (self.path, self.type, self.description, str(self.options), + self.get_device_unique_id())) class VaultPolicyManager(): - """ - Module for managing policies within Vault - """ - - def __init__(self): - log.info("Initializing Vault Policy Manager...") - - def get_remote_policies(self, client, ret): - """ - Reading policies from configs folder - """ - log.info('Retrieving policies from vault...') - polices = [] - try: - policies_resp = client.sys.list_policies() - - for policy in policies_resp['data']['policies']: - if not (policy == 'root' or policy == 'default'): - polices.append(policy) - - log.debug('Current policies: %s' % - ', '.join(polices)) - log.info('Finished retrieving policies from vault.') - - except Exception as e: - ret['result'] = False - log.exception(e) - - return polices - - def load_local_policies(self, policy_dir, ret): - """ - Reading policies from configs folder - """ - log.info('Loading policies from local config folder...') - policies = [] - try: - for policy_file in glob.iglob(os.path.join(policy_dir, "*.hcl")): - name = os.path.splitext(os.path.basename(policy_file))[0] - prefix = policy_file.split(os.sep)[-2] - log.debug("Local policy %s - prefix: %s - name: %s found" - % (policy_file, prefix, name)) - - with open(policy_file, 'r') as fd: - policies.append({ - "name": name, - "content": fd.read() - }) - - log.info('Finished loading policies local config folder.') - except Exception: - raise - - return policies - - def push_policies(self, client, remote_policies, local_policies, ret): - """ - Sync policies from configs folder to vault - """ - log.info('Pushing policies from local config folder to vault...') - new_policies = [] - try: - for policy in local_policies: - client.sys.create_or_update_policy( - name=policy['name'], - policy=policy['content'] - ) - if policy['name'] in remote_policies: - log.debug('Policy "%s" has been updated.', policy["name"]) - else: - 
new_policies.append(policy["name"]) - log.debug('Policy "%s" has been created.', policy["name"]) - - log.info('Finished pushing policies local config folder to vault.') - - # Build return object - ret['changes']['old'] = remote_policies - if len(new_policies) > 0: - ret['changes']['new'] = json.loads(json.dumps(new_policies)) - else: - ret['changes']['new'] = "No changes" - except Exception as e: - ret['result'] = False - log.exception(e) - - def cleanup_policies(self, client, remote_policies, local_policies, ret): - """ - Cleaning up policies - """ - log.info('Cleaning up vault policies...') - has_change = False - try: - for policy in remote_policies: - if policy not in [pol['name'] for pol in local_policies]: - log.debug( - '"%s" is not found in configs folder. Removing it from vault...', policy) - has_change = True - client.sys.delete_policy(name=policy) - log.debug('"%s" is removed.', policy) - - if has_change: - ret['change']['new'] = json.loads(json.dumps( - [ob['name'] for ob in local_policies])) - - log.info('Finished cleaning up vault policies.') - except Exception as e: - ret['result'] = False - log.exception(e) - - def sync(self, client, policy_dir, ret): - - log.info('-------------------------------------') - - remote_policies = [] - local_policies = [] - - if client == None: - client = __utils__['vault.build_client']() - try: - remote_policies = self.get_remote_policies(client, ret) - local_policies = self.get_local_policies(policy_dir, ret) - self.push_policies(client, remote_policies, local_policies, ret) - self.cleanup_policies(client, remote_policies, local_policies, ret) - - ret['result'] = True - except Exception as e: - ret['result'] = False - log.exception(e) - log.info('-------------------------------------') - return ret + """ + Module for managing policies within Vault + """ + + def __init__(self): + log.info("Initializing Vault Policy Manager...") + + def get_remote_policies(self, client, ret): + """ + Reading policies from configs folder + """ + log.info('Retrieving policies from vault...') + polices = [] + try: + policies_resp = client.sys.list_policies() + + for policy in policies_resp['data']['policies']: + if not (policy == 'root' or policy == 'default'): + polices.append(policy) + + log.debug('Current policies: %s' % + ', '.join(polices)) + log.info('Finished retrieving policies from vault.') + + except Exception as e: + ret['result'] = False + log.exception(e) + + return polices + + def load_local_policies(self, policy_dir, ret): + """ + Reading policies from configs folder + """ + log.info('Loading policies from local config folder...') + policies = [] + try: + for policy_file in glob.iglob(os.path.join(policy_dir, "*.hcl")): + name = os.path.splitext(os.path.basename(policy_file))[0] + prefix = policy_file.split(os.sep)[-2] + log.debug("Local policy %s - prefix: %s - name: %s found" + % (policy_file, prefix, name)) + + with open(policy_file, 'r') as fd: + policies.append({ + "name": name, + "content": fd.read() + }) + + log.info('Finished loading policies local config folder.') + except Exception: + raise + + return policies + + def push_policies(self, client, remote_policies, local_policies, ret): + """ + Sync policies from configs folder to vault + """ + log.info('Pushing policies from local config folder to vault...') + new_policies = [] + try: + for policy in local_policies: + client.sys.create_or_update_policy( + name=policy['name'], + policy=policy['content'] + ) + if policy['name'] in remote_policies: + log.debug('Policy "%s" has been updated.', 
policy["name"]) + else: + new_policies.append(policy["name"]) + log.debug('Policy "%s" has been created.', policy["name"]) + + log.info('Finished pushing policies local config folder to vault.') + + # Build return object + ret['changes']['old'] = remote_policies + if len(new_policies) > 0: + ret['changes']['new'] = json.loads(json.dumps(new_policies)) + else: + ret['changes']['new'] = "No changes" + except Exception as e: + ret['result'] = False + log.exception(e) + + def cleanup_policies(self, client, remote_policies, local_policies, ret): + """ + Cleaning up policies + """ + log.info('Cleaning up vault policies...') + has_change = False + try: + for policy in remote_policies: + if policy not in [pol['name'] for pol in local_policies]: + log.debug( + '"%s" is not found in configs folder. Removing it from vault...', policy) + has_change = True + client.sys.delete_policy(name=policy) + log.debug('"%s" is removed.', policy) + + if has_change: + ret['change']['new'] = json.loads(json.dumps( + [ob['name'] for ob in local_policies])) + + log.info('Finished cleaning up vault policies.') + except Exception as e: + ret['result'] = False + log.exception(e) + + def sync(self, client, policy_dir, ret): + + log.info('-------------------------------------') + + remote_policies = [] + local_policies = [] + + if client == None: + client = __utils__['vault.build_client']() + try: + remote_policies = self.get_remote_policies(client, ret) + local_policies = self.get_local_policies(policy_dir, ret) + self.push_policies(client, remote_policies, local_policies, ret) + self.cleanup_policies(client, remote_policies, local_policies, ret) + + ret['result'] = True + except Exception as e: + ret['result'] = False + log.exception(e) + log.info('-------------------------------------') + return ret class VaultAuthManager(): - """ - Module for managing Vault Authentication Methods - """ - - def __init__(self): - log.info("Initializing Vault Auth Manager...") - - def get_remote_auth_methods(self, client, ret): - """ - Retrieve auth methods from vault - """ - log.info('Retrieving auth methods from Vault...') - auth_resp = client.sys.list_auth_methods() - - auth_methods = [] - try: - for auth_method in auth_resp['data']: - auth_methods.append( - VaultAuthMethod( - type=auth_resp[auth_method]['type'], - path=(auth_resp[auth_method]["path"] - if 'path' in auth_resp[auth_method] else auth_method), - description=auth_resp[auth_method]["description"], - config=OrderedDict( - sorted(auth_resp[auth_method]["config"].items())) - ) - ) - except Exception: - raise - - log.info('Finished retrieving auth methods from vault.') - return auth_methods - - def populate_local_auth_methods(self, configs, ret): - log.info('Populating local auth methods...') - - auth_methods = [] - try: - for auth_method in configs: - auth_config = None - extra_config = None - - if "auth_config" in auth_method: - auth_config = OrderedDict( - sorted(auth_method["auth_config"].items())) - - if "extra_config" in auth_method: - extra_config = OrderedDict( - sorted(auth_method["extra_config"].items())) - - auth_methods.append( - VaultAuthMethod( - type=auth_method["type"], - path=auth_method["path"], - description=auth_method["description"], - config=OrderedDict( - sorted(auth_method["config"].items())), - auth_config=auth_config, - extra_config=extra_config - ) - ) - log.info('Finished populating local auth methods.') - except Exception: - raise - - return auth_methods - - def configure_auth_methods(self, client, remote_methods, local_methods, ret): - 
log.info('Processing and configuring auth methods...') - - new_auth_methods = [] - ldap_groups = [] - - try: - for auth_method in local_methods: - log.debug('Checking if auth method "%s" is enabled...', - auth_method.path) - if auth_method in remote_methods: - log.debug( - 'Auth method "%s" is already enabled. Tuning...', auth_method.path) - client.sys.tune_auth_method( - path=auth_method.path, - description=auth_method.description, - default_lease_ttl=auth_method.config["default_lease_ttl"], - max_lease_ttl=auth_method.config["max_lease_ttl"] - ) - log.debug('Auth method "%s" is tuned.', auth_method.type) - else: - log.debug( - 'Auth method "%s" is not enabled. Enabling now...', auth_method.path) - client.sys.enable_auth_method( - method_type=auth_method.type, - path=auth_method.path, - description=auth_method.description, - config=auth_method.config - ) - log.debug('Auth method "%s" is enabled.', auth_method.type) - new_auth_methods.append(auth_method.type) - - # Provision config for specific auth method - if auth_method.auth_config: - if auth_method.type == "ldap": - log.debug('Provisioning configuration for LDAP...') - client.auth.ldap.configure(**auth_method.auth_config) - log.debug('Configuration for LDAP is provisioned.') - else: - log.debug( - 'Auth method "%s" does not contain any specific configurations.', auth_method.type) - - if auth_method.extra_config: - log.debug( - 'Provisioning extra configurations for auth method "%s"', auth_method.type) - # Get LDAP group mapping from vault - ldap_list_group_response = client.auth.ldap.list_groups() - if ldap_list_group_response != None: - ldap_groups = ldap_list_group_response["data"]["keys"] - - log.debug("LDAP groups from vault: %s", str(ldap_groups)) - - # Update LDAP group mapping - log.debug( - 'Writing LDAP group -> Policy mappings for "%s"', str(auth_method.path)) - local_config_groups = auth_method.extra_config["group_policy_map"] - for key in local_config_groups: - log.debug('LDAP Group ["%s"] -> Policies %s', - str(key), local_config_groups[key]) - - client.auth.ldap.create_or_update_group( - name=key, - policies=local_config_groups[key] - ) - - # Clean up LDAP group mapping - if ldap_groups != None: - for group in ldap_groups: - if group in {k.lower(): v for k, v in local_config_groups.items()}: - log.debug( - 'LDAP group mapping ["%s"] exists in configuration, no cleanup necessary', group) - else: - log.info( - 'LDAP group mapping ["%s"] does not exists in configuration, deleting...', group) - client.auth.ldap.delete_group( - name=group - ) - log.info( - 'LDAP group mapping ["%s"] deleted.', group) - else: - log.debug( - 'Auth method "%s" does not contain any extra configurations.', auth_method.type - ) - # Build return object - ret['changes']['old'] = json.loads(json.dumps( - [ob.type for ob in remote_methods])) - - if len(new_auth_methods) > 0: - ret['changes']['new'] = json.loads( - json.dumps(new_auth_methods)) - else: - ret['changes']['new'] = "No changes" - - log.info('Finished processing and configuring auth methods...') - except Exception: - raise - - def cleanup_auth_methods(self, client, remote_methods, local_methods, ret): - log.info('Cleaning up auth methods...') - has_change = False - - try: - for auth_method in remote_methods: - if auth_method not in local_methods: - has_change = True - log.info( - 'Auth method "%s" does not exist in configuration. 
Disabling...', auth_method.type) - client.sys.disable_auth_method( - path=auth_method.path - ) - log.info('Auth method "%s" is disabled.', auth_method.type) - - log.info('Finished cleaning up auth methods.') - if has_change: - ret['changes']['new'] = json.loads(json.dumps( - [ob.type for ob in local_methods])) - except Exception: - raise + """ + Module for managing Vault Authentication Methods + """ + + def __init__(self): + log.info("Initializing Vault Auth Manager...") + + def get_remote_auth_methods(self, client, ret): + """ + Retrieve auth methods from vault + """ + log.info('Retrieving auth methods from Vault...') + auth_resp = client.sys.list_auth_methods() + + auth_methods = [] + try: + for auth_method in auth_resp['data']: + auth_methods.append( + VaultAuthMethod( + type=auth_resp[auth_method]['type'], + path=(auth_resp[auth_method]["path"] + if 'path' in auth_resp[auth_method] else auth_method), + description=auth_resp[auth_method]["description"], + config=OrderedDict( + sorted(auth_resp[auth_method]["config"].items())) + ) + ) + except Exception: + raise + + log.info('Finished retrieving auth methods from vault.') + return auth_methods + + def populate_local_auth_methods(self, configs, ret): + log.info('Populating local auth methods...') + + auth_methods = [] + try: + for auth_method in configs: + auth_config = None + extra_config = None + + if "auth_config" in auth_method: + auth_config = OrderedDict( + sorted(auth_method["auth_config"].items())) + + if "extra_config" in auth_method: + extra_config = OrderedDict( + sorted(auth_method["extra_config"].items())) + + auth_methods.append( + VaultAuthMethod( + type=auth_method["type"], + path=auth_method["path"], + description=auth_method["description"], + config=OrderedDict( + sorted(auth_method["config"].items())), + auth_config=auth_config, + extra_config=extra_config + ) + ) + log.info('Finished populating local auth methods.') + except Exception: + raise + + return auth_methods + + def configure_auth_methods(self, client, remote_methods, local_methods, ret): + log.info('Processing and configuring auth methods...') + + new_auth_methods = [] + ldap_groups = [] + + try: + for auth_method in local_methods: + log.debug('Checking if auth method "%s" is enabled...', + auth_method.path) + if auth_method in remote_methods: + log.debug( + 'Auth method "%s" is already enabled. Tuning...', auth_method.path) + client.sys.tune_auth_method( + path=auth_method.path, + description=auth_method.description, + default_lease_ttl=auth_method.config["default_lease_ttl"], + max_lease_ttl=auth_method.config["max_lease_ttl"] + ) + log.debug('Auth method "%s" is tuned.', auth_method.type) + else: + log.debug( + 'Auth method "%s" is not enabled. 
Enabling now...', auth_method.path) + client.sys.enable_auth_method( + method_type=auth_method.type, + path=auth_method.path, + description=auth_method.description, + config=auth_method.config + ) + log.debug('Auth method "%s" is enabled.', auth_method.type) + new_auth_methods.append(auth_method.type) + + # Provision config for specific auth method + if auth_method.auth_config: + if auth_method.type == "ldap": + log.debug('Provisioning configuration for LDAP...') + client.auth.ldap.configure(**auth_method.auth_config) + log.debug('Configuration for LDAP is provisioned.') + else: + log.debug( + 'Auth method "%s" does not contain any specific configurations.', auth_method.type) + + if auth_method.extra_config: + log.debug( + 'Provisioning extra configurations for auth method "%s"', auth_method.type) + # Get LDAP group mapping from vault + ldap_list_group_response = client.auth.ldap.list_groups() + if ldap_list_group_response != None: + ldap_groups = ldap_list_group_response["data"]["keys"] + + log.debug("LDAP groups from vault: %s", str(ldap_groups)) + + # Update LDAP group mapping + log.debug( + 'Writing LDAP group -> Policy mappings for "%s"', str(auth_method.path)) + local_config_groups = auth_method.extra_config["group_policy_map"] + for key in local_config_groups: + log.debug('LDAP Group ["%s"] -> Policies %s', + str(key), local_config_groups[key]) + + client.auth.ldap.create_or_update_group( + name=key, + policies=local_config_groups[key] + ) + + # Clean up LDAP group mapping + if ldap_groups != None: + for group in ldap_groups: + if group in {k.lower(): v for k, v in local_config_groups.items()}: + log.debug( + 'LDAP group mapping ["%s"] exists in configuration, no cleanup necessary', group) + else: + log.info( + 'LDAP group mapping ["%s"] does not exists in configuration, deleting...', group) + client.auth.ldap.delete_group( + name=group + ) + log.info( + 'LDAP group mapping ["%s"] deleted.', group) + else: + log.debug( + 'Auth method "%s" does not contain any extra configurations.', auth_method.type + ) + # Build return object + ret['changes']['old'] = json.loads(json.dumps( + [ob.type for ob in remote_methods])) + + if len(new_auth_methods) > 0: + ret['changes']['new'] = json.loads( + json.dumps(new_auth_methods)) + else: + ret['changes']['new'] = "No changes" + + log.info('Finished processing and configuring auth methods...') + except Exception: + raise + + def cleanup_auth_methods(self, client, remote_methods, local_methods, ret): + log.info('Cleaning up auth methods...') + has_change = False + + try: + for auth_method in remote_methods: + if auth_method not in local_methods: + has_change = True + log.info( + 'Auth method "%s" does not exist in configuration. 
Disabling...', auth_method.type) + client.sys.disable_auth_method( + path=auth_method.path + ) + log.info('Auth method "%s" is disabled.', auth_method.type) + + log.info('Finished cleaning up auth methods.') + if has_change: + ret['changes']['new'] = json.loads(json.dumps( + [ob.type for ob in local_methods])) + except Exception: + raise class VaultSecretsManager(): - """ - Module for handling Vault secret engines - """ - - def __init__(self): - log.info("Initializing Vault Secret Manager...") - - def get_remote_secrets_engines(self, client, ret): - """ - Retrieve secret engines from vault server - """ - log.info('Retrieving secrets engines from Vault') - remote_secret_engines = [] - try: - log.info(client) - secrets_engines_resp = client.sys.list_mounted_secrets_engines() - for engine in secrets_engines_resp['data']: - remote_secret_engines.append( - VaultSecretEngine( - type=secrets_engines_resp[engine]['type'], - path=(secrets_engines_resp[engine]["path"] - if 'path' in secrets_engines_resp[engine] else engine), - description=secrets_engines_resp[engine]["description"], - config=OrderedDict( - sorted(secrets_engines_resp[engine]["config"].items())) - ) - ) - remote_secret_engines.sort(key=lambda x: x.type) - except Exception: - raise - - log.info('Finished retrieving secrets engines from vault.') - return remote_secret_engines - - def populate_local_secrets_engines(self, configs, ret): - """ - Retrieving secret engines from local config file - """ - log.info('Populating local secret engines...') - local_secret_engines = [] - try: - for secret_engine in configs: - config = None - secret_config = None - extra_config = None - - if 'secret_config' in secret_engine: - if secret_engine["secret_config"] != None: - secret_config = OrderedDict( - sorted(secret_engine["secret_config"].items())) - - if 'extra_config' in secret_engine: - if secret_engine["extra_config"] != None: - extra_config = OrderedDict( - sorted(secret_engine["extra_config"].items())) - - if 'config' in secret_engine: - if secret_engine["config"] != None: - config = OrderedDict( - sorted(secret_engine["config"].items())) - - local_secret_engines.append(VaultSecretEngine( - type=secret_engine["type"], - path=secret_engine["path"], - description=secret_engine["description"], - config=config, - secret_config=secret_config, - extra_config=extra_config - )) - - local_secret_engines.sort(key=lambda x: x.type) - except Exception: - raise - - log.info('Finished populating local secret engines.') - return local_secret_engines - - def configure_secrets_engines(self, client, remote_engines, local_engines, ret): - log.info('Processing and configuring secrets engines...') - new_secrets_engines = [] - try: - for secret_engine in local_engines: - log.debug('Checking if secret engine "%s" at path "%s" is enabled...', - secret_engine.type, - secret_engine.path) - if secret_engine in remote_engines: - log.debug( - 'Secret engine "%s" at path "%s" is already enabled. Tuning...', - secret_engine.type, - secret_engine.path) - - client.sys.tune_mount_configuration( - path=secret_engine.path, - description=secret_engine.description, - default_lease_ttl=secret_engine.config["default_lease_ttl"], - max_lease_ttl=secret_engine.config["max_lease_ttl"] - ) - log.debug('Secret engine "%s" at path "%s" is tuned.', - secret_engine.type, secret_engine.path) - else: - log.debug( - 'Secret engine "%s" at path "%s" is not enabled. 
Enabling now...', - secret_engine.type, - secret_engine.path) - new_secrets_engines.append(secret_engine.type) - client.sys.enable_secrets_engine( - backend_type=secret_engine.type, - path=secret_engine.path, - description=secret_engine.description, - config=secret_engine.config - ) - log.debug('Secret engine " % s" at path " % s" is enabled.', - secret_engine.type, secret_engine.path) - - if secret_engine.secret_config != None: - log.info( - 'Provisioning specific configurations for "%s" secrets engine...', secret_engine.type) - - if secret_engine.type == 'ad': - client.secrets.activedirectory.configure( - **secret_engine.secret_config - ) - if secret_engine.type == 'database': - client.secrets.database.configure( - **secret_engine.secret_config - ) - - log.info( - 'Finished provisioning specific configurations for "%s" secrets engine...', secret_engine.type) - - if secret_engine.extra_config != None: - log.info( - 'Provisioning extra conifgurations for for "%s" secrets engine...', secret_engine.type) - - if secret_engine.type == 'ad': - # Get roles from vault - existing_roles = None - try: - existing_roles = client.secrets.activedirectory.list_roles() - log.debug(existing_roles) - except Exception as e: - log.exception(e) - - # Add new roles - local_roles = secret_engine.extra_config['roles'] - for key in local_roles: - log.debug('AD Role ["%s"] -> Role %s', - str(key), local_roles[key]) - try: - client.secrets.activedirectory.create_or_update_role( - name=key, - service_account_name=local_roles[key]['service_account_name'], - ttl=local_roles[key]['ttl'] - ) - except Exception as e: - log.exception(e) - raise salt.exceptions.SaltInvocationError(e) - - # Remove missing roles - if existing_roles != None: - for role in existing_roles: - if role in {k.lower(): v for k, v in local_roles.items()}: - log.debug( - 'AD role ["%s"] exists in configuration, no cleanup necessary', role) - else: - log.info( - 'Ad role ["%s"] does not exists in configuration, deleting...', role) - client.secrets.activedirectory.delete_role( - name=role - ) - log.info( - 'AD role has been ["%s"] deleted.', role) - else: - log.debug( - 'Secret engine "%s" does not contain any extra configurations.', secret_engine.type - ) - except Exception: - raise - - log.info('Finished proccessing and configuring secrets engines.') - - # Build return object - ret['changes']['old'] = json.loads(json.dumps([ - "Type: {} - Path: {}".format(ob.type, ob.path) for ob in remote_engines])) - - if len(new_secrets_engines) > 0: - ret['changes']['new'] = json.loads( - json.dumps(new_secrets_engines)) - else: - ret['changes']['new'] = "No changes" - - def cleanup_secrets_engines(self, client, remote_engines, local_engines, ret): - log.info('Cleaning up secrets engines...') - has_changes = False - - try: - for secret_engine in remote_engines: - if not (secret_engine.type == "system" or - secret_engine.type == "cubbyhole" or - secret_engine.type == "identity" or - secret_engine.type == "generic"): - if secret_engine in local_engines: - log.debug('Secrets engine "%s" at path "%s" exists in configuration, no cleanup necessary.', - secret_engine.type, secret_engine.path) - else: - log.debug('Secrets engine "%s" at path "%s" does not exist in configuration. 
Disabling...', - secret_engine.type, secret_engine.path) - has_changes = True - client.sys.disable_secrets_engine( - path=secret_engine.path - ) - log.info('Secrets engine "%s" at path "%s" is disabled.', - secret_engine.type, secret_engine.type) - except Exception: - raise - - log.info('Finished cleaning up secrets engines.') - - if has_changes: - ret['changes']['new'] = json.loads(json.dumps([ - "Type: {} - Path: {}".format(ob.type, ob.path) for ob in local_engines])) + """ + Module for handling Vault secret engines + """ + + def __init__(self): + log.info("Initializing Vault Secret Manager...") + + def get_remote_secrets_engines(self, client, ret): + """ + Retrieve secret engines from vault server + """ + log.info('Retrieving secrets engines from Vault') + remote_secret_engines = [] + try: + log.info(client) + secrets_engines_resp = client.sys.list_mounted_secrets_engines() + for engine in secrets_engines_resp['data']: + remote_secret_engines.append( + VaultSecretEngine( + type=secrets_engines_resp[engine]['type'], + path=(secrets_engines_resp[engine]["path"] + if 'path' in secrets_engines_resp[engine] else engine), + description=secrets_engines_resp[engine]["description"], + config=OrderedDict( + sorted(secrets_engines_resp[engine]["config"].items())) + ) + ) + remote_secret_engines.sort(key=lambda x: x.type) + except Exception: + raise + + log.info('Finished retrieving secrets engines from vault.') + return remote_secret_engines + + def populate_local_secrets_engines(self, configs, ret): + """ + Retrieving secret engines from local config file + """ + log.info('Populating local secret engines...') + local_secret_engines = [] + try: + for secret_engine in configs: + config = None + secret_config = None + extra_config = None + + if 'secret_config' in secret_engine: + if secret_engine["secret_config"] != None: + secret_config = OrderedDict( + sorted(secret_engine["secret_config"].items())) + + if 'extra_config' in secret_engine: + if secret_engine["extra_config"] != None: + extra_config = OrderedDict( + sorted(secret_engine["extra_config"].items())) + + if 'config' in secret_engine: + if secret_engine["config"] != None: + config = OrderedDict( + sorted(secret_engine["config"].items())) + + local_secret_engines.append(VaultSecretEngine( + type=secret_engine["type"], + path=secret_engine["path"], + description=secret_engine["description"], + config=config, + secret_config=secret_config, + extra_config=extra_config + )) + + local_secret_engines.sort(key=lambda x: x.type) + except Exception: + raise + + log.info('Finished populating local secret engines.') + return local_secret_engines + + def configure_secrets_engines(self, client, remote_engines, local_engines, ret): + log.info('Processing and configuring secrets engines...') + new_secrets_engines = [] + try: + for secret_engine in local_engines: + log.debug('Checking if secret engine "%s" at path "%s" is enabled...', + secret_engine.type, + secret_engine.path) + if secret_engine in remote_engines: + log.debug( + 'Secret engine "%s" at path "%s" is already enabled. Tuning...', + secret_engine.type, + secret_engine.path) + + client.sys.tune_mount_configuration( + path=secret_engine.path, + description=secret_engine.description, + default_lease_ttl=secret_engine.config["default_lease_ttl"], + max_lease_ttl=secret_engine.config["max_lease_ttl"] + ) + log.debug('Secret engine "%s" at path "%s" is tuned.', + secret_engine.type, secret_engine.path) + else: + log.debug( + 'Secret engine "%s" at path "%s" is not enabled. 
Enabling now...', + secret_engine.type, + secret_engine.path) + new_secrets_engines.append(secret_engine.type) + client.sys.enable_secrets_engine( + backend_type=secret_engine.type, + path=secret_engine.path, + description=secret_engine.description, + config=secret_engine.config + ) + log.debug('Secret engine " % s" at path " % s" is enabled.', + secret_engine.type, secret_engine.path) + + if secret_engine.secret_config != None: + log.info( + 'Provisioning specific configurations for "%s" secrets engine...', secret_engine.type) + + if secret_engine.type == 'ad': + client.secrets.activedirectory.configure( + **secret_engine.secret_config + ) + if secret_engine.type == 'database': + client.secrets.database.configure( + **secret_engine.secret_config + ) + + log.info( + 'Finished provisioning specific configurations for "%s" secrets engine...', secret_engine.type) + + if secret_engine.extra_config != None: + log.info( + 'Provisioning extra conifgurations for for "%s" secrets engine...', secret_engine.type) + + if secret_engine.type == 'ad': + # Get roles from vault + existing_roles = None + try: + existing_roles = client.secrets.activedirectory.list_roles() + log.debug(existing_roles) + except Exception as e: + log.exception(e) + + # Add new roles + local_roles = secret_engine.extra_config['roles'] + for key in local_roles: + log.debug('AD Role ["%s"] -> Role %s', + str(key), local_roles[key]) + try: + client.secrets.activedirectory.create_or_update_role( + name=key, + service_account_name=local_roles[key]['service_account_name'], + ttl=local_roles[key]['ttl'] + ) + except Exception as e: + log.exception(e) + raise salt.exceptions.SaltInvocationError(e) + + # Remove missing roles + if existing_roles != None: + for role in existing_roles: + if role in {k.lower(): v for k, v in local_roles.items()}: + log.debug( + 'AD role ["%s"] exists in configuration, no cleanup necessary', role) + else: + log.info( + 'Ad role ["%s"] does not exists in configuration, deleting...', role) + client.secrets.activedirectory.delete_role( + name=role + ) + log.info( + 'AD role has been ["%s"] deleted.', role) + else: + log.debug( + 'Secret engine "%s" does not contain any extra configurations.', secret_engine.type + ) + except Exception: + raise + + log.info('Finished proccessing and configuring secrets engines.') + + # Build return object + ret['changes']['old'] = json.loads(json.dumps([ + "Type: {} - Path: {}".format(ob.type, ob.path) for ob in remote_engines])) + + if len(new_secrets_engines) > 0: + ret['changes']['new'] = json.loads( + json.dumps(new_secrets_engines)) + else: + ret['changes']['new'] = "No changes" + + def cleanup_secrets_engines(self, client, remote_engines, local_engines, ret): + log.info('Cleaning up secrets engines...') + has_changes = False + + try: + for secret_engine in remote_engines: + if not (secret_engine.type == "system" or + secret_engine.type == "cubbyhole" or + secret_engine.type == "identity" or + secret_engine.type == "generic"): + if secret_engine in local_engines: + log.debug('Secrets engine "%s" at path "%s" exists in configuration, no cleanup necessary.', + secret_engine.type, secret_engine.path) + else: + log.debug('Secrets engine "%s" at path "%s" does not exist in configuration. 
Disabling...', + secret_engine.type, secret_engine.path) + has_changes = True + client.sys.disable_secrets_engine( + path=secret_engine.path + ) + log.info('Secrets engine "%s" at path "%s" is disabled.', + secret_engine.type, secret_engine.type) + except Exception: + raise + + log.info('Finished cleaning up secrets engines.') + + if has_changes: + ret['changes']['new'] = json.loads(json.dumps([ + "Type: {} - Path: {}".format(ob.type, ob.path) for ob in local_engines])) class VaultAuditManager(): - """ - Module for handling Vault audit devices - """ - - def __init__(self): - log.info("Initializing Vault Audit Manager...") - - def get_remote_audit_devices(self, client, ret): - log.info("Retrieving audit devices from vault...") - devices = [] - try: - audit_devices_resp = client.sys.list_enabled_audit_devices() - for device in audit_devices_resp['data']: - audit_device = audit_devices_resp[device] - devices.append( - VaultAuditDevice( - type=audit_device['type'], - path=(audit_device["path"] - if 'path' in audit_device else device), - description=audit_device["description"], - options=json.dumps(audit_device["options"]) - ) - ) - - log.info('Finished retrieving audit devices from vault.') - except Exception: - raise - - return devices - - def get_local_audit_devices(self, configs, ret): - log.info("Loading audit devices from local config...") - devices = [] - if configs: - try: - for audit_device in configs: - if 'options' in audit_device: - options = json.dumps(audit_device["options"]) - log.debug(options) - - devices.append( - VaultAuditDevice( - type=audit_device["type"], - path=audit_device["path"], - description=audit_device["description"], - options=options - ) - ) - - log.info('Finished loading audit devices from local config.') - except Exception: - raise - - return devices - - def configure_audit_devices(self, client, remote_devices, local_devices, ret): - log.info('Processing and configuring audit devices...') - new_audit_devices = [] - try: - for audit_device in local_devices: - log.debug('Checking if audit device "%s" at path "%s" is enabled...', - audit_device.type, audit_device.path) - - if audit_device in remote_devices: - log.debug('Audit device "%s" at path "%s" is already enabled.', - audit_device.type, audit_device.path) - else: - log.debug( - 'Audit device "%s" at path "%s" is not enabled. 
Enabling now...', audit_device.type, audit_device.path) - new_audit_devices.append(audit_device.type) - client.sys.enable_audit_device( - device_type=audit_device.type, - path=audit_device.path, - description=audit_device.description, - options=json.loads(audit_device.options) - ) - log.debug('Audit device "%s" at path "%s" is enabled.', - audit_device.type, audit_device.path) - - log.info('Finished processing audit devices.') - # Build return object - ret['changes']['old'] = json.loads(json.dumps( - [ob.type for ob in remote_devices])) - - if len(new_audit_devices) > 0: - ret['changes']['new'] = json.loads( - json.dumps(new_audit_devices)) - else: - ret['changes']['new'] = "No changes" - - except Exception: - raise - - def cleanup_audit_devices(self, client, remote_devices, local_devices, ret): - log.info('Cleaning up audit devices...') - has_changes = False - try: - for audit_device in remote_devices: - if audit_device not in local_devices: - log.info('Disabling audit device "%s" at path "%s"...', - audit_device.type, audit_device.path) - has_changes = True - client.sys.disable_audit_device( - path=audit_device.path - ) - log.info('Finished cleaning up audit devices.') - - if has_changes: - ret['changes']['new'] = json.loads(json.dumps( - [ob.type for ob in local_devices])) - except Exception: - raise + """ + Module for handling Vault audit devices + """ + + def __init__(self): + log.info("Initializing Vault Audit Manager...") + + def get_remote_audit_devices(self, client, ret): + log.info("Retrieving audit devices from vault...") + devices = [] + try: + audit_devices_resp = client.sys.list_enabled_audit_devices() + for device in audit_devices_resp['data']: + audit_device = audit_devices_resp[device] + devices.append( + VaultAuditDevice( + type=audit_device['type'], + path=(audit_device["path"] + if 'path' in audit_device else device), + description=audit_device["description"], + options=json.dumps(audit_device["options"]) + ) + ) + + log.info('Finished retrieving audit devices from vault.') + except Exception: + raise + + return devices + + def get_local_audit_devices(self, configs, ret): + log.info("Loading audit devices from local config...") + devices = [] + if configs: + try: + for audit_device in configs: + if 'options' in audit_device: + options = json.dumps(audit_device["options"]) + log.debug(options) + + devices.append( + VaultAuditDevice( + type=audit_device["type"], + path=audit_device["path"], + description=audit_device["description"], + options=options + ) + ) + + log.info('Finished loading audit devices from local config.') + except Exception: + raise + + return devices + + def configure_audit_devices(self, client, remote_devices, local_devices, ret): + log.info('Processing and configuring audit devices...') + new_audit_devices = [] + try: + for audit_device in local_devices: + log.debug('Checking if audit device "%s" at path "%s" is enabled...', + audit_device.type, audit_device.path) + + if audit_device in remote_devices: + log.debug('Audit device "%s" at path "%s" is already enabled.', + audit_device.type, audit_device.path) + else: + log.debug( + 'Audit device "%s" at path "%s" is not enabled. 
Enabling now...', audit_device.type, audit_device.path) + new_audit_devices.append(audit_device.type) + client.sys.enable_audit_device( + device_type=audit_device.type, + path=audit_device.path, + description=audit_device.description, + options=json.loads(audit_device.options) + ) + log.debug('Audit device "%s" at path "%s" is enabled.', + audit_device.type, audit_device.path) + + log.info('Finished processing audit devices.') + # Build return object + ret['changes']['old'] = json.loads(json.dumps( + [ob.type for ob in remote_devices])) + + if len(new_audit_devices) > 0: + ret['changes']['new'] = json.loads( + json.dumps(new_audit_devices)) + else: + ret['changes']['new'] = "No changes" + + except Exception: + raise + + def cleanup_audit_devices(self, client, remote_devices, local_devices, ret): + log.info('Cleaning up audit devices...') + has_changes = False + try: + for audit_device in remote_devices: + if audit_device not in local_devices: + log.info('Disabling audit device "%s" at path "%s"...', + audit_device.type, audit_device.path) + has_changes = True + client.sys.disable_audit_device( + path=audit_device.path + ) + log.info('Finished cleaning up audit devices.') + + if has_changes: + ret['changes']['new'] = json.loads(json.dumps( + [ob.type for ob in local_devices])) + except Exception: + raise diff --git a/salt/_states/vault.py b/salt/_states/vault.py index 8aad145..702bff9 100644 --- a/salt/_states/vault.py +++ b/salt/_states/vault.py @@ -6,283 +6,283 @@ log = logging.getLogger(__name__) try: - import hvac - import boto3 - DEPS_INSTALLED = True + import hvac + import boto3 + DEPS_INSTALLED = True except ImportError as e: - log.debug('Unable to import the libraries.') - log.exception(e) - DEPS_INSTALLED = False + log.debug('Unable to import the libraries.') + log.exception(e) + DEPS_INSTALLED = False __all__ = ['initialize'] def __virtual__(): - return DEPS_INSTALLED + return DEPS_INSTALLED def initialized(name, ssm_path, recovery_shares=5, recovery_threshold=3): - """ - Ensure that the vault instance has been initialized and run the - initialization if it has not. Storing the root token to SSM parameter - - Arguments: - name {string} -- The id used for the state definition - ssm_path {string} -- The path to SSM parameter that will store the root token - - Keyword Arguments: - recovery_shares {int} -- Specifies the number of shares to split the recovery key into. (default: {5}) - recovery_threshold {int} -- Specifies the number of shares required to reconstruct the recovery key. This must be less than or equal to recovery_shares. 
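The guard at the top of this state module, shown in isolation: Salt only loads the module when `__virtual__` returns truthy, so a missing hvac or boto3 skips the module instead of erroring. A runnable sketch of the same pattern:

    import logging

    log = logging.getLogger(__name__)

    try:
        import hvac
        import boto3
        DEPS_INSTALLED = True
    except ImportError as err:
        log.debug("Unable to import the libraries: %s", err)
        DEPS_INSTALLED = False

    def __virtual__():
        # Salt skips loading this module when this returns False.
        return DEPS_INSTALLED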
(default: {3}) - - Returns: - ret {dict} -- Result of the execution - """ - ret = {'name': name, - 'comment': '', - 'result': '', - 'changes': {}} - - client = __utils__['vault.build_client']() - - is_initialized = client.sys.is_initialized() - - if is_initialized: - ret['result'] = True - ret['comment'] = 'Vault is already initialized' - else: - result = client.sys.initialize( - recovery_shares=recovery_shares, - recovery_threshold=recovery_threshold - ) - root_token = result['root_token'] - recovery_keys = result['recovery_keys'] - is_success = client.sys.is_initialized() - - ret['result'] = is_success - ret['changes'] = { - 'root_credentials': { - 'new': { - 'recover_keys': '/{}/{}'.format(ssm_path, 'recovery_keys'), - 'root_token': '/{}/{}'.format(ssm_path, 'root_token') - }, - 'old': {} - } - } - - # upload root token ssm parameter store - if is_success: - ssm_client = boto3.client('ssm') - # saving root token - ssm_client.put_parameter( - Name='/{}/{}'.format(ssm_path, 'root_token'), - Value=root_token, - Type="SecureString", - Overwrite=True - ) - - # saving recovery keys - ssm_client.put_parameter( - Name='/{}/{}'.format(ssm_path, 'recovery_keys'), - Value=json.dumps(recovery_keys), - Type="SecureString", - Overwrite=True - ) - - ret['comment'] = 'Vault has {}initialized'.format( - '' if is_success else 'failed to be ') - return ret + """ + Ensure that the vault instance has been initialized and run the + initialization if it has not. Storing the root token to SSM parameter + + Arguments: + name {string} -- The id used for the state definition + ssm_path {string} -- The path to SSM parameter that will store the root token + + Keyword Arguments: + recovery_shares {int} -- Specifies the number of shares to split the recovery key into. (default: {5}) + recovery_threshold {int} -- Specifies the number of shares required to reconstruct the recovery key. This must be less than or equal to recovery_shares. 
(default: {3}) + + Returns: + ret {dict} -- Result of the execution + """ + ret = {'name': name, + 'comment': '', + 'result': '', + 'changes': {}} + + client = __utils__['vault.build_client']() + + is_initialized = client.sys.is_initialized() + + if is_initialized: + ret['result'] = True + ret['comment'] = 'Vault is already initialized' + else: + result = client.sys.initialize( + recovery_shares=recovery_shares, + recovery_threshold=recovery_threshold + ) + root_token = result['root_token'] + recovery_keys = result['recovery_keys'] + is_success = client.sys.is_initialized() + + ret['result'] = is_success + ret['changes'] = { + 'root_credentials': { + 'new': { + 'recover_keys': '/{}/{}'.format(ssm_path, 'recovery_keys'), + 'root_token': '/{}/{}'.format(ssm_path, 'root_token') + }, + 'old': {} + } + } + + # upload root token ssm parameter store + if is_success: + ssm_client = boto3.client('ssm') + # saving root token + ssm_client.put_parameter( + Name='/{}/{}'.format(ssm_path, 'root_token'), + Value=root_token, + Type="SecureString", + Overwrite=True + ) + + # saving recovery keys + ssm_client.put_parameter( + Name='/{}/{}'.format(ssm_path, 'recovery_keys'), + Value=json.dumps(recovery_keys), + Type="SecureString", + Overwrite=True + ) + + ret['comment'] = 'Vault has {}initialized'.format( + '' if is_success else 'failed to be ') + return ret def secret_engines_synced(name, configs=[]): - """ - Ensure secrets engines are synced with Vault + """ + Ensure secrets engines are synced with Vault - Arguments: - name {string} -- The id used for the state definition + Arguments: + name {string} -- The id used for the state definition - Keyword Arguments: - configs {list} -- A list of configuration rules that defined the secrets engines (default: []) + Keyword Arguments: + configs {list} -- A list of configuration rules that defined the secrets engines (default: []) - Returns: - ret {dict} -- Result of the execution - """ + Returns: + ret {dict} -- Result of the execution + """ - client = __utils__['vault.build_client']() - remote_secret_engines = [] - local_secret_engines = [] - ret = { - 'name': name, - 'comment': '', - 'result': '', - 'changes': {} - } + client = __utils__['vault.build_client']() + remote_secret_engines = [] + local_secret_engines = [] + ret = { + 'name': name, + 'comment': '', + 'result': '', + 'changes': {} + } - log.debug(json.dumps(configs)) + log.debug(json.dumps(configs)) - secretsManager = __salt__['vault.get_secret_engines_manager']() + secretsManager = __salt__['vault.get_secret_engines_manager']() - try: - remote_secret_engines = secretsManager.get_remote_secrets_engines( - client, ret) + try: + remote_secret_engines = secretsManager.get_remote_secrets_engines( + client, ret) - local_secret_engines = secretsManager.populate_local_secrets_engines( - configs, ret) + local_secret_engines = secretsManager.populate_local_secrets_engines( + configs, ret) - secretsManager.configure_secrets_engines( - client, - remote_secret_engines, - local_secret_engines, - ret - ) + secretsManager.configure_secrets_engines( + client, + remote_secret_engines, + local_secret_engines, + ret + ) - secretsManager.cleanup_secrets_engines( - client, - remote_secret_engines, - local_secret_engines, - ret - ) + secretsManager.cleanup_secrets_engines( + client, + remote_secret_engines, + local_secret_engines, + ret + ) - ret['result'] = True - except Exception as e: - ret['result'] = False - log.exception(e) + ret['result'] = True + except Exception as e: + ret['result'] = False + log.exception(e) - 
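Once `initialized` succeeds, the root token and recovery keys exist only in SSM Parameter Store. A companion sketch for reading them back with boto3; the parameter names follow the `/{ssm_path}/root_token` and `/{ssm_path}/recovery_keys` layout used above, and the concrete `vault` prefix is hypothetical:

    import json
    import boto3

    ssm = boto3.client("ssm")
    root_token = ssm.get_parameter(
        Name="/vault/root_token", WithDecryption=True
    )["Parameter"]["Value"]
    recovery_keys = json.loads(ssm.get_parameter(
        Name="/vault/recovery_keys", WithDecryption=True
    )["Parameter"]["Value"])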
return ret + return ret def auth_methods_synced(name, configs=[]): - """ - Ensure authentication methods are synced with Vault - - Arguments: - name {string} -- The id used for the state definition - - Keyword Arguments: - configs {list} -- A list of configuration rules that defined the authentication methods (default: []) - Returns: - ret {dict} -- Result of the execution - """ - - client = __utils__['vault.build_client']() - remote_auth_methods = [] - local_auth_methods = [] - ret = { - 'name': name, - 'comment': '', - 'result': '', - 'changes': {} - } - - authsManager = __salt__['vault.get_auth_methods_manager']() - - try: - - remote_auth_methods = authsManager.get_remote_auth_methods(client, ret) - local_auth_methods = authsManager.populate_local_auth_methods( - configs, ret) - - authsManager.configure_auth_methods( - client, - remote_auth_methods, - local_auth_methods, - ret - ) - - authsManager.cleanup_auth_methods( - client, - remote_auth_methods, - local_auth_methods, - ret - ) - - ret['result'] = True - except Exception as e: - ret['result'] = False - log.exception(e) - - return ret + """ + Ensure authentication methods are synced with Vault + + Arguments: + name {string} -- The id used for the state definition + + Keyword Arguments: + configs {list} -- A list of configuration rules that defined the authentication methods (default: []) + Returns: + ret {dict} -- Result of the execution + """ + + client = __utils__['vault.build_client']() + remote_auth_methods = [] + local_auth_methods = [] + ret = { + 'name': name, + 'comment': '', + 'result': '', + 'changes': {} + } + + authsManager = __salt__['vault.get_auth_methods_manager']() + + try: + + remote_auth_methods = authsManager.get_remote_auth_methods(client, ret) + local_auth_methods = authsManager.populate_local_auth_methods( + configs, ret) + + authsManager.configure_auth_methods( + client, + remote_auth_methods, + local_auth_methods, + ret + ) + + authsManager.cleanup_auth_methods( + client, + remote_auth_methods, + local_auth_methods, + ret + ) + + ret['result'] = True + except Exception as e: + ret['result'] = False + log.exception(e) + + return ret def policies_synced(name, policies=[]): - """ - Ensure policies are synced with Vault - - Arguments: - name {string} -- The id used for the state definition - - Keyword Arguments: - policies {list} -- A list of policies to by synced with Vault (default: []) - Returns: - ret {dict} -- Result of the execution - """ - - client = __utils__['vault.build_client']() - remote_policies = [] - local_policies = [] - ret = { - 'name': name, - 'comment': '', - 'result': '', - 'changes': {} - } - - policiesManager = __salt__['vault.get_policies_manager']() - - try: - remote_policies = policiesManager.get_remote_policies(client, ret) - local_policies = json.loads(json.dumps(policies)) - policiesManager.push_policies( - client, remote_policies, local_policies, ret) - policiesManager.cleanup_policies( - client, remote_policies, local_policies, ret) - - ret['result'] = True - except Exception as e: - ret['result'] = False - log.exception(e) - return ret + """ + Ensure policies are synced with Vault + + Arguments: + name {string} -- The id used for the state definition + + Keyword Arguments: + policies {list} -- A list of policies to by synced with Vault (default: []) + Returns: + ret {dict} -- Result of the execution + """ + + client = __utils__['vault.build_client']() + remote_policies = [] + local_policies = [] + ret = { + 'name': name, + 'comment': '', + 'result': '', + 'changes': {} + } + + 
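The `configs` argument each of these `*_synced` states accepts is a list of plain dicts. An example entry inferred from the manager code (the field names come from `populate_local_secrets_engines`; the values are hypothetical):

    configs = [
        {
            "type": "kv",
            "path": "services",
            "description": "KV store for service secrets",
            "config": {"default_lease_ttl": "192h", "max_lease_ttl": "192h"},
            "secret_config": None,
            "extra_config": None,
        }
    ]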
policiesManager = __salt__['vault.get_policies_manager']() + + try: + remote_policies = policiesManager.get_remote_policies(client, ret) + local_policies = json.loads(json.dumps(policies)) + policiesManager.push_policies( + client, remote_policies, local_policies, ret) + policiesManager.cleanup_policies( + client, remote_policies, local_policies, ret) + + ret['result'] = True + except Exception as e: + ret['result'] = False + log.exception(e) + return ret def audit_devices_synced(name, configs=[]): - """ - Ensures audit devices are synced with Vault - - Arguments: - name {string} -- The id used for the state definition - - Keyword Arguments: - configs {list} -- A list of configuration rules that defined the audit devices (default: []) - Returns: - ret {dict} -- Result of the execution - """ - - client = __utils__['vault.build_client']() - remote_devices = [] - local_devices = [] - ret = { - 'name': name, - 'comment': '', - 'result': '', - 'changes': {} - } - - auditDevicesManager = __salt__['vault.get_audit_device_manager']() - try: - - remote_devices = auditDevicesManager.get_remote_audit_devices( - client, ret) - - local_devices = auditDevicesManager.get_local_audit_devices( - configs, ret) - - auditDevicesManager.configure_audit_devices( - client, remote_devices, local_devices, ret) - - auditDevicesManager.cleanup_audit_devices( - client, remote_devices, local_devices, ret) - - ret['result'] = True - except Exception as e: - ret['result'] = False - log.exception(e) - return ret + """ + Ensures audit devices are synced with Vault + + Arguments: + name {string} -- The id used for the state definition + + Keyword Arguments: + configs {list} -- A list of configuration rules that defined the audit devices (default: []) + Returns: + ret {dict} -- Result of the execution + """ + + client = __utils__['vault.build_client']() + remote_devices = [] + local_devices = [] + ret = { + 'name': name, + 'comment': '', + 'result': '', + 'changes': {} + } + + auditDevicesManager = __salt__['vault.get_audit_device_manager']() + try: + + remote_devices = auditDevicesManager.get_remote_audit_devices( + client, ret) + + local_devices = auditDevicesManager.get_local_audit_devices( + configs, ret) + + auditDevicesManager.configure_audit_devices( + client, remote_devices, local_devices, ret) + + auditDevicesManager.cleanup_audit_devices( + client, remote_devices, local_devices, ret) + + ret['result'] = True + except Exception as e: + ret['result'] = False + log.exception(e) + return ret diff --git a/salt/_utils/vault.py b/salt/_utils/vault.py index a74ce72..5df82e5 100644 --- a/salt/_utils/vault.py +++ b/salt/_utils/vault.py @@ -16,35 +16,35 @@ def build_client(url=None, token=None): - vault_url = url if url != None else get_vault_url() - client = hvac.Client( - url=vault_url, - token=token - ) + vault_url = url if url != None else get_vault_url() + client = hvac.Client( + url=vault_url, + token=token + ) - if token == None: - client.token = os.environ.get('VAULT_TOKEN') + if token == None: + client.token = os.environ.get('VAULT_TOKEN') - return client + return client def get_vault_url(): - ''' - Returns a string consist of url and port number - ''' - port = __grains__['vault']['api_port'] if __grains__[ - 'vault']['api_port'] != None else 8200 - url = "http://localhost" + ''' + Returns a string consist of url and port number + ''' + port = __grains__['vault']['api_port'] if __grains__[ + 'vault']['api_port'] != None else 8200 + url = "http://localhost" - return "{}:{}".format(url, port) + return 
"{}:{}".format(url, port) def load_config_file(config_path): - configs = None - with open(os.path.join(config_path), 'r') as fd: - try: - configs = yaml.load(fd) - except yaml.YAMLError as e: - log.critical("Unable to load conf file: " + str(e)) - return False - return configs + configs = None + with open(os.path.join(config_path), 'r') as fd: + try: + configs = yaml.load(fd) + except yaml.YAMLError as e: + log.critical("Unable to load conf file: " + str(e)) + return False + return configs diff --git a/salt/vault/install.sls b/salt/vault/install.sls index c063847..236beb5 100644 --- a/salt/vault/install.sls +++ b/salt/vault/install.sls @@ -77,4 +77,3 @@ install_python_dependencies: - reload_modules: True - ignore_installed: True {%- endif %} - diff --git a/tests/vault-py2/variables.tf b/tests/vault-py2/variables.tf index a50b40c..8eb9b08 100644 --- a/tests/vault-py2/variables.tf +++ b/tests/vault-py2/variables.tf @@ -62,4 +62,3 @@ variable "watchmaker_config" { description = "(Optional) URL to a Watchmaker config file" default = "" } - diff --git a/tests/vault-py3/variables.tf b/tests/vault-py3/variables.tf index f11047e..5a27c44 100644 --- a/tests/vault-py3/variables.tf +++ b/tests/vault-py3/variables.tf @@ -45,4 +45,3 @@ variable "watchmaker_config" { description = "(Optional) URL to a Watchmaker config file" default = "" } - From 22eace46743b577f864cde982cfc2ede1f0f90ac Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 11 Sep 2019 15:44:36 -0400 Subject: [PATCH 21/34] Adds Vagrant for local developement --- .gitignore | 4 ++ Vagrantfile | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 122 insertions(+) create mode 100644 Vagrantfile diff --git a/.gitignore b/.gitignore index 17ccdfc..4be99ad 100755 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,7 @@ # ignore go files vendor/ .configs/ + + +#Vagrant related files +.vagrant/ diff --git a/Vagrantfile b/Vagrantfile new file mode 100644 index 0000000..099dedf --- /dev/null +++ b/Vagrantfile @@ -0,0 +1,118 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +BOX_IMAGE = "centos/7" +NODE_COUNT = 1 + +Vagrant.configure("2") do |config| + # The most common configuration options are documented and commented below. + # For a complete reference, please see the online documentation at + # https://docs.vagrantup.com. + + # Every Vagrant development environment requires a box. You can search for + # boxes at https://vagrantcloud.com/search. + # config.vm.box = "centos/7" + + # Disable automatic box update checking. If you disable this, then + # boxes will only be checked for updates when the user runs + # `vagrant box outdated`. This is not recommended. + # config.vm.box_check_update = false + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine. In the example below, + # accessing "localhost:8080" will access port 80 on the guest machine. 
+ # NOTE: This will enable public access to the opened port + #config.vm.network "forwarded_port", guest: 8200, host: 8200 + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine and only allow access + # via 127.0.0.1 to disable public access + #config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" + + # Create a private network, which allows host-only access to the machine + # using a specific IP. + #config.vm.network "private_network", ip: "10.100.0.0" + + # Create a public network, which generally matched to bridged network. + # Bridged networks make the machine appear as another physical device on + # your network. + # config.vm.network "public_network" + + # Share an additional folder to the guest VM. The first argument is + # the path on the host to the actual folder. The second argument is + # the path on the guest to mount the folder. And the optional third + # argument is a set of non-required options. + config.vm.synced_folder "./salt", "/srv/salt" + # Provider-specific configuration so you can fine-tune various + # backing providers for Vagrant. These expose provider-specific options. + # Example for VirtualBox: + # + # config.vm.provider "virtualbox" do |vb| + # # Display the VirtualBox GUI when booting the machine + # vb.gui = true + # + # # Customize the amount of memory on the VM: + # vb.memory = "1024" + # end + # + # View the documentation for the provider you are using for more + # information on available options. + + # Enable provisioning with a shell script. Additional provisioners such as + # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the + # documentation for more information about their specific syntax and use. + (1..NODE_COUNT).each do |i| + config.vm.define "node#{i}" do |subconfig| + subconfig.vm.box = BOX_IMAGE + subconfig.vm.hostname = "node#{i}" + subconfig.vm.network :private_network, ip: "10.0.1.#{i + 10}" + subconfig.vm.network "forwarded_port", guest: 8200, host: 8200, auto_correct: true + end + end + + config.vm.provision "shell", inline: <<-SHELL + cd /root/ + yum update -y && yum upgrade -y + yum install -y curl unzip epel-release yum-utils jq + + yum install -y https://repo.saltstack.com/py3/redhat/salt-py3-repo-2018.3.el7.noarch.rpm + + yum clean expire-cache + + yum install salt-master -y + yum install salt-minion -y + + echo 'Change permission for dirs' + chmod +x /usr/local/bin/ + chgrp -R root /usr/local/bin + chown root /usr/local/bin/ + if [[ ! $(grep '/usr/local/bin' /root/.bash_profile) ]]; then + echo 'export VAULT_ADDR="http://$(hostname):8200"' >> /root/.bash_profile + echo 'export VAULT_TOKEN=root' >> /root/.bash_profile + echo 'alias l="ls -lah"' >> /root/.bash_profile + fi + + if [[ ! $(grep '/usr/local/bin' /home/vagrant/.bash_profile) ]]; then + echo 'export PATH=$PATH:/usr/local/bin' >> /home//vagrant/.bash_profile + echo 'export VAULT_ADDR="http://$(hostname):8200"' >> /home/vagrant/.bash_profile + echo 'export VAULT_TOKEN=root' >> /home/vagrant/.bash_profile + echo 'alias l="ls -lah"' >> /home/vagrant/.bash_profile + fi + + + SHELL + + config.vm.provision "shell", inline: <<-SHELL + + echo "Setting the required salt grains for vault..." + salt-call --local grains.setval vault '{"dev_mode": true, "dev_configs": "-dev -dev-root-token-id=root", "api_port": 8200, "cluster_port": 8201}' + + echo "Updating salt states/modules/utils/grains..." 
+ salt-call --local saltutil.sync_all + SHELL + +end From 3f3a098e50865c64a4f8f4fe8d5410655e39b449 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 11 Sep 2019 16:05:42 -0400 Subject: [PATCH 22/34] Updates salt formula to work with local vagrantfile --- salt/vault/configure.sls | 9 ++++----- salt/vault/files/server.dev.hcl.jinja | 6 ++++++ salt/vault/files/vault.conf.jinja | 2 +- salt/vault/files/vault.service.jinja | 2 +- salt/vault/maps/defaults.yaml | 10 +++++++++- salt/vault/service.sls | 26 +++++++++++++++++--------- 6 files changed, 38 insertions(+), 17 deletions(-) create mode 100644 salt/vault/files/server.dev.hcl.jinja diff --git a/salt/vault/configure.sls b/salt/vault/configure.sls index a1943a5..cccea3b 100644 --- a/salt/vault/configure.sls +++ b/salt/vault/configure.sls @@ -1,12 +1,13 @@ {% from "vault/map.jinja" import vault with context %} -{# only configure if vault is not in dev_mode #} -{%- if not vault.dev_mode %} - vault_configure_service_file: file.managed: +{%- if vault.dev_mode %} + - source: salt://vault/files/server.dev.hcl.jinja +{% else %} - source: salt://vault/files/server.hcl.jinja +{%- endif %} - name: /etc/vault/conf.d/server.hcl - template: jinja - defaults: @@ -24,5 +25,3 @@ vault_configure_service_file: - group: root - mode: '0755' - makedirs: True - -{%- endif %} diff --git a/salt/vault/files/server.dev.hcl.jinja b/salt/vault/files/server.dev.hcl.jinja new file mode 100644 index 0000000..c2d0b07 --- /dev/null +++ b/salt/vault/files/server.dev.hcl.jinja @@ -0,0 +1,6 @@ + +storage "file" { + path = "/etc/vault/data" +} + +ui = true diff --git a/salt/vault/files/vault.conf.jinja b/salt/vault/files/vault.conf.jinja index 3b181c6..f5b987d 100644 --- a/salt/vault/files/vault.conf.jinja +++ b/salt/vault/files/vault.conf.jinja @@ -13,5 +13,5 @@ script # Make sure to use all our CPUs, because Vault can block a scheduler thread export GOMAXPROCS=`nproc` - exec /usr/local/bin/vault server {{ config }} >>/var/log/vault.log 2>&1 + exec /usr/local/bin/vault server -config=/etc/vault/conf.d {{ dev_configs }} >>/var/log/vault.log 2>&1 end script diff --git a/salt/vault/files/vault.service.jinja b/salt/vault/files/vault.service.jinja index b1fc94b..940a140 100644 --- a/salt/vault/files/vault.service.jinja +++ b/salt/vault/files/vault.service.jinja @@ -7,7 +7,7 @@ After=network-online.target User=vault Group=vault PIDFile=/var/run/vault/vault.pid -ExecStart=/usr/local/bin/vault server {{ config }} +ExecStart=/usr/local/bin/vault server -config=/etc/vault/conf.d {{ dev_configs }} ExecReload=/bin/kill -HUP $MAINPID KillMode=process KillSignal=SIGTERM diff --git a/salt/vault/maps/defaults.yaml b/salt/vault/maps/defaults.yaml index cfb90cb..b9b0d80 100644 --- a/salt/vault/maps/defaults.yaml +++ b/salt/vault/maps/defaults.yaml @@ -1,9 +1,12 @@ # -*- coding: utf-8 -*- vault: + version: 1.1.3 repo_base_url: "https://releases.hashicorp.com/vault" dev_mode: False - verify_download: True + dev_configs: "" + api_port: 8200 + cluster_port: 8201 listener_address: "0.0.0.0" inbound_cidrs: - "10.0.0.0/16" @@ -12,3 +15,8 @@ vault: max_lease_ttl: 192h #one week recovery_shares: 5 recovery_threshold: 3 + config_dir_path: /srv/salt/vault/configs + region: "" + dynamodb_table: "" + kms_key_id: "" + diff --git a/salt/vault/service.sls b/salt/vault/service.sls index 99fe944..57e1ca3 100644 --- a/salt/vault/service.sls +++ b/salt/vault/service.sls @@ -1,20 +1,18 @@ {% from "vault/map.jinja" import vault with context %} -manage_selinux_mode: - selinux.mode: - - name: permissive - 
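With the box provisioned and the grains above in place, a quick smoke test against the dev server; the URL and `root` token mirror the `-dev -dev-root-token-id=root` flags set in the vault grain:

    import hvac

    client = hvac.Client(url="http://localhost:8200", token="root")
    assert client.sys.is_initialized()  # dev mode auto-initializes
    assert not client.sys.is_sealed()   # dev mode starts unsealed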
vault_service_init_file_managed: file.managed: - name: {{ vault.service.path }} - source: {{ vault.service.source }} - template: jinja - defaults: -{%- if vault.dev_mode %} - config: -dev -dev-root-token-id=root -config /srv/salt/vault/files/server.dev.hcl -{% else %} - config: -config=/etc/vault/conf.d -{% endif -%} + dev_configs: {{ vault.dev_configs }} + +{%- if not vault.dev_mode %} + +manage_selinux_mode: + selinux.mode: + - name: permissive vault_service_running: service.running: @@ -23,3 +21,13 @@ vault_service_running: - reload: True - require: - selinux: manage_selinux_mode + +{%- else %} + +vault_service_running: + service.running: + - name: vault + - enable: True + - reload: True + +{%- endif %} From f86b42828362d638d9cf35d3ec9f7f560cf2682b Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 11 Sep 2019 16:08:35 -0400 Subject: [PATCH 23/34] Performs cleaning up based on feedback standardized name of the log files removed unused variables for the main.tf formatted appscript to avoid masking errors removed unused pillar in salt maps updated Readme.md to reflect with the change updated .py files to use spaces instead of tabs indent --- .editorconfig | 4 +- Makefile | 2 +- README.md | 2 - main.tf | 12 +- salt/_modules/vault.py | 1630 ++++++++++++++++----------------- salt/_states/vault.py | 494 +++++----- salt/_utils/vault.py | 46 +- salt/vault/map.jinja | 5 +- salt/vault/maps/defaults.yaml | 1 - scripts/appscript.sh | 34 +- variables.tf | 12 - 11 files changed, 1115 insertions(+), 1127 deletions(-) diff --git a/.editorconfig b/.editorconfig index 5134e9c..a2f7c68 100755 --- a/.editorconfig +++ b/.editorconfig @@ -13,8 +13,8 @@ charset = utf-8 trim_trailing_whitespace = false [*.py] -indent_style = tab -indent_size = 4 +indent_style = space +indent_size = 2 [*.go] indent_style = tab diff --git a/Makefile b/Makefile index 1aecccc..8bbadbd 100755 --- a/Makefile +++ b/Makefile @@ -86,7 +86,7 @@ terraform/lint: | guard/program/terraform sh/%: FIND_SH := find . $(FIND_EXCLUDES) -name '*.sh' -type f -print0 sh/lint: | guard/program/shellcheck @ echo "[$@]: Linting shell scripts..." - $(FIND_SH) | $(XARGS) shellcheck {} -e SC2154,SC2155,SC2086 + $(FIND_SH) | $(XARGS) shellcheck {} -e SC2154,SC2086 @ echo "[$@]: Shell scripts PASSED lint test!" json/%: FIND_JSON := find . $(FIND_EXCLUDES) -name '*.json' -type f diff --git a/README.md b/README.md index 9ea05c6..4f35c59 100755 --- a/README.md +++ b/README.md @@ -6,7 +6,6 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy | Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| -| additional\_ips\_allow\_inbound | List of ip address that allow to have access to resources | list(string) | `` | no | | ami\_name\_filter | Will be use to filter out AMI | string | `"spel-minimal-centos-7-hvm-*.x86_64-gp2"` | no | | ami\_name\_regex | Regex to help fine-grain filtering AMI | string | `"spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2"` | no | | ami\_owner | Account id/alias of the AMI owner | string | n/a | yes | @@ -23,7 +22,6 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy | dynamodb\_target\_value | (Optional) The target value for the metric of the scaling policy configuration. 
| number | `"70"` | no | | ec2\_extra\_security\_group\_ids | List of additional security groups to add to EC2 instances | list(string) | `` | no | | ec2\_subnet\_ids | List of subnets where EC2 instances will be launched | list(string) | n/a | yes | -| enable\_access\_logs | Boolean indicating whether to enable access logs for load balancer | bool | `"false"` | no | | enabled\_repos | (Optional) List of repos to be enabled with yum-config-manager. Epel repo will be enabled by default. | list(string) | `` | no | | environment | Type of environment -- must be one of: dev, test, prod | string | n/a | yes | | inbound\_cidrs | (Optional) IP address or range of addresses to be allowed to Firewall Zone. | list(string) | `` | no | diff --git a/main.tf b/main.tf index 2f64fe2..9885eec 100644 --- a/main.tf +++ b/main.tf @@ -32,9 +32,9 @@ locals { # Logs files to be streamed to CloudWatch Logs logs = [ - join("/", [local.logs_path, "salt_call.log"]), - join("/", [local.logs_path, "initialize.log"]), - join("/", [local.logs_path, "sync_config.log"]) + join("/", [local.logs_path, "state.vault.log"]), + join("/", [local.logs_path, "state.vault.initialize.log"]), + join("/", [local.logs_path, "state.vault.sync.log"]) ] tags = merge(var.tags, @@ -204,12 +204,6 @@ resource "aws_lb" "this" { security_groups = [aws_security_group.lb.id] subnets = var.lb_subnet_ids - access_logs { - enabled = var.enable_access_logs - bucket = module.s3_bucket.id - prefix = "ALBLogs" - } - tags = merge({ Name = var.name }, local.tags) } diff --git a/salt/_modules/vault.py b/salt/_modules/vault.py index 9bdbaf0..36e0d49 100644 --- a/salt/_modules/vault.py +++ b/salt/_modules/vault.py @@ -15,866 +15,866 @@ log = logging.getLogger(__name__) try: - import hvac - DEPS_INSTALLED = True + import hvac + DEPS_INSTALLED = True except ImportError as e: - log.debug('Unable to import the dependencies...') - log.exception(e) - DEPS_INSTALLED = False + log.debug('Unable to import the dependencies...') + log.exception(e) + DEPS_INSTALLED = False class InsufficientParameters(Exception): - pass + pass def __virtual__(): - return DEPS_INSTALLED + return DEPS_INSTALLED def get_policies_manager(): - """ - Retrieve an object containing helper methods for the policy manager + """ + Retrieve an object containing helper methods for the policy manager - Returns: - [VaultPolicyManager] -- Policy Manager - """ - return VaultPolicyManager() + Returns: + [VaultPolicyManager] -- Policy Manager + """ + return VaultPolicyManager() def get_secret_engines_manager(): - """ - Retrieve an object containing helper methods for the secrets engines manager + """ + Retrieve an object containing helper methods for the secrets engines manager - Returns: - [VaultSecretsManager] -- Secrets Engines Manager - """ - return VaultSecretsManager() + Returns: + [VaultSecretsManager] -- Secrets Engines Manager + """ + return VaultSecretsManager() def get_auth_methods_manager(): - """[summary] - Retrieve an object containing helper methods for the auth methods manager + """[summary] + Retrieve an object containing helper methods for the auth methods manager - Returns: - [VaultAuthManager] -- Auth Methods Manager - """ - return VaultAuthManager() + Returns: + [VaultAuthManager] -- Auth Methods Manager + """ + return VaultAuthManager() def get_audit_device_manager(): - """[summary] - Retrieve an object containing helper methods for the audit device manager + """[summary] + Retrieve an object containing helper methods for the audit device manager - Returns: - [VaultAuditManager] -- 
Audit Device Manager - """ - return VaultAuditManager() + Returns: + [VaultAuditManager] -- Audit Device Manager + """ + return VaultAuditManager() class VaultAuthMethod: - type = None - path = None - description = None - config = None - auth_config = None - extra_config = None - - def __init__(self, type, path, description, config=None, auth_config=None, extra_config=None): - """ - Instanciate class - - :param type: Authentication type - :type type: str - :param path: Authentication mount point - :type path: str - :param description: Authentication description - :type description: str - :param config: Authentication config - :type config: dict - :param auth_config: Authentification specific configuration - :type auth_config: dict - :param extra_config: Extra Authentification configurations - :type extra_config: dict - """ - self.type = type - self.path = path.replace("/", "") - self.description = (description if description else "") - self.config = {} - for elem in config: - if config[elem] != "": - self.config[elem] = config[elem] - self.auth_config = auth_config - self.extra_config = extra_config - - def get_unique_id(self): - """ - Return a unique hash by auth method only using the type and path - - :return: str - """ - unique_str = str(self.type + self.path) - sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() - return sha256_hash - - def get_tuning_hash(self): - """ - Return a unique ID per tuning configuration - - :return: str - """ - conf_str = self.description + str(self.config) - sha256_hash = hashlib.sha256(conf_str.encode()).hexdigest() - return sha256_hash - - def __eq__(self, other): - return self.get_unique_id() == other.get_unique_id() - - def __repr__(self): - return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % - (self.path, self.type, self.description, str(self.config), - self.get_unique_id())) + type = None + path = None + description = None + config = None + auth_config = None + extra_config = None + + def __init__(self, type, path, description, config=None, auth_config=None, extra_config=None): + """ + Instanciate class + + :param type: Authentication type + :type type: str + :param path: Authentication mount point + :type path: str + :param description: Authentication description + :type description: str + :param config: Authentication config + :type config: dict + :param auth_config: Authentification specific configuration + :type auth_config: dict + :param extra_config: Extra Authentification configurations + :type extra_config: dict + """ + self.type = type + self.path = path.replace("/", "") + self.description = (description if description else "") + self.config = {} + for elem in config: + if config[elem] != "": + self.config[elem] = config[elem] + self.auth_config = auth_config + self.extra_config = extra_config + + def get_unique_id(self): + """ + Return a unique hash by auth method only using the type and path + + :return: str + """ + unique_str = str(self.type + self.path) + sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() + return sha256_hash + + def get_tuning_hash(self): + """ + Return a unique ID per tuning configuration + + :return: str + """ + conf_str = self.description + str(self.config) + sha256_hash = hashlib.sha256(conf_str.encode()).hexdigest() + return sha256_hash + + def __eq__(self, other): + return self.get_unique_id() == other.get_unique_id() + + def __repr__(self): + return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % + (self.path, self.type, self.description, str(self.config), 
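The identity scheme these container classes share, in isolation: two entries compare equal iff sha256(type + path) matches, so a changed description or tuning value alone never triggers a disable/enable cycle. A standalone illustration:

    import hashlib

    def get_unique_id(type_, path):
        # Mirrors VaultAuthMethod.get_unique_id / VaultSecretEngine.get_unique_id.
        return hashlib.sha256((type_ + path).encode()).hexdigest()

    assert get_unique_id("ldap", "ldap") == get_unique_id("ldap", "ldap")
    assert get_unique_id("ldap", "ldap") != get_unique_id("ldap", "corp-ldap")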
self.get_unique_id())) class VaultSecretEngine: - """ - Vault secrete engine container - """ - type = None - path = None - description = None - config = None - secret_config = None - extra_config = None - - def __init__(self, type, path, description, config=None, secret_config=None, extra_config=None): - """ - Instantiate Class - - :param type: Secret type - :type type: str - :param path: Secret mount point - :type path: str - :param description: Secret description - :type description: str - :param config: Secret basic config - :type config: dict - :param secret_config: Secret specific configuration - :type secret_config: dict - :param extra_config: Secret extra configuration - :type extra_config: dict - """ - self.type = type - self.path = path.replace("/", "") - self.description = (description if description else "") - self.config = dict() - self.config["force_no_cache"] = False - for elem in config: - if config[elem] != "": - self.config[elem] = config[elem] - self.secret_config = secret_config - self.extra_config = extra_config - - def get_unique_id(self): - """ - Return a unique hash by secret engine only using the type and path - - :return: str - """ - unique_str = str(self.type + self.path) - sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() - return sha256_hash - - def __eq__(self, other): - return self.get_unique_id() == other.get_unique_id() - - def __repr__(self): - return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % - (self.path, self.type, self.description, str(self.config), - self.get_unique_id())) + """ + Vault secrete engine container + """ + type = None + path = None + description = None + config = None + secret_config = None + extra_config = None + + def __init__(self, type, path, description, config=None, secret_config=None, extra_config=None): + """ + Instantiate Class + + :param type: Secret type + :type type: str + :param path: Secret mount point + :type path: str + :param description: Secret description + :type description: str + :param config: Secret basic config + :type config: dict + :param secret_config: Secret specific configuration + :type secret_config: dict + :param extra_config: Secret extra configuration + :type extra_config: dict + """ + self.type = type + self.path = path.replace("/", "") + self.description = (description if description else "") + self.config = dict() + self.config["force_no_cache"] = False + for elem in config: + if config[elem] != "": + self.config[elem] = config[elem] + self.secret_config = secret_config + self.extra_config = extra_config + + def get_unique_id(self): + """ + Return a unique hash by secret engine only using the type and path + + :return: str + """ + unique_str = str(self.type + self.path) + sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() + return sha256_hash + + def __eq__(self, other): + return self.get_unique_id() == other.get_unique_id() + + def __repr__(self): + return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % + (self.path, self.type, self.description, str(self.config), self.get_unique_id())) class VaultAuditDevice: - type = None - path = None - description = None - options = None + type = None + path = None + description = None + options = None - def __init__(self, type, path, description, options): - self.type = type - self.path = path.replace("/", "") - self.description = (description if description else "") - self.options = options + def __init__(self, type, path, description, options): + self.type = type + self.path = path.replace("/", "") + 
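Note that the constructors drop empty-string config values before anything reaches Vault, so a blank pillar entry cannot clobber a server-side default. The filter in isolation, with hypothetical keys:

    raw_config = {"default_lease_ttl": "192h", "max_lease_ttl": ""}
    config = {key: value for key, value in raw_config.items() if value != ""}
    assert config == {"default_lease_ttl": "192h"}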
self.description = (description if description else "") + self.options = options - def get_device_unique_id(self): - unique_str = str(self.type + self.path + - self.description + str(self.options)) - sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() - return sha256_hash + def get_device_unique_id(self): + unique_str = str(self.type + self.path + + self.description + str(self.options)) + sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() + return sha256_hash - def __eq__(self, other): - return self.get_device_unique_id() == other.get_device_unique_id() + def __eq__(self, other): + return self.get_device_unique_id() == other.get_device_unique_id() - def __repr__(self): - return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % - (self.path, self.type, self.description, str(self.options), - self.get_device_unique_id())) + def __repr__(self): + return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % + (self.path, self.type, self.description, str(self.options), self.get_device_unique_id())) class VaultPolicyManager(): - """ - Module for managing policies within Vault - """ - - def __init__(self): - log.info("Initializing Vault Policy Manager...") - - def get_remote_policies(self, client, ret): - """ - Reading policies from configs folder - """ - log.info('Retrieving policies from vault...') - polices = [] - try: - policies_resp = client.sys.list_policies() - - for policy in policies_resp['data']['policies']: - if not (policy == 'root' or policy == 'default'): - polices.append(policy) - - log.debug('Current policies: %s' % - ', '.join(polices)) - log.info('Finished retrieving policies from vault.') - - except Exception as e: - ret['result'] = False - log.exception(e) - - return polices - - def load_local_policies(self, policy_dir, ret): - """ - Reading policies from configs folder - """ - log.info('Loading policies from local config folder...') - policies = [] - try: - for policy_file in glob.iglob(os.path.join(policy_dir, "*.hcl")): - name = os.path.splitext(os.path.basename(policy_file))[0] - prefix = policy_file.split(os.sep)[-2] - log.debug("Local policy %s - prefix: %s - name: %s found" - % (policy_file, prefix, name)) - - with open(policy_file, 'r') as fd: - policies.append({ - "name": name, - "content": fd.read() - }) - - log.info('Finished loading policies local config folder.') - except Exception: - raise - - return policies - - def push_policies(self, client, remote_policies, local_policies, ret): - """ - Sync policies from configs folder to vault - """ - log.info('Pushing policies from local config folder to vault...') - new_policies = [] - try: - for policy in local_policies: - client.sys.create_or_update_policy( - name=policy['name'], - policy=policy['content'] - ) - if policy['name'] in remote_policies: - log.debug('Policy "%s" has been updated.', policy["name"]) - else: - new_policies.append(policy["name"]) - log.debug('Policy "%s" has been created.', policy["name"]) - - log.info('Finished pushing policies local config folder to vault.') - - # Build return object - ret['changes']['old'] = remote_policies - if len(new_policies) > 0: - ret['changes']['new'] = json.loads(json.dumps(new_policies)) - else: - ret['changes']['new'] = "No changes" - except Exception as e: - ret['result'] = False - log.exception(e) - - def cleanup_policies(self, client, remote_policies, local_policies, ret): - """ - Cleaning up policies - """ - log.info('Cleaning up vault policies...') - has_change = False - try: - for policy in remote_policies: - if policy 
not in [pol['name'] for pol in local_policies]: - log.debug( - '"%s" is not found in configs folder. Removing it from vault...', policy) - has_change = True - client.sys.delete_policy(name=policy) - log.debug('"%s" is removed.', policy) - - if has_change: - ret['change']['new'] = json.loads(json.dumps( - [ob['name'] for ob in local_policies])) - - log.info('Finished cleaning up vault policies.') - except Exception as e: - ret['result'] = False - log.exception(e) - - def sync(self, client, policy_dir, ret): - - log.info('-------------------------------------') - - remote_policies = [] - local_policies = [] - - if client == None: - client = __utils__['vault.build_client']() - try: - remote_policies = self.get_remote_policies(client, ret) - local_policies = self.get_local_policies(policy_dir, ret) - self.push_policies(client, remote_policies, local_policies, ret) - self.cleanup_policies(client, remote_policies, local_policies, ret) - - ret['result'] = True - except Exception as e: - ret['result'] = False - log.exception(e) - log.info('-------------------------------------') - return ret + """ + Module for managing policies within Vault + """ + + def __init__(self): + log.info("Initializing Vault Policy Manager...") + + def get_remote_policies(self, client, ret): + """ + Reading policies from configs folder + """ + log.info('Retrieving policies from vault...') + polices = [] + try: + policies_resp = client.sys.list_policies() + + for policy in policies_resp['data']['policies']: + if not (policy == 'root' or policy == 'default'): + polices.append(policy) + + log.debug('Current policies: %s' % + ', '.join(polices)) + log.info('Finished retrieving policies from vault.') + + except Exception as e: + ret['result'] = False + log.exception(e) + + return polices + + def load_local_policies(self, policy_dir, ret): + """ + Reading policies from configs folder + """ + log.info('Loading policies from local config folder...') + policies = [] + try: + for policy_file in glob.iglob(os.path.join(policy_dir, "*.hcl")): + name = os.path.splitext(os.path.basename(policy_file))[0] + prefix = policy_file.split(os.sep)[-2] + log.debug("Local policy %s - prefix: %s - name: %s found" + % (policy_file, prefix, name)) + + with open(policy_file, 'r') as fd: + policies.append({ + "name": name, + "content": fd.read() + }) + + log.info('Finished loading policies local config folder.') + except Exception: + raise + + return policies + + def push_policies(self, client, remote_policies, local_policies, ret): + """ + Sync policies from configs folder to vault + """ + log.info('Pushing policies from local config folder to vault...') + new_policies = [] + try: + for policy in local_policies: + client.sys.create_or_update_policy( + name=policy['name'], + policy=policy['content'] + ) + if policy['name'] in remote_policies: + log.debug('Policy "%s" has been updated.', policy["name"]) + else: + new_policies.append(policy["name"]) + log.debug('Policy "%s" has been created.', policy["name"]) + + log.info('Finished pushing policies local config folder to vault.') + + # Build return object + ret['changes']['old'] = remote_policies + if len(new_policies) > 0: + ret['changes']['new'] = json.loads(json.dumps(new_policies)) + else: + ret['changes']['new'] = "No changes" + except Exception as e: + ret['result'] = False + log.exception(e) + + def cleanup_policies(self, client, remote_policies, local_policies, ret): + """ + Cleaning up policies + """ + log.info('Cleaning up vault policies...') + has_change = False + try: + for policy in 
remote_policies: + if policy not in [pol['name'] for pol in local_policies]: + log.debug( + '"%s" is not found in configs folder. Removing it from vault...', policy) + has_change = True + client.sys.delete_policy(name=policy) + log.debug('"%s" is removed.', policy) + + if has_change: + ret['change']['new'] = json.loads(json.dumps( + [ob['name'] for ob in local_policies])) + + log.info('Finished cleaning up vault policies.') + except Exception as e: + ret['result'] = False + log.exception(e) + + def sync(self, client, policy_dir, ret): + + log.info('-------------------------------------') + + remote_policies = [] + local_policies = [] + + if client == None: + client = __utils__['vault.build_client']() + try: + remote_policies = self.get_remote_policies(client, ret) + local_policies = self.get_local_policies(policy_dir, ret) + self.push_policies(client, remote_policies, local_policies, ret) + self.cleanup_policies(client, remote_policies, local_policies, ret) + + ret['result'] = True + except Exception as e: + ret['result'] = False + log.exception(e) + log.info('-------------------------------------') + return ret class VaultAuthManager(): - """ - Module for managing Vault Authentication Methods - """ - - def __init__(self): - log.info("Initializing Vault Auth Manager...") - - def get_remote_auth_methods(self, client, ret): - """ - Retrieve auth methods from vault - """ - log.info('Retrieving auth methods from Vault...') - auth_resp = client.sys.list_auth_methods() - - auth_methods = [] - try: - for auth_method in auth_resp['data']: - auth_methods.append( - VaultAuthMethod( - type=auth_resp[auth_method]['type'], - path=(auth_resp[auth_method]["path"] - if 'path' in auth_resp[auth_method] else auth_method), - description=auth_resp[auth_method]["description"], - config=OrderedDict( - sorted(auth_resp[auth_method]["config"].items())) - ) - ) - except Exception: - raise - - log.info('Finished retrieving auth methods from vault.') - return auth_methods - - def populate_local_auth_methods(self, configs, ret): - log.info('Populating local auth methods...') - - auth_methods = [] - try: - for auth_method in configs: - auth_config = None - extra_config = None - - if "auth_config" in auth_method: - auth_config = OrderedDict( - sorted(auth_method["auth_config"].items())) - - if "extra_config" in auth_method: - extra_config = OrderedDict( - sorted(auth_method["extra_config"].items())) - - auth_methods.append( - VaultAuthMethod( - type=auth_method["type"], - path=auth_method["path"], - description=auth_method["description"], - config=OrderedDict( - sorted(auth_method["config"].items())), - auth_config=auth_config, - extra_config=extra_config - ) - ) - log.info('Finished populating local auth methods.') - except Exception: - raise - - return auth_methods - - def configure_auth_methods(self, client, remote_methods, local_methods, ret): - log.info('Processing and configuring auth methods...') - - new_auth_methods = [] - ldap_groups = [] - - try: - for auth_method in local_methods: - log.debug('Checking if auth method "%s" is enabled...', - auth_method.path) - if auth_method in remote_methods: - log.debug( - 'Auth method "%s" is already enabled. Tuning...', auth_method.path) - client.sys.tune_auth_method( - path=auth_method.path, - description=auth_method.description, - default_lease_ttl=auth_method.config["default_lease_ttl"], - max_lease_ttl=auth_method.config["max_lease_ttl"] - ) - log.debug('Auth method "%s" is tuned.', auth_method.type) - else: - log.debug( - 'Auth method "%s" is not enabled. 
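A standalone sketch of the three hvac calls the policy manager is built on; the dev-server URL, `root` token, and `demo-read` policy are illustrative assumptions:

    import hvac

    client = hvac.Client(url="http://localhost:8200", token="root")
    client.sys.create_or_update_policy(
        name="demo-read",
        policy='path "secret/*" { capabilities = ["read"] }',
    )
    assert "demo-read" in client.sys.list_policies()["data"]["policies"]
    client.sys.delete_policy(name="demo-read")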
Enabling now...', auth_method.path) - client.sys.enable_auth_method( - method_type=auth_method.type, - path=auth_method.path, - description=auth_method.description, - config=auth_method.config - ) - log.debug('Auth method "%s" is enabled.', auth_method.type) - new_auth_methods.append(auth_method.type) - - # Provision config for specific auth method - if auth_method.auth_config: - if auth_method.type == "ldap": - log.debug('Provisioning configuration for LDAP...') - client.auth.ldap.configure(**auth_method.auth_config) - log.debug('Configuration for LDAP is provisioned.') - else: - log.debug( - 'Auth method "%s" does not contain any specific configurations.', auth_method.type) - - if auth_method.extra_config: - log.debug( - 'Provisioning extra configurations for auth method "%s"', auth_method.type) - # Get LDAP group mapping from vault - ldap_list_group_response = client.auth.ldap.list_groups() - if ldap_list_group_response != None: - ldap_groups = ldap_list_group_response["data"]["keys"] - - log.debug("LDAP groups from vault: %s", str(ldap_groups)) - - # Update LDAP group mapping - log.debug( - 'Writing LDAP group -> Policy mappings for "%s"', str(auth_method.path)) - local_config_groups = auth_method.extra_config["group_policy_map"] - for key in local_config_groups: - log.debug('LDAP Group ["%s"] -> Policies %s', - str(key), local_config_groups[key]) - - client.auth.ldap.create_or_update_group( - name=key, - policies=local_config_groups[key] - ) - - # Clean up LDAP group mapping - if ldap_groups != None: - for group in ldap_groups: - if group in {k.lower(): v for k, v in local_config_groups.items()}: - log.debug( - 'LDAP group mapping ["%s"] exists in configuration, no cleanup necessary', group) - else: - log.info( - 'LDAP group mapping ["%s"] does not exists in configuration, deleting...', group) - client.auth.ldap.delete_group( - name=group - ) - log.info( - 'LDAP group mapping ["%s"] deleted.', group) - else: - log.debug( - 'Auth method "%s" does not contain any extra configurations.', auth_method.type - ) - # Build return object - ret['changes']['old'] = json.loads(json.dumps( - [ob.type for ob in remote_methods])) - - if len(new_auth_methods) > 0: - ret['changes']['new'] = json.loads( - json.dumps(new_auth_methods)) - else: - ret['changes']['new'] = "No changes" - - log.info('Finished processing and configuring auth methods...') - except Exception: - raise - - def cleanup_auth_methods(self, client, remote_methods, local_methods, ret): - log.info('Cleaning up auth methods...') - has_change = False - - try: - for auth_method in remote_methods: - if auth_method not in local_methods: - has_change = True - log.info( - 'Auth method "%s" does not exist in configuration. 
Disabling...', auth_method.type) - client.sys.disable_auth_method( - path=auth_method.path - ) - log.info('Auth method "%s" is disabled.', auth_method.type) - - log.info('Finished cleaning up auth methods.') - if has_change: - ret['changes']['new'] = json.loads(json.dumps( - [ob.type for ob in local_methods])) - except Exception: - raise + """ + Module for managing Vault Authentication Methods + """ + + def __init__(self): + log.info("Initializing Vault Auth Manager...") + + def get_remote_auth_methods(self, client, ret): + """ + Retrieve auth methods from vault + """ + log.info('Retrieving auth methods from Vault...') + auth_resp = client.sys.list_auth_methods() + + auth_methods = [] + try: + for auth_method in auth_resp['data']: + auth_methods.append( + VaultAuthMethod( + type=auth_resp[auth_method]['type'], + path=(auth_resp[auth_method]["path"] + if 'path' in auth_resp[auth_method] else auth_method), + description=auth_resp[auth_method]["description"], + config=OrderedDict( + sorted(auth_resp[auth_method]["config"].items())) + ) + ) + except Exception: + raise + + log.info('Finished retrieving auth methods from vault.') + return auth_methods + + def populate_local_auth_methods(self, configs, ret): + log.info('Populating local auth methods...') + + auth_methods = [] + try: + for auth_method in configs: + auth_config = None + extra_config = None + + if "auth_config" in auth_method: + auth_config = OrderedDict( + sorted(auth_method["auth_config"].items())) + + if "extra_config" in auth_method: + extra_config = OrderedDict( + sorted(auth_method["extra_config"].items())) + + auth_methods.append( + VaultAuthMethod( + type=auth_method["type"], + path=auth_method["path"], + description=auth_method["description"], + config=OrderedDict( + sorted(auth_method["config"].items())), + auth_config=auth_config, + extra_config=extra_config + ) + ) + log.info('Finished populating local auth methods.') + except Exception: + raise + + return auth_methods + + def configure_auth_methods(self, client, remote_methods, local_methods, ret): + log.info('Processing and configuring auth methods...') + + new_auth_methods = [] + ldap_groups = [] + + try: + for auth_method in local_methods: + log.debug('Checking if auth method "%s" is enabled...', + auth_method.path) + if auth_method in remote_methods: + log.debug( + 'Auth method "%s" is already enabled. Tuning...', auth_method.path) + client.sys.tune_auth_method( + path=auth_method.path, + description=auth_method.description, + default_lease_ttl=auth_method.config["default_lease_ttl"], + max_lease_ttl=auth_method.config["max_lease_ttl"] + ) + log.debug('Auth method "%s" is tuned.', auth_method.type) + else: + log.debug( + 'Auth method "%s" is not enabled. 
Enabling now...', auth_method.path) + client.sys.enable_auth_method( + method_type=auth_method.type, + path=auth_method.path, + description=auth_method.description, + config=auth_method.config + ) + log.debug('Auth method "%s" is enabled.', auth_method.type) + new_auth_methods.append(auth_method.type) + + # Provision config for specific auth method + if auth_method.auth_config: + if auth_method.type == "ldap": + log.debug('Provisioning configuration for LDAP...') + client.auth.ldap.configure(**auth_method.auth_config) + log.debug('Configuration for LDAP is provisioned.') + else: + log.debug( + 'Auth method "%s" does not contain any specific configurations.', auth_method.type) + + if auth_method.extra_config: + log.debug( + 'Provisioning extra configurations for auth method "%s"', auth_method.type) + # Get LDAP group mapping from vault + ldap_list_group_response = client.auth.ldap.list_groups() + if ldap_list_group_response != None: + ldap_groups = ldap_list_group_response["data"]["keys"] + + log.debug("LDAP groups from vault: %s", str(ldap_groups)) + + # Update LDAP group mapping + log.debug( + 'Writing LDAP group -> Policy mappings for "%s"', str(auth_method.path)) + local_config_groups = auth_method.extra_config["group_policy_map"] + for key in local_config_groups: + log.debug('LDAP Group ["%s"] -> Policies %s', + str(key), local_config_groups[key]) + + client.auth.ldap.create_or_update_group( + name=key, + policies=local_config_groups[key] + ) + + # Clean up LDAP group mapping + if ldap_groups != None: + for group in ldap_groups: + if group in {k.lower(): v for k, v in local_config_groups.items()}: + log.debug( + 'LDAP group mapping ["%s"] exists in configuration, no cleanup necessary', group) + else: + log.info( + 'LDAP group mapping ["%s"] does not exists in configuration, deleting...', group) + client.auth.ldap.delete_group( + name=group + ) + log.info( + 'LDAP group mapping ["%s"] deleted.', group) + else: + log.debug( + 'Auth method "%s" does not contain any extra configurations.', auth_method.type + ) + # Build return object + ret['changes']['old'] = json.loads(json.dumps( + [ob.type for ob in remote_methods])) + + if len(new_auth_methods) > 0: + ret['changes']['new'] = json.loads( + json.dumps(new_auth_methods)) + else: + ret['changes']['new'] = "No changes" + + log.info('Finished processing and configuring auth methods...') + except Exception: + raise + + def cleanup_auth_methods(self, client, remote_methods, local_methods, ret): + log.info('Cleaning up auth methods...') + has_change = False + + try: + for auth_method in remote_methods: + if auth_method not in local_methods: + has_change = True + log.info( + 'Auth method "%s" does not exist in configuration. 
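The LDAP group -> policy mapping calls used above, exercised standalone. This sketch assumes the `ldap` auth method is already enabled; the group and policy names are hypothetical:

    import hvac

    client = hvac.Client(url="http://localhost:8200", token="root")
    client.auth.ldap.create_or_update_group(name="vault-admins", policies=["admin"])
    assert "vault-admins" in client.auth.ldap.list_groups()["data"]["keys"]
    client.auth.ldap.delete_group(name="vault-admins")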
Disabling...', auth_method.type) + client.sys.disable_auth_method( + path=auth_method.path + ) + log.info('Auth method "%s" is disabled.', auth_method.type) + + log.info('Finished cleaning up auth methods.') + if has_change: + ret['changes']['new'] = json.loads(json.dumps( + [ob.type for ob in local_methods])) + except Exception: + raise class VaultSecretsManager(): - """ - Module for handling Vault secret engines - """ - - def __init__(self): - log.info("Initializing Vault Secret Manager...") - - def get_remote_secrets_engines(self, client, ret): - """ - Retrieve secret engines from vault server - """ - log.info('Retrieving secrets engines from Vault') - remote_secret_engines = [] - try: - log.info(client) - secrets_engines_resp = client.sys.list_mounted_secrets_engines() - for engine in secrets_engines_resp['data']: - remote_secret_engines.append( - VaultSecretEngine( - type=secrets_engines_resp[engine]['type'], - path=(secrets_engines_resp[engine]["path"] - if 'path' in secrets_engines_resp[engine] else engine), - description=secrets_engines_resp[engine]["description"], - config=OrderedDict( - sorted(secrets_engines_resp[engine]["config"].items())) - ) - ) - remote_secret_engines.sort(key=lambda x: x.type) - except Exception: - raise - - log.info('Finished retrieving secrets engines from vault.') - return remote_secret_engines - - def populate_local_secrets_engines(self, configs, ret): - """ - Retrieving secret engines from local config file - """ - log.info('Populating local secret engines...') - local_secret_engines = [] - try: - for secret_engine in configs: - config = None - secret_config = None - extra_config = None - - if 'secret_config' in secret_engine: - if secret_engine["secret_config"] != None: - secret_config = OrderedDict( - sorted(secret_engine["secret_config"].items())) - - if 'extra_config' in secret_engine: - if secret_engine["extra_config"] != None: - extra_config = OrderedDict( - sorted(secret_engine["extra_config"].items())) - - if 'config' in secret_engine: - if secret_engine["config"] != None: - config = OrderedDict( - sorted(secret_engine["config"].items())) - - local_secret_engines.append(VaultSecretEngine( - type=secret_engine["type"], - path=secret_engine["path"], - description=secret_engine["description"], - config=config, - secret_config=secret_config, - extra_config=extra_config - )) - - local_secret_engines.sort(key=lambda x: x.type) - except Exception: - raise - - log.info('Finished populating local secret engines.') - return local_secret_engines - - def configure_secrets_engines(self, client, remote_engines, local_engines, ret): - log.info('Processing and configuring secrets engines...') - new_secrets_engines = [] - try: - for secret_engine in local_engines: - log.debug('Checking if secret engine "%s" at path "%s" is enabled...', - secret_engine.type, - secret_engine.path) - if secret_engine in remote_engines: - log.debug( - 'Secret engine "%s" at path "%s" is already enabled. Tuning...', - secret_engine.type, - secret_engine.path) - - client.sys.tune_mount_configuration( - path=secret_engine.path, - description=secret_engine.description, - default_lease_ttl=secret_engine.config["default_lease_ttl"], - max_lease_ttl=secret_engine.config["max_lease_ttl"] - ) - log.debug('Secret engine "%s" at path "%s" is tuned.', - secret_engine.type, secret_engine.path) - else: - log.debug( - 'Secret engine "%s" at path "%s" is not enabled. 
Enabling now...', - secret_engine.type, - secret_engine.path) - new_secrets_engines.append(secret_engine.type) - client.sys.enable_secrets_engine( - backend_type=secret_engine.type, - path=secret_engine.path, - description=secret_engine.description, - config=secret_engine.config - ) - log.debug('Secret engine " % s" at path " % s" is enabled.', - secret_engine.type, secret_engine.path) - - if secret_engine.secret_config != None: - log.info( - 'Provisioning specific configurations for "%s" secrets engine...', secret_engine.type) - - if secret_engine.type == 'ad': - client.secrets.activedirectory.configure( - **secret_engine.secret_config - ) - if secret_engine.type == 'database': - client.secrets.database.configure( - **secret_engine.secret_config - ) - - log.info( - 'Finished provisioning specific configurations for "%s" secrets engine...', secret_engine.type) - - if secret_engine.extra_config != None: - log.info( - 'Provisioning extra conifgurations for for "%s" secrets engine...', secret_engine.type) - - if secret_engine.type == 'ad': - # Get roles from vault - existing_roles = None - try: - existing_roles = client.secrets.activedirectory.list_roles() - log.debug(existing_roles) - except Exception as e: - log.exception(e) - - # Add new roles - local_roles = secret_engine.extra_config['roles'] - for key in local_roles: - log.debug('AD Role ["%s"] -> Role %s', - str(key), local_roles[key]) - try: - client.secrets.activedirectory.create_or_update_role( - name=key, - service_account_name=local_roles[key]['service_account_name'], - ttl=local_roles[key]['ttl'] - ) - except Exception as e: - log.exception(e) - raise salt.exceptions.SaltInvocationError(e) - - # Remove missing roles - if existing_roles != None: - for role in existing_roles: - if role in {k.lower(): v for k, v in local_roles.items()}: - log.debug( - 'AD role ["%s"] exists in configuration, no cleanup necessary', role) - else: - log.info( - 'Ad role ["%s"] does not exists in configuration, deleting...', role) - client.secrets.activedirectory.delete_role( - name=role - ) - log.info( - 'AD role has been ["%s"] deleted.', role) - else: - log.debug( - 'Secret engine "%s" does not contain any extra configurations.', secret_engine.type - ) - except Exception: - raise - - log.info('Finished proccessing and configuring secrets engines.') - - # Build return object - ret['changes']['old'] = json.loads(json.dumps([ - "Type: {} - Path: {}".format(ob.type, ob.path) for ob in remote_engines])) - - if len(new_secrets_engines) > 0: - ret['changes']['new'] = json.loads( - json.dumps(new_secrets_engines)) - else: - ret['changes']['new'] = "No changes" - - def cleanup_secrets_engines(self, client, remote_engines, local_engines, ret): - log.info('Cleaning up secrets engines...') - has_changes = False - - try: - for secret_engine in remote_engines: - if not (secret_engine.type == "system" or - secret_engine.type == "cubbyhole" or - secret_engine.type == "identity" or - secret_engine.type == "generic"): - if secret_engine in local_engines: - log.debug('Secrets engine "%s" at path "%s" exists in configuration, no cleanup necessary.', - secret_engine.type, secret_engine.path) - else: - log.debug('Secrets engine "%s" at path "%s" does not exist in configuration. 
Disabling...', - secret_engine.type, secret_engine.path) - has_changes = True - client.sys.disable_secrets_engine( - path=secret_engine.path - ) - log.info('Secrets engine "%s" at path "%s" is disabled.', - secret_engine.type, secret_engine.type) - except Exception: - raise - - log.info('Finished cleaning up secrets engines.') - - if has_changes: - ret['changes']['new'] = json.loads(json.dumps([ - "Type: {} - Path: {}".format(ob.type, ob.path) for ob in local_engines])) + """ + Module for handling Vault secret engines + """ + + def __init__(self): + log.info("Initializing Vault Secret Manager...") + + def get_remote_secrets_engines(self, client, ret): + """ + Retrieve secret engines from vault server + """ + log.info('Retrieving secrets engines from Vault') + remote_secret_engines = [] + try: + log.info(client) + secrets_engines_resp = client.sys.list_mounted_secrets_engines() + for engine in secrets_engines_resp['data']: + remote_secret_engines.append( + VaultSecretEngine( + type=secrets_engines_resp[engine]['type'], + path=(secrets_engines_resp[engine]["path"] + if 'path' in secrets_engines_resp[engine] else engine), + description=secrets_engines_resp[engine]["description"], + config=OrderedDict( + sorted(secrets_engines_resp[engine]["config"].items())) + ) + ) + remote_secret_engines.sort(key=lambda x: x.type) + except Exception: + raise + + log.info('Finished retrieving secrets engines from vault.') + return remote_secret_engines + + def populate_local_secrets_engines(self, configs, ret): + """ + Retrieving secret engines from local config file + """ + log.info('Populating local secret engines...') + local_secret_engines = [] + try: + for secret_engine in configs: + config = None + secret_config = None + extra_config = None + + if 'secret_config' in secret_engine: + if secret_engine["secret_config"] != None: + secret_config = OrderedDict( + sorted(secret_engine["secret_config"].items())) + + if 'extra_config' in secret_engine: + if secret_engine["extra_config"] != None: + extra_config = OrderedDict( + sorted(secret_engine["extra_config"].items())) + + if 'config' in secret_engine: + if secret_engine["config"] != None: + config = OrderedDict( + sorted(secret_engine["config"].items())) + + local_secret_engines.append(VaultSecretEngine( + type=secret_engine["type"], + path=secret_engine["path"], + description=secret_engine["description"], + config=config, + secret_config=secret_config, + extra_config=extra_config + )) + + local_secret_engines.sort(key=lambda x: x.type) + except Exception: + raise + + log.info('Finished populating local secret engines.') + return local_secret_engines + + def configure_secrets_engines(self, client, remote_engines, local_engines, ret): + log.info('Processing and configuring secrets engines...') + new_secrets_engines = [] + try: + for secret_engine in local_engines: + log.debug('Checking if secret engine "%s" at path "%s" is enabled...', + secret_engine.type, + secret_engine.path) + if secret_engine in remote_engines: + log.debug( + 'Secret engine "%s" at path "%s" is already enabled. Tuning...', + secret_engine.type, + secret_engine.path) + + client.sys.tune_mount_configuration( + path=secret_engine.path, + description=secret_engine.description, + default_lease_ttl=secret_engine.config["default_lease_ttl"], + max_lease_ttl=secret_engine.config["max_lease_ttl"] + ) + log.debug('Secret engine "%s" at path "%s" is tuned.', + secret_engine.type, secret_engine.path) + else: + log.debug( + 'Secret engine "%s" at path "%s" is not enabled. 
Enabling now...',
+                        secret_engine.type,
+                        secret_engine.path)
+                    new_secrets_engines.append(secret_engine.type)
+                    client.sys.enable_secrets_engine(
+                        backend_type=secret_engine.type,
+                        path=secret_engine.path,
+                        description=secret_engine.description,
+                        config=secret_engine.config
+                    )
+                    log.debug('Secret engine "%s" at path "%s" is enabled.',
+                              secret_engine.type, secret_engine.path)
+
+                if secret_engine.secret_config != None:
+                    log.info(
+                        'Provisioning specific configurations for "%s" secrets engine...', secret_engine.type)
+
+                    if secret_engine.type == 'ad':
+                        client.secrets.activedirectory.configure(
+                            **secret_engine.secret_config
+                        )
+                    if secret_engine.type == 'database':
+                        client.secrets.database.configure(
+                            **secret_engine.secret_config
+                        )
+
+                    log.info(
+                        'Finished provisioning specific configurations for "%s" secrets engine.', secret_engine.type)
+
+                if secret_engine.extra_config != None:
+                    log.info(
+                        'Provisioning extra configurations for "%s" secrets engine...', secret_engine.type)
+
+                    if secret_engine.type == 'ad':
+                        # Get roles from vault
+                        existing_roles = None
+                        try:
+                            existing_roles = client.secrets.activedirectory.list_roles()
+                            log.debug(existing_roles)
+                        except Exception as e:
+                            log.exception(e)
+
+                        # Add new roles
+                        local_roles = secret_engine.extra_config['roles']
+                        for key in local_roles:
+                            log.debug('AD Role ["%s"] -> Role %s',
+                                      str(key), local_roles[key])
+                            try:
+                                client.secrets.activedirectory.create_or_update_role(
+                                    name=key,
+                                    service_account_name=local_roles[key]['service_account_name'],
+                                    ttl=local_roles[key]['ttl']
+                                )
+                            except Exception as e:
+                                log.exception(e)
+                                raise salt.exceptions.SaltInvocationError(e)
+
+                        # Remove missing roles
+                        if existing_roles != None:
+                            for role in existing_roles:
+                                if role in {k.lower(): v for k, v in local_roles.items()}:
+                                    log.debug(
+                                        'AD role ["%s"] exists in configuration, no cleanup necessary', role)
+                                else:
+                                    log.info(
+                                        'AD role ["%s"] does not exist in configuration, deleting...', role)
+                                    client.secrets.activedirectory.delete_role(
+                                        name=role
+                                    )
+                                    log.info(
+                                        'AD role ["%s"] has been deleted.', role)
+                else:
+                    log.debug(
+                        'Secret engine "%s" does not contain any extra configurations.', secret_engine.type
+                    )
+        except Exception:
+            raise
+
+        log.info('Finished processing and configuring secrets engines.')
+
+        # Build return object
+        ret['changes']['old'] = json.loads(json.dumps([
+            "Type: {} - Path: {}".format(ob.type, ob.path) for ob in remote_engines]))
+
+        if len(new_secrets_engines) > 0:
+            ret['changes']['new'] = json.loads(
+                json.dumps(new_secrets_engines))
+        else:
+            ret['changes']['new'] = "No changes"
+
+    def cleanup_secrets_engines(self, client, remote_engines, local_engines, ret):
+        log.info('Cleaning up secrets engines...')
+        has_changes = False
+
+        try:
+            for secret_engine in remote_engines:
+                if not (secret_engine.type == "system" or
+                        secret_engine.type == "cubbyhole" or
+                        secret_engine.type == "identity" or
+                        secret_engine.type == "generic"):
+                    if secret_engine in local_engines:
+                        log.debug('Secrets engine "%s" at path "%s" exists in configuration, no cleanup necessary.',
+                                  secret_engine.type, secret_engine.path)
+                    else:
+                        log.debug('Secrets engine "%s" at path "%s" does not exist in configuration. Disabling...',
+                                  secret_engine.type, secret_engine.path)
+                        has_changes = True
+                        client.sys.disable_secrets_engine(
+                            path=secret_engine.path
+                        )
+                        log.info('Secrets engine "%s" at path "%s" is disabled.',
+                                 secret_engine.type, secret_engine.path)
+        except Exception:
+            raise
+
+        log.info('Finished cleaning up secrets engines.')
+
+        if has_changes:
+            ret['changes']['new'] = json.loads(json.dumps([
+                "Type: {} - Path: {}".format(ob.type, ob.path) for ob in local_engines]))


 class VaultAuditManager():
-    """
-    Module for handling Vault audit devices
-    """
-
-    def __init__(self):
-        log.info("Initializing Vault Audit Manager...")
-
-    def get_remote_audit_devices(self, client, ret):
-        log.info("Retrieving audit devices from vault...")
-        devices = []
-        try:
-            audit_devices_resp = client.sys.list_enabled_audit_devices()
-            for device in audit_devices_resp['data']:
-                audit_device = audit_devices_resp[device]
-                devices.append(
-                    VaultAuditDevice(
-                        type=audit_device['type'],
-                        path=(audit_device["path"]
-                              if 'path' in audit_device else device),
-                        description=audit_device["description"],
-                        options=json.dumps(audit_device["options"])
-                    )
-                )
-
-            log.info('Finished retrieving audit devices from vault.')
-        except Exception:
-            raise
-
-        return devices
-
-    def get_local_audit_devices(self, configs, ret):
-        log.info("Loading audit devices from local config...")
-        devices = []
-        if configs:
-            try:
-                for audit_device in configs:
-                    if 'options' in audit_device:
-                        options = json.dumps(audit_device["options"])
-                        log.debug(options)
-
-                    devices.append(
-                        VaultAuditDevice(
-                            type=audit_device["type"],
-                            path=audit_device["path"],
-                            description=audit_device["description"],
-                            options=options
-                        )
-                    )
-
-                log.info('Finished loading audit devices from local config.')
-            except Exception:
-                raise
-
-        return devices
-
-    def configure_audit_devices(self, client, remote_devices, local_devices, ret):
-        log.info('Processing and configuring audit devices...')
-        new_audit_devices = []
-        try:
-            for audit_device in local_devices:
-                log.debug('Checking if audit device "%s" at path "%s" is enabled...',
-                          audit_device.type, audit_device.path)
-
-                if audit_device in remote_devices:
-                    log.debug('Audit device "%s" at path "%s" is already enabled.',
-                              audit_device.type, audit_device.path)
-                else:
-                    log.debug(
-                        'Audit device "%s" at path "%s" is not enabled. 
Enabling now...', audit_device.type, audit_device.path) - new_audit_devices.append(audit_device.type) - client.sys.enable_audit_device( - device_type=audit_device.type, - path=audit_device.path, - description=audit_device.description, - options=json.loads(audit_device.options) - ) - log.debug('Audit device "%s" at path "%s" is enabled.', - audit_device.type, audit_device.path) - - log.info('Finished processing audit devices.') - # Build return object - ret['changes']['old'] = json.loads(json.dumps( - [ob.type for ob in remote_devices])) - - if len(new_audit_devices) > 0: - ret['changes']['new'] = json.loads( - json.dumps(new_audit_devices)) - else: - ret['changes']['new'] = "No changes" - - except Exception: - raise - - def cleanup_audit_devices(self, client, remote_devices, local_devices, ret): - log.info('Cleaning up audit devices...') - has_changes = False - try: - for audit_device in remote_devices: - if audit_device not in local_devices: - log.info('Disabling audit device "%s" at path "%s"...', - audit_device.type, audit_device.path) - has_changes = True - client.sys.disable_audit_device( - path=audit_device.path - ) - log.info('Finished cleaning up audit devices.') - - if has_changes: - ret['changes']['new'] = json.loads(json.dumps( - [ob.type for ob in local_devices])) - except Exception: - raise + """ + Module for handling Vault audit devices + """ + + def __init__(self): + log.info("Initializing Vault Audit Manager...") + + def get_remote_audit_devices(self, client, ret): + log.info("Retrieving audit devices from vault...") + devices = [] + try: + audit_devices_resp = client.sys.list_enabled_audit_devices() + for device in audit_devices_resp['data']: + audit_device = audit_devices_resp[device] + devices.append( + VaultAuditDevice( + type=audit_device['type'], + path=(audit_device["path"] + if 'path' in audit_device else device), + description=audit_device["description"], + options=json.dumps(audit_device["options"]) + ) + ) + + log.info('Finished retrieving audit devices from vault.') + except Exception: + raise + + return devices + + def get_local_audit_devices(self, configs, ret): + log.info("Loading audit devices from local config...") + devices = [] + if configs: + try: + for audit_device in configs: + if 'options' in audit_device: + options = json.dumps(audit_device["options"]) + log.debug(options) + + devices.append( + VaultAuditDevice( + type=audit_device["type"], + path=audit_device["path"], + description=audit_device["description"], + options=options + ) + ) + + log.info('Finished loading audit devices from local config.') + except Exception: + raise + + return devices + + def configure_audit_devices(self, client, remote_devices, local_devices, ret): + log.info('Processing and configuring audit devices...') + new_audit_devices = [] + try: + for audit_device in local_devices: + log.debug('Checking if audit device "%s" at path "%s" is enabled...', + audit_device.type, audit_device.path) + + if audit_device in remote_devices: + log.debug('Audit device "%s" at path "%s" is already enabled.', + audit_device.type, audit_device.path) + else: + log.debug( + 'Audit device "%s" at path "%s" is not enabled. 
Enabling now...', + audit_device.type, + audit_device.path + ) + new_audit_devices.append(audit_device.type) + client.sys.enable_audit_device( + device_type=audit_device.type, + path=audit_device.path, + description=audit_device.description, + options=json.loads(audit_device.options) + ) + log.debug('Audit device "%s" at path "%s" is enabled.', + audit_device.type, audit_device.path) + + log.info('Finished processing audit devices.') + # Build return object + ret['changes']['old'] = json.loads(json.dumps( + [ob.type for ob in remote_devices])) + + if len(new_audit_devices) > 0: + ret['changes']['new'] = json.loads( + json.dumps(new_audit_devices)) + else: + ret['changes']['new'] = "No changes" + + except Exception: + raise + + def cleanup_audit_devices(self, client, remote_devices, local_devices, ret): + log.info('Cleaning up audit devices...') + has_changes = False + try: + for audit_device in remote_devices: + if audit_device not in local_devices: + log.info('Disabling audit device "%s" at path "%s"...', + audit_device.type, audit_device.path) + has_changes = True + client.sys.disable_audit_device( + path=audit_device.path + ) + log.info('Finished cleaning up audit devices.') + + if has_changes: + ret['changes']['new'] = json.loads(json.dumps( + [ob.type for ob in local_devices])) + except Exception: + raise diff --git a/salt/_states/vault.py b/salt/_states/vault.py index 702bff9..042cefc 100644 --- a/salt/_states/vault.py +++ b/salt/_states/vault.py @@ -6,283 +6,283 @@ log = logging.getLogger(__name__) try: - import hvac - import boto3 - DEPS_INSTALLED = True + import hvac + import boto3 + DEPS_INSTALLED = True except ImportError as e: - log.debug('Unable to import the libraries.') - log.exception(e) - DEPS_INSTALLED = False + log.debug('Unable to import the libraries.') + log.exception(e) + DEPS_INSTALLED = False __all__ = ['initialize'] def __virtual__(): - return DEPS_INSTALLED + return DEPS_INSTALLED def initialized(name, ssm_path, recovery_shares=5, recovery_threshold=3): - """ - Ensure that the vault instance has been initialized and run the - initialization if it has not. Storing the root token to SSM parameter - - Arguments: - name {string} -- The id used for the state definition - ssm_path {string} -- The path to SSM parameter that will store the root token - - Keyword Arguments: - recovery_shares {int} -- Specifies the number of shares to split the recovery key into. (default: {5}) - recovery_threshold {int} -- Specifies the number of shares required to reconstruct the recovery key. This must be less than or equal to recovery_shares. 
(default: {3}) - - Returns: - ret {dict} -- Result of the execution - """ - ret = {'name': name, - 'comment': '', - 'result': '', - 'changes': {}} - - client = __utils__['vault.build_client']() - - is_initialized = client.sys.is_initialized() - - if is_initialized: - ret['result'] = True - ret['comment'] = 'Vault is already initialized' - else: - result = client.sys.initialize( - recovery_shares=recovery_shares, - recovery_threshold=recovery_threshold - ) - root_token = result['root_token'] - recovery_keys = result['recovery_keys'] - is_success = client.sys.is_initialized() - - ret['result'] = is_success - ret['changes'] = { - 'root_credentials': { - 'new': { - 'recover_keys': '/{}/{}'.format(ssm_path, 'recovery_keys'), - 'root_token': '/{}/{}'.format(ssm_path, 'root_token') - }, - 'old': {} - } - } - - # upload root token ssm parameter store - if is_success: - ssm_client = boto3.client('ssm') - # saving root token - ssm_client.put_parameter( - Name='/{}/{}'.format(ssm_path, 'root_token'), - Value=root_token, - Type="SecureString", - Overwrite=True - ) - - # saving recovery keys - ssm_client.put_parameter( - Name='/{}/{}'.format(ssm_path, 'recovery_keys'), - Value=json.dumps(recovery_keys), - Type="SecureString", - Overwrite=True - ) - - ret['comment'] = 'Vault has {}initialized'.format( - '' if is_success else 'failed to be ') - return ret + """ + Ensure that the vault instance has been initialized and run the + initialization if it has not. Storing the root token to SSM parameter + + Arguments: + name {string} -- The id used for the state definition + ssm_path {string} -- The path to SSM parameter that will store the root token + + Keyword Arguments: + recovery_shares {int} -- Specifies the number of shares to split the recovery key into. (default: {5}) + recovery_threshold {int} -- Specifies the number of shares required to reconstruct the recovery key. This must be less than or equal to recovery_shares. 
(default: {3}) + + Returns: + ret {dict} -- Result of the execution + """ + ret = {'name': name, + 'comment': '', + 'result': '', + 'changes': {}} + + client = __utils__['vault.build_client']() + + is_initialized = client.sys.is_initialized() + + if is_initialized: + ret['result'] = True + ret['comment'] = 'Vault is already initialized' + else: + result = client.sys.initialize( + recovery_shares=recovery_shares, + recovery_threshold=recovery_threshold + ) + root_token = result['root_token'] + recovery_keys = result['recovery_keys'] + is_success = client.sys.is_initialized() + + ret['result'] = is_success + ret['changes'] = { + 'root_credentials': { + 'new': { + 'recover_keys': '/{}/{}'.format(ssm_path, 'recovery_keys'), + 'root_token': '/{}/{}'.format(ssm_path, 'root_token') + }, + 'old': {} + } + } + + # upload root token ssm parameter store + if is_success: + ssm_client = boto3.client('ssm') + # saving root token + ssm_client.put_parameter( + Name='/{}/{}'.format(ssm_path, 'root_token'), + Value=root_token, + Type="SecureString", + Overwrite=True + ) + + # saving recovery keys + ssm_client.put_parameter( + Name='/{}/{}'.format(ssm_path, 'recovery_keys'), + Value=json.dumps(recovery_keys), + Type="SecureString", + Overwrite=True + ) + + ret['comment'] = 'Vault has {}initialized'.format( + '' if is_success else 'failed to be ') + return ret def secret_engines_synced(name, configs=[]): - """ - Ensure secrets engines are synced with Vault + """ + Ensure secrets engines are synced with Vault - Arguments: - name {string} -- The id used for the state definition + Arguments: + name {string} -- The id used for the state definition - Keyword Arguments: - configs {list} -- A list of configuration rules that defined the secrets engines (default: []) + Keyword Arguments: + configs {list} -- A list of configuration rules that defined the secrets engines (default: []) - Returns: - ret {dict} -- Result of the execution - """ + Returns: + ret {dict} -- Result of the execution + """ - client = __utils__['vault.build_client']() - remote_secret_engines = [] - local_secret_engines = [] - ret = { - 'name': name, - 'comment': '', - 'result': '', - 'changes': {} - } + client = __utils__['vault.build_client']() + remote_secret_engines = [] + local_secret_engines = [] + ret = { + 'name': name, + 'comment': '', + 'result': '', + 'changes': {} + } - log.debug(json.dumps(configs)) + log.debug(json.dumps(configs)) - secretsManager = __salt__['vault.get_secret_engines_manager']() + secretsManager = __salt__['vault.get_secret_engines_manager']() - try: - remote_secret_engines = secretsManager.get_remote_secrets_engines( - client, ret) + try: + remote_secret_engines = secretsManager.get_remote_secrets_engines( + client, ret) - local_secret_engines = secretsManager.populate_local_secrets_engines( - configs, ret) + local_secret_engines = secretsManager.populate_local_secrets_engines( + configs, ret) - secretsManager.configure_secrets_engines( - client, - remote_secret_engines, - local_secret_engines, - ret - ) + secretsManager.configure_secrets_engines( + client, + remote_secret_engines, + local_secret_engines, + ret + ) - secretsManager.cleanup_secrets_engines( - client, - remote_secret_engines, - local_secret_engines, - ret - ) + secretsManager.cleanup_secrets_engines( + client, + remote_secret_engines, + local_secret_engines, + ret + ) - ret['result'] = True - except Exception as e: - ret['result'] = False - log.exception(e) + ret['result'] = True + except Exception as e: + ret['result'] = False + log.exception(e) - 
return ret + return ret def auth_methods_synced(name, configs=[]): - """ - Ensure authentication methods are synced with Vault - - Arguments: - name {string} -- The id used for the state definition - - Keyword Arguments: - configs {list} -- A list of configuration rules that defined the authentication methods (default: []) - Returns: - ret {dict} -- Result of the execution - """ - - client = __utils__['vault.build_client']() - remote_auth_methods = [] - local_auth_methods = [] - ret = { - 'name': name, - 'comment': '', - 'result': '', - 'changes': {} - } - - authsManager = __salt__['vault.get_auth_methods_manager']() - - try: - - remote_auth_methods = authsManager.get_remote_auth_methods(client, ret) - local_auth_methods = authsManager.populate_local_auth_methods( - configs, ret) - - authsManager.configure_auth_methods( - client, - remote_auth_methods, - local_auth_methods, - ret - ) - - authsManager.cleanup_auth_methods( - client, - remote_auth_methods, - local_auth_methods, - ret - ) - - ret['result'] = True - except Exception as e: - ret['result'] = False - log.exception(e) - - return ret + """ + Ensure authentication methods are synced with Vault + + Arguments: + name {string} -- The id used for the state definition + + Keyword Arguments: + configs {list} -- A list of configuration rules that defined the authentication methods (default: []) + Returns: + ret {dict} -- Result of the execution + """ + + client = __utils__['vault.build_client']() + remote_auth_methods = [] + local_auth_methods = [] + ret = { + 'name': name, + 'comment': '', + 'result': '', + 'changes': {} + } + + authsManager = __salt__['vault.get_auth_methods_manager']() + + try: + + remote_auth_methods = authsManager.get_remote_auth_methods(client, ret) + local_auth_methods = authsManager.populate_local_auth_methods( + configs, ret) + + authsManager.configure_auth_methods( + client, + remote_auth_methods, + local_auth_methods, + ret + ) + + authsManager.cleanup_auth_methods( + client, + remote_auth_methods, + local_auth_methods, + ret + ) + + ret['result'] = True + except Exception as e: + ret['result'] = False + log.exception(e) + + return ret def policies_synced(name, policies=[]): - """ - Ensure policies are synced with Vault - - Arguments: - name {string} -- The id used for the state definition - - Keyword Arguments: - policies {list} -- A list of policies to by synced with Vault (default: []) - Returns: - ret {dict} -- Result of the execution - """ - - client = __utils__['vault.build_client']() - remote_policies = [] - local_policies = [] - ret = { - 'name': name, - 'comment': '', - 'result': '', - 'changes': {} - } - - policiesManager = __salt__['vault.get_policies_manager']() - - try: - remote_policies = policiesManager.get_remote_policies(client, ret) - local_policies = json.loads(json.dumps(policies)) - policiesManager.push_policies( - client, remote_policies, local_policies, ret) - policiesManager.cleanup_policies( - client, remote_policies, local_policies, ret) - - ret['result'] = True - except Exception as e: - ret['result'] = False - log.exception(e) - return ret + """ + Ensure policies are synced with Vault + + Arguments: + name {string} -- The id used for the state definition + + Keyword Arguments: + policies {list} -- A list of policies to by synced with Vault (default: []) + Returns: + ret {dict} -- Result of the execution + """ + + client = __utils__['vault.build_client']() + remote_policies = [] + local_policies = [] + ret = { + 'name': name, + 'comment': '', + 'result': '', + 'changes': {} + } + + 
policiesManager = __salt__['vault.get_policies_manager']() + + try: + remote_policies = policiesManager.get_remote_policies(client, ret) + local_policies = json.loads(json.dumps(policies)) + policiesManager.push_policies( + client, remote_policies, local_policies, ret) + policiesManager.cleanup_policies( + client, remote_policies, local_policies, ret) + + ret['result'] = True + except Exception as e: + ret['result'] = False + log.exception(e) + return ret def audit_devices_synced(name, configs=[]): - """ - Ensures audit devices are synced with Vault - - Arguments: - name {string} -- The id used for the state definition - - Keyword Arguments: - configs {list} -- A list of configuration rules that defined the audit devices (default: []) - Returns: - ret {dict} -- Result of the execution - """ - - client = __utils__['vault.build_client']() - remote_devices = [] - local_devices = [] - ret = { - 'name': name, - 'comment': '', - 'result': '', - 'changes': {} - } - - auditDevicesManager = __salt__['vault.get_audit_device_manager']() - try: - - remote_devices = auditDevicesManager.get_remote_audit_devices( - client, ret) - - local_devices = auditDevicesManager.get_local_audit_devices( - configs, ret) - - auditDevicesManager.configure_audit_devices( - client, remote_devices, local_devices, ret) - - auditDevicesManager.cleanup_audit_devices( - client, remote_devices, local_devices, ret) - - ret['result'] = True - except Exception as e: - ret['result'] = False - log.exception(e) - return ret + """ + Ensures audit devices are synced with Vault + + Arguments: + name {string} -- The id used for the state definition + + Keyword Arguments: + configs {list} -- A list of configuration rules that defined the audit devices (default: []) + Returns: + ret {dict} -- Result of the execution + """ + + client = __utils__['vault.build_client']() + remote_devices = [] + local_devices = [] + ret = { + 'name': name, + 'comment': '', + 'result': '', + 'changes': {} + } + + auditDevicesManager = __salt__['vault.get_audit_device_manager']() + try: + + remote_devices = auditDevicesManager.get_remote_audit_devices( + client, ret) + + local_devices = auditDevicesManager.get_local_audit_devices( + configs, ret) + + auditDevicesManager.configure_audit_devices( + client, remote_devices, local_devices, ret) + + auditDevicesManager.cleanup_audit_devices( + client, remote_devices, local_devices, ret) + + ret['result'] = True + except Exception as e: + ret['result'] = False + log.exception(e) + return ret diff --git a/salt/_utils/vault.py b/salt/_utils/vault.py index 5df82e5..a74ce72 100644 --- a/salt/_utils/vault.py +++ b/salt/_utils/vault.py @@ -16,35 +16,35 @@ def build_client(url=None, token=None): - vault_url = url if url != None else get_vault_url() - client = hvac.Client( - url=vault_url, - token=token - ) + vault_url = url if url != None else get_vault_url() + client = hvac.Client( + url=vault_url, + token=token + ) - if token == None: - client.token = os.environ.get('VAULT_TOKEN') + if token == None: + client.token = os.environ.get('VAULT_TOKEN') - return client + return client def get_vault_url(): - ''' - Returns a string consist of url and port number - ''' - port = __grains__['vault']['api_port'] if __grains__[ - 'vault']['api_port'] != None else 8200 - url = "http://localhost" + ''' + Returns a string consist of url and port number + ''' + port = __grains__['vault']['api_port'] if __grains__[ + 'vault']['api_port'] != None else 8200 + url = "http://localhost" - return "{}:{}".format(url, port) + return 
"{}:{}".format(url, port) def load_config_file(config_path): - configs = None - with open(os.path.join(config_path), 'r') as fd: - try: - configs = yaml.load(fd) - except yaml.YAMLError as e: - log.critical("Unable to load conf file: " + str(e)) - return False - return configs + configs = None + with open(os.path.join(config_path), 'r') as fd: + try: + configs = yaml.load(fd) + except yaml.YAMLError as e: + log.critical("Unable to load conf file: " + str(e)) + return False + return configs diff --git a/salt/vault/map.jinja b/salt/vault/map.jinja index 786f509..0866d6e 100644 --- a/salt/vault/map.jinja +++ b/salt/vault/map.jinja @@ -4,7 +4,7 @@ {% import_yaml "vault/maps/osfamilymap.yaml" or {} as osfamilymap %} {% import_yaml "vault/maps/initfamilymap.yaml" or {} as initfamilymap %} -{%- set merged_defaults = salt.grains.filter_by(defaults, +{%- set vault = salt.grains.filter_by(defaults, default='vault', merge=salt.grains.filter_by(osfamilymap, grain='os_family', merge=salt.grains.filter_by(initfamilymap, grain='init', @@ -12,6 +12,3 @@ ) ) ) %} - -{#- Merge the vault pillar #} -{%- set vault = salt.pillar.get('vault', default=merged_defaults, merge=True) %} diff --git a/salt/vault/maps/defaults.yaml b/salt/vault/maps/defaults.yaml index b9b0d80..db1b28b 100644 --- a/salt/vault/maps/defaults.yaml +++ b/salt/vault/maps/defaults.yaml @@ -19,4 +19,3 @@ vault: region: "" dynamodb_table: "" kms_key_id: "" - diff --git a/scripts/appscript.sh b/scripts/appscript.sh index ceaa2ed..8ccbae4 100644 --- a/scripts/appscript.sh +++ b/scripts/appscript.sh @@ -7,7 +7,8 @@ SALT_DIR="/srv/salt" ARCHIVE_FILE_NAME="salt_formula.zip" # Standard aws envs -export AWS_DEFAULT_REGION=$(curl -sSL http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/.$//') +AWS_DEFAULT_REGION=$(curl -sSL http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/.$//') +export AWS_DEFAULT_REGION yum install unzip jq -y @@ -26,7 +27,7 @@ rm $ARCHIVE_FILE_NAME echo "[appscript]: Configuring salt to read ec2 metadata into grains..." echo "metadata_server_grains: True" > /etc/salt/minion.d/metadata.conf -echo "[appscript]: Setting required salt grains for vault..." +echo "[appscript]: Setting the required salt grains for vault..." salt-call --local grains.setval vault ${salt_grains_json} echo "[appscript]: Update minion config to allow module.run..." @@ -39,26 +40,37 @@ echo "[appscript]: Updating salt states/modules/utils/grains..." salt-call --local saltutil.sync_all echo "[appscript]: Retrieving path for directory storing log files..." -export LOGS_DIR=$(salt-call --local grains.get 'vault:logs_path' --output=json | jq .[] -r) +LOGS_DIR=$(salt-call --local grains.get 'vault:logs_path' --output=json | jq .[] -r) +export LOGS_DIR echo "[appscript]: Ensuring logs dir location exists, $LOGS_DIR..." mkdir -p $LOGS_DIR echo "[appscript]: Installing vault and configuring service, firewall..." -salt-call --local --retcode-passthrough state.sls vault -l info 2>&1 | tee $LOGS_DIR/salt_call.log +salt-call --local --retcode-passthrough state.sls vault -l info 2>&1 | tee $LOGS_DIR/state.vault.log echo "[appscript]: Initializing vault..." -salt-call --local --retcode-passthrough state.sls vault.initialize -l info 2>&1 | tee $LOGS_DIR/initialize.log +salt-call --local --retcode-passthrough state.sls vault.initialize -l info 2>&1 | tee $LOGS_DIR/state.vault.initialize.log echo "[appscript]: Sync configurations with the vault..." 
-export SSM_PATH=$(salt-call --local grains.get 'vault:ssm_path' --output=json | jq .[] -r)
-export VAULT_TOKEN=$(aws ssm get-parameter --name /"$SSM_PATH"/root_token --with-decryption --query 'Parameter.Value' | tr -d '"')
-salt-call --local --retcode-passthrough state.sls vault.sync -l info 2>&1 | tee $LOGS_DIR/sync_config.log
+SSM_PATH=$(salt-call --local grains.get 'vault:ssm_path' --output=json | jq .[] -r)
+export SSM_PATH
+
+VAULT_TOKEN=$(aws ssm get-parameter --name /"$SSM_PATH"/root_token --with-decryption --query 'Parameter.Value' | tr -d '"')
+export VAULT_TOKEN
+
+salt-call --local --retcode-passthrough state.sls vault.sync -l info 2>&1 | tee $LOGS_DIR/state.vault.sync.log

 echo "[appscript]: Retrieving Vault's status"
-# Vault local address
-export API_PORT=$(salt-call --local grains.get 'vault:api_port' --output=json | jq .[])
-export VAULT_ADDR=http://127.0.0.1:$API_PORT
+# Get api port for vault server
+API_PORT=$(salt-call --local grains.get 'vault:api_port' --output=json | jq .[])
+export API_PORT
+
+# Set up vault address
+VAULT_ADDR=http://127.0.0.1:$API_PORT
+export VAULT_ADDR
+
+# Retrieve vault status
 vault status

 echo "[appscript]: Completed appscript vault successfully!"
diff --git a/variables.tf b/variables.tf
index bfccffa..6965669 100644
--- a/variables.tf
+++ b/variables.tf
@@ -23,12 +23,6 @@ variable "ami_owner" {
   description = "Account id/alias of the AMI owner"
 }

-variable "additional_ips_allow_inbound" {
-  type        = list(string)
-  description = "List of ip address that allow to have access to resources"
-  default     = []
-}
-
 variable "ec2_extra_security_group_ids" {
   type        = list(string)
   description = "List of additional security groups to add to EC2 instances"
@@ -99,12 +93,6 @@ variable "vault_configs_path" {
   default = null
 }

-variable "enable_access_logs" {
-  type        = bool
-  description = "Boolean indicating whether to enable access logs for load balancer"
-  default     = false
-}
-
 variable "instance_type" {
   type        = string
   description = "Amazon EC2 instance type"

From 6f9744f05b61ffdedab0e7ae730ce809a362cf06 Mon Sep 17 00:00:00 2001
From: Triet Le
Date: Wed, 11 Sep 2019 19:55:14 -0400
Subject: [PATCH 24/34] Removes -x flag from files

The x flag was set on these files; suspected to have been caused by
vscode.
---
 .bumpversion.cfg | 0
 .editorconfig    | 0
 .gitignore       | 0
 .travis.yml      | 0
 _docs/MAIN.md    | 0
 5 files changed, 0 insertions(+), 0 deletions(-)
 mode change 100755 => 100644 .bumpversion.cfg
 mode change 100755 => 100644 .editorconfig
 mode change 100755 => 100644 .gitignore
 mode change 100755 => 100644 .travis.yml
 mode change 100755 => 100644 _docs/MAIN.md

diff --git a/.bumpversion.cfg b/.bumpversion.cfg
old mode 100755
new mode 100644
diff --git a/.editorconfig b/.editorconfig
old mode 100755
new mode 100644
diff --git a/.gitignore b/.gitignore
old mode 100755
new mode 100644
diff --git a/.travis.yml b/.travis.yml
old mode 100755
new mode 100644
diff --git a/_docs/MAIN.md b/_docs/MAIN.md
old mode 100755
new mode 100644

From 9308ebce445a9083fb4165a4ebf218e8e75dfd74 Mon Sep 17 00:00:00 2001
From: Triet Le
Date: Thu, 12 Sep 2019 14:10:08 -0400
Subject: [PATCH 25/34] Performs clean up on terraform module

Makes certificate_arn optional, automatically creating a certificate if
none is supplied, since a certificate is required for the HTTPS lb
listener.
---
 README.md                    |  7 +--
 main.tf                      | 94 +++++++++++++++++++++++-------------
 tests/vault-py2/main.tf      |  3 +-
 tests/vault-py2/variables.tf | 10 +++-
 tests/vault-py3/main.tf      |  4 +-
 tests/vault-py3/variables.tf |  4 +-
 variables.tf                 | 21 +++++---
 7 files changed, 93 
insertions(+), 50 deletions(-) diff --git a/README.md b/README.md index 4f35c59..afb9732 100755 --- a/README.md +++ b/README.md @@ -6,10 +6,11 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy | Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| -| ami\_name\_filter | Will be use to filter out AMI | string | `"spel-minimal-centos-7-hvm-*.x86_64-gp2"` | no | +| ami\_name\_filters | Will be use to filter out AMI | list(string) | `` | no | | ami\_name\_regex | Regex to help fine-grain filtering AMI | string | `"spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2"` | no | -| ami\_owner | Account id/alias of the AMI owner | string | n/a | yes | +| ami\_owners | Account id/alias of the AMI owners | list(string) | n/a | yes | | api\_port | The port to use for Vault API calls | string | `"8200"` | no | +| certificate\_arn | The ARN of the default SSL server certificate to be use for HTTPS lb listener. | string | `"null"` | no | | cfn\_bootstrap\_utils\_url | (Optional) URL to aws-cfn-bootstrap-latest.tar.gz | string | `"https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz"` | no | | cfn\_endpoint\_url | (Optional) URL to the CloudFormation Endpoint. e.g. https://cloudformation.us-east-1.amazonaws.com | string | `"https://cloudformation.us-east-1.amazonaws.com"` | no | | cloudwatch\_agent\_url | (Optional) S3 URL to CloudWatch Agent installer. Example: s3://amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip | string | `""` | no | @@ -29,7 +30,7 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy | instance\_type | Amazon EC2 instance type | string | `"t2.medium"` | no | | key\_pair\_name | Keypair to associate to launched instances | string | n/a | yes | | kms\_key\_id | Id of an AWS KMS key use for auto unseal operation when vault is intialize | string | `"null"` | no | -| lb\_internal | Boolean indicating whether the load balancer is internal or external | bool | `"false"` | no | +| lb\_internal | Boolean indicating whether the load balancer is internal or external | bool | `"true"` | no | | lb\_ssl\_policy | The name of the SSL Policy for the listener | string | `"ELBSecurityPolicy-FS-2018-06"` | no | | lb\_subnet\_ids | List of subnets to associate to the Load Balancer | list(string) | n/a | yes | | max\_capacity | (Optional) Maximum number of instances in the Autoscaling Group | string | `"2"` | no | diff --git a/main.tf b/main.tf index 9885eec..878da56 100644 --- a/main.tf +++ b/main.tf @@ -19,15 +19,17 @@ locals { logs_path = "/var/log/vault" default_enabled_repos = ["epel"] default_inbound_cdirs = ["10.0.0.0/16", "10.0.0.0/8"] - appscript_url = join("/", [module.s3_bucket.id, random_string.this.result, local.appscript_file_name]) + s3_bucket_name = join("-", [var.name, random_string.this.result]) + appscript_url = join("/", [module.s3_bucket.id, local.appscript_file_name]) archive_dir_path = join("/", [path.module, ".files"]) appscript_dir_path = join("/", [path.module, "scripts"]) role_name = join("-", [upper(var.name), "INSTANCE", data.aws_caller_identity.current.account_id]) ssm_root_path = join("/", ["vault", var.environment, data.aws_caller_identity.current.account_id, var.name]) - s3_salt_vault_content = join("/", [module.s3_bucket.id, random_string.this.result, local.archive_file_name]) - s3_vault_configuration = var.vault_configs_path == null ? 
"" : join("/", [module.s3_bucket.id, random_string.this.result, local.configs_file_name]) + s3_salt_vault_content = join("/", [module.s3_bucket.id, local.archive_file_name]) + s3_vault_configuration = var.vault_configs_path == null ? "" : join("/", [module.s3_bucket.id, local.configs_file_name]) dynamodb_table = var.dynamodb_table == null ? join("", aws_dynamodb_table.this.*.id) : var.dynamodb_table kms_key_id = var.kms_key_id == null ? join("", aws_kms_key.this.*.id) : var.kms_key_id + certificate_arn = var.certificate_arn == null ? join("", aws_acm_certificate.this.*.id) : var.certificate_arn vault_url = var.vault_url == null ? join(".", [var.name, var.domain_name]) : var.vault_url # Logs files to be streamed to CloudWatch Logs @@ -60,11 +62,11 @@ data "aws_region" "current" { data "aws_ami" "this" { most_recent = "true" - owners = [var.ami_owner] + owners = var.ami_owners name_regex = var.ami_name_regex filter { name = "name" - values = [var.ami_name_filter] + values = var.ami_name_filters } } @@ -87,12 +89,6 @@ data "archive_file" "configs" { output_path = join("/", [local.archive_dir_path, local.configs_file_name]) } -data "aws_acm_certificate" "this" { - domain = join(".", ["*", var.domain_name]) - types = ["AMAZON_ISSUED"] - most_recent = true -} - data "template_file" "appscript" { template = file(join("/", [local.appscript_dir_path, local.appscript_file_name])) @@ -118,7 +114,7 @@ module "s3_bucket" { source = "terraform-aws-modules/s3-bucket/aws" version = "0.0.1" - bucket = var.name + bucket = local.s3_bucket_name } @@ -133,7 +129,7 @@ module "iam" { role_name = local.role_name policy_vars = { - bucket_name = var.name + bucket_name = module.s3_bucket.id dynamodb_table = local.dynamodb_table kms_key_id = local.kms_key_id stack_name = var.name @@ -144,13 +140,14 @@ module "iam" { # Generate a random id for each deployment resource "random_string" "this" { length = 8 - special = "false" + special = false + upper = false } # Manage archive and appscript files resource "aws_s3_bucket_object" "salt_zip" { bucket = module.s3_bucket.id - key = join("/", [random_string.this.result, local.archive_file_name]) + key = local.archive_file_name source = join("/", [local.archive_dir_path, local.archive_file_name]) etag = data.archive_file.salt.output_md5 } @@ -158,17 +155,32 @@ resource "aws_s3_bucket_object" "salt_zip" { resource "aws_s3_bucket_object" "configs_zip" { count = var.vault_configs_path == null ? 0 : 1 bucket = module.s3_bucket.id - key = join("/", [random_string.this.result, local.configs_file_name]) + key = local.configs_file_name source = join("/", [local.archive_dir_path, local.configs_file_name]) etag = data.archive_file.configs[count.index].output_md5 } resource "aws_s3_bucket_object" "app_script" { bucket = module.s3_bucket.id - key = join("/", [random_string.this.result, local.appscript_file_name]) + key = local.appscript_file_name content = data.template_file.appscript.rendered etag = md5(data.template_file.appscript.rendered) } + +# Manage KMS key +resource "aws_kms_alias" "this" { + count = var.kms_key_id == null ? 1 : 0 + name = "alias/${var.name}" + target_key_id = join("", aws_kms_key.this.*.key_id) +} + +resource "aws_kms_key" "this" { + count = var.kms_key_id == null ? 
1 : 0 + description = "KMS Key for ${var.name}" + + tags = merge({ Name = var.name }, local.tags) +} + # Manage domain record resource "aws_route53_record" "this" { zone_id = var.route53_zone_id @@ -182,25 +194,41 @@ resource "aws_route53_record" "this" { } } -# Manage KMS key -resource "aws_kms_alias" "this" { - count = var.kms_key_id == null ? 1 : 0 - name = "alias/${var.name}" - target_key_id = join("", aws_kms_key.this.*.key_id) -} +# Manage certificate +resource "aws_acm_certificate" "this" { + count = var.certificate_arn == null ? 1 : 0 -resource "aws_kms_key" "this" { - count = var.kms_key_id == null ? 1 : 0 - description = "KSM Key for ${var.name}" - deletion_window_in_days = 10 + domain_name = local.vault_url + validation_method = "DNS" tags = merge({ Name = var.name }, local.tags) + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_route53_record" "cert_validation" { + count = var.certificate_arn == null ? 1 : 0 + + name = join("", aws_acm_certificate.this.*.domain_validation_options.0.resource_record_name) + type = join("", aws_acm_certificate.this.*.domain_validation_options.0.resource_record_type) + zone_id = var.route53_zone_id + records = ["${join("", aws_acm_certificate.this.*.domain_validation_options.0.resource_record_value)}"] + ttl = 60 +} + +resource "aws_acm_certificate_validation" "this" { + count = var.certificate_arn == null ? 1 : 0 + + certificate_arn = join("", aws_acm_certificate.this.*.arn) + validation_record_fqdns = ["${join("", aws_route53_record.cert_validation.*.fqdn)}"] } # Manage load balancer resource "aws_lb" "this" { name = var.name - internal = "false" + internal = var.lb_internal security_groups = [aws_security_group.lb.id] subnets = var.lb_subnet_ids @@ -228,7 +256,7 @@ resource "aws_lb_listener" "https" { port = "443" protocol = "HTTPS" ssl_policy = var.lb_ssl_policy - certificate_arn = data.aws_acm_certificate.this.arn + certificate_arn = local.certificate_arn default_action { target_group_arn = aws_lb_target_group.this.arn @@ -265,7 +293,7 @@ resource "aws_lb_target_group" "this" { # Manage security groups resource "aws_security_group" "lb" { name = "${var.name}-lb" - description = "Allow web traffic to the load balancer" + description = "Allow web traffic to the ${var.name} load balancer" vpc_id = local.vpc_id ingress { @@ -294,7 +322,7 @@ resource "aws_security_group" "lb" { resource "aws_security_group" "ec2" { name = "${var.name}-ec2" - description = "Allow vault traffic between ALB and EC2 instances" + description = "Allow vault traffic between ${var.name} ALB and EC2 instances" vpc_id = local.vpc_id ingress { @@ -393,8 +421,8 @@ module "autoscaling_group" { KeyPairName = var.key_pair_name InstanceRole = module.iam.profile_name InstanceType = var.instance_type - NoReboot = "true" - NoPublicIp = "false" + NoReboot = true + NoPublicIp = true PypiIndexUrl = var.pypi_index_url SecurityGroupIds = join(",", compact(concat([aws_security_group.ec2.id], var.ec2_extra_security_group_ids))) SubnetIds = join(",", var.ec2_subnet_ids) diff --git a/tests/vault-py2/main.tf b/tests/vault-py2/main.tf index 4099804..71f5fa0 100644 --- a/tests/vault-py2/main.tf +++ b/tests/vault-py2/main.tf @@ -12,7 +12,7 @@ module "base" { environment = var.environment desired_capacity = 1 - ami_owner = var.ami_owner + ami_owners = var.ami_owners name = "${random_id.name.hex}-py2" key_pair_name = var.key_pair_name @@ -24,6 +24,7 @@ module "base" { domain_name = var.domain_name route53_zone_id = var.route53_zone_id + certificate_arn = var.certificate_arn # 
Vault settings
   vault_version = var.vault_version
diff --git a/tests/vault-py2/variables.tf b/tests/vault-py2/variables.tf
index 8eb9b08..78ee17d 100644
--- a/tests/vault-py2/variables.tf
+++ b/tests/vault-py2/variables.tf
@@ -9,9 +9,9 @@ variable "key_pair_name" {
   type = string
 }

-variable "ami_owner" {
+variable "ami_owners" {
   description = "Account id/alias of the AMI owner"
-  type        = string
+  type        = list(string)
 }

 variable "ec2_subnet_ids" {
@@ -45,6 +45,12 @@ variable "route53_zone_id" {
   description = "Hosted zone ID Route 53 hosted zone"
 }

+variable "certificate_arn" {
+  type        = string
+  description = "The ARN of the default SSL server certificate to be used for the HTTPS lb listener."
+  default     = null
+}
+
 variable "dynamodb_table" {
   description = "Name of the Dynamodb to be used as storage backend for Vault"
   type        = string
diff --git a/tests/vault-py3/main.tf b/tests/vault-py3/main.tf
index ac9309a..210af4c 100644
--- a/tests/vault-py3/main.tf
+++ b/tests/vault-py3/main.tf
@@ -12,8 +12,8 @@ module "vault-py3" {
   source = "../../"

   environment = var.environment
-  desired_capacity = 2
-  ami_owner = var.ami_owner
+  desired_capacity = 1
+  ami_owners = var.ami_owners
   name = "${random_id.name.hex}-py3"
   key_pair_name = var.key_pair_name
diff --git a/tests/vault-py3/variables.tf b/tests/vault-py3/variables.tf
index 5a27c44..0183b38 100644
--- a/tests/vault-py3/variables.tf
+++ b/tests/vault-py3/variables.tf
@@ -9,9 +9,9 @@ variable "key_pair_name" {
   type = string
 }

-variable "ami_owner" {
+variable "ami_owners" {
   description = "Account id/alias of the AMI owner"
-  type        = string
+  type        = list(string)
 }

 variable "ec2_subnet_ids" {
diff --git a/variables.tf b/variables.tf
index 6965669..73972c8 100644
--- a/variables.tf
+++ b/variables.tf
@@ -18,9 +18,9 @@ variable "key_pair_name" {
   description = "Keypair to associate to launched instances"
 }

-variable "ami_owner" {
-  type        = string
-  description = "Account id/alias of the AMI owner"
+variable "ami_owners" {
+  type        = list(string)
+  description = "Account id/alias of the AMI owners"
 }

 variable "ec2_extra_security_group_ids" {
@@ -59,6 +59,7 @@ variable "route53_zone_id" {
   type = string
   description = "Hosted zone ID Route 53 hosted zone"
 }
+
 # ---------------------------------------------------------------------------------------------------------------------
 # OPTIONAL PARAMETERS
 # These parameters have reasonable defaults.
@@ -75,10 +76,10 @@ variable "dynamodb_table" {
   default = null
 }

-variable "ami_name_filter" {
-  type = string
+variable "ami_name_filters" {
+  type = list(string)
   description = "Will be use to filter out AMI"
-  default = "spel-minimal-centos-7-hvm-*.x86_64-gp2"
+  default = ["spel-minimal-centos-7-hvm-*.x86_64-gp2"]
 }

 variable "ami_name_regex" {
@@ -102,7 +103,13 @@ variable "instance_type" {
 variable "lb_internal" {
   type = bool
   description = "Boolean indicating whether the load balancer is internal or external"
-  default = false
+  default = true
+}
+
+variable "certificate_arn" {
+  type = string
+  description = "The ARN of the default SSL server certificate to be used for the HTTPS lb listener." 
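+  # When left null (the default), main.tf provisions an ACM certificate for
+  # the vault URL (join(".", [var.name, var.domain_name]) unless vault_url is
+  # set) and validates it through DNS, so this only needs to be set when
+  # bringing an existing certificate.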
+ default = null } variable "inbound_cidrs" { From 3dcbd2b5cb16cf4d1fa49dc057ea43e79e939a7d Mon Sep 17 00:00:00 2001 From: Triet Le Date: Thu, 12 Sep 2019 14:58:25 -0400 Subject: [PATCH 26/34] Fixes rsync issue --- Vagrantfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Vagrantfile b/Vagrantfile index 099dedf..8cf14df 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -46,6 +46,7 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. + config.vm.synced_folder '.', '/vagrant', disabled: true config.vm.synced_folder "./salt", "/srv/salt" # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. From 0784bafa8daa7f56821d1eff6bcea7f9ab4dd4ac Mon Sep 17 00:00:00 2001 From: Triet Le Date: Thu, 12 Sep 2019 14:59:01 -0400 Subject: [PATCH 27/34] Updates firewall state to ignore firewall changes on dev --- salt/vault/firewall.sls | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/vault/firewall.sls b/salt/vault/firewall.sls index a4bbf8a..841e8ed 100644 --- a/salt/vault/firewall.sls +++ b/salt/vault/firewall.sls @@ -1,6 +1,8 @@ {% from "vault/map.jinja" import vault with context %} +{%- if not vault.dev_mode %} + firewalld_vault_service: firewalld.service: - name: vault @@ -16,3 +18,5 @@ firewalld_vault_zone: - sources: {{ vault.inbound_cidrs }} - require: - firewalld: firewalld_vault_service + +{%- endif %} From f35a64641939bcd4d3b956e938fea372f125af0f Mon Sep 17 00:00:00 2001 From: Triet Le Date: Thu, 12 Sep 2019 22:08:20 -0400 Subject: [PATCH 28/34] Cleans up vault states/modules/utils Adds config base class\nAdjust config classes to inherit from base config class --- salt/_modules/vault.py | 256 +++++++++++++---------------------------- salt/_states/vault.py | 12 +- salt/_utils/vault.py | 19 +-- salt/vault/sync.sls | 8 +- 4 files changed, 93 insertions(+), 202 deletions(-) diff --git a/salt/_modules/vault.py b/salt/_modules/vault.py index 36e0d49..943ae68 100644 --- a/salt/_modules/vault.py +++ b/salt/_modules/vault.py @@ -14,13 +14,12 @@ log = logging.getLogger(__name__) +DEPS_INSTALLED = False try: import hvac DEPS_INSTALLED = True -except ImportError as e: - log.debug('Unable to import the dependencies...') - log.exception(e) - DEPS_INSTALLED = False +except ImportError: + pass class InsufficientParameters(Exception): @@ -28,7 +27,10 @@ class InsufficientParameters(Exception): def __virtual__(): - return DEPS_INSTALLED + if DEPS_INSTALLED: + return 'vault' + else: + return False, 'Missing required dependency, `hvac`' def get_policies_manager(): @@ -71,151 +73,83 @@ def get_audit_device_manager(): return VaultAuditManager() -class VaultAuthMethod: +class VaultConfigBase: type = None path = None description = None config = None - auth_config = None - extra_config = None - def __init__(self, type, path, description, config=None, auth_config=None, extra_config=None): - """ - Instanciate class - - :param type: Authentication type - :type type: str - :param path: Authentication mount point - :type path: str - :param description: Authentication description - :type description: str - :param config: Authentication config - :type config: dict - :param auth_config: Authentification specific configuration - :type auth_config: dict - :param extra_config: Extra Authentification configurations - :type extra_config: dict + def __init__(self, type, path, 
description, config): + """[summary] + + Arguments: + type {string} -- The type of the config + path {string} -- The path in which to enable the config + description {[type]} -- A human-friendly description """ + + config = config or {} + self.type = type self.path = path.replace("/", "") self.description = (description if description else "") - self.config = {} - for elem in config: - if config[elem] != "": - self.config[elem] = config[elem] - self.auth_config = auth_config - self.extra_config = extra_config + self.config = {k: v for k, v in config.items() if v != ''} def get_unique_id(self): """ - Return a unique hash by auth method only using the type and path + Return a unique hash of the config by only using the type and path - :return: str + Returns: + string -- unique hash of the type and path """ - unique_str = str(self.type + self.path) - sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() - return sha256_hash + return self.hash_value(self.type + self.path) def get_tuning_hash(self): """ Return a unique ID per tuning configuration - :return: str + Returns: + string -- unique hash of the configuration """ - conf_str = self.description + str(self.config) - sha256_hash = hashlib.sha256(conf_str.encode()).hexdigest() - return sha256_hash + return self.hash_value(self.description + str(self.config)) + + def hash_value(self, value): + return hashlib.sha256(value.encode()).hexdigest() def __eq__(self, other): return self.get_unique_id() == other.get_unique_id() def __repr__(self): - return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % + return ("Path: %s - Type: %s - Desc: %s - Config: %s - Hash : %s" % (self.path, self.type, self.description, str(self.config), self.get_unique_id())) -class VaultSecretEngine: - """ - Vault secrete engine container - """ - type = None - path = None - description = None - config = None - secret_config = None +class VaultAuthMethod(VaultConfigBase): + auth_config = None extra_config = None - def __init__(self, type, path, description, config=None, secret_config=None, extra_config=None): - """ - Instantiate Class - - :param type: Secret type - :type type: str - :param path: Secret mount point - :type path: str - :param description: Secret description - :type description: str - :param config: Secret basic config - :type config: dict - :param secret_config: Secret specific configuration - :type secret_config: dict - :param extra_config: Secret extra configuration - :type extra_config: dict - """ - self.type = type - self.path = path.replace("/", "") - self.description = (description if description else "") - self.config = dict() - self.config["force_no_cache"] = False - for elem in config: - if config[elem] != "": - self.config[elem] = config[elem] - self.secret_config = secret_config - self.extra_config = extra_config - - def get_unique_id(self): - """ - Return a unique hash by secret engine only using the type and path - - :return: str - """ - unique_str = str(self.type + self.path) - sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() - return sha256_hash - - def __eq__(self, other): - return self.get_unique_id() == other.get_unique_id() + def __init__(self, type, path, description, config=None, auth_config=None, extra_config=None): + super().__init__(type, path, description, config) - def __repr__(self): - return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % - (self.path, self.type, self.description, str(self.config), self.get_unique_id())) + self.auth_config = auth_config or {} + self.extra_config = 
extra_config or {} -class VaultAuditDevice: - type = None - path = None - description = None - options = None +class VaultSecretEngine(VaultConfigBase): + secret_config = None + extra_config = None - def __init__(self, type, path, description, options): - self.type = type - self.path = path.replace("/", "") - self.description = (description if description else "") - self.options = options + def __init__(self, type, path, description, config=None, secret_config=None, extra_config=None): + super().__init__(type, path, description, config) - def get_device_unique_id(self): - unique_str = str(self.type + self.path + - self.description + str(self.options)) - sha256_hash = hashlib.sha256(unique_str.encode()).hexdigest() - return sha256_hash + self.secret_config = secret_config or {} + self.extra_config = extra_config or {} - def __eq__(self, other): - return self.get_device_unique_id() == other.get_device_unique_id() - def __repr__(self): - return ("Path: %s - Type: %s - Desc: %s - Options: %s - Hash : %s" % - (self.path, self.type, self.description, str(self.options), self.get_device_unique_id())) +class VaultAuditDevice(VaultConfigBase): + def __init__(self, type, path, description, config=None): + super().__init__(type, path, description, config) class VaultPolicyManager(): """ @@ -242,9 +176,8 @@ def get_remote_policies(self, client, ret): ', '.join(polices)) log.info('Finished retrieving policies from vault.') - except Exception as e: - ret['result'] = False - log.exception(e) + except Exception: + raise return polices @@ -295,13 +228,12 @@ def push_policies(self, client, remote_policies, local_policies, ret): # Build return object ret['changes']['old'] = remote_policies - if len(new_policies) > 0: + if new_policies: ret['changes']['new'] = json.loads(json.dumps(new_policies)) else: ret['changes']['new'] = "No changes" - except Exception as e: - ret['result'] = False - log.exception(e) + except Exception: + raise def cleanup_policies(self, client, remote_policies, local_policies, ret): """ @@ -323,31 +255,8 @@ def cleanup_policies(self, client, remote_policies, local_policies, ret): [ob['name'] for ob in local_policies])) log.info('Finished cleaning up vault policies.') - except Exception as e: - ret['result'] = False - log.exception(e) - - def sync(self, client, policy_dir, ret): - - log.info('-------------------------------------') - - remote_policies = [] - local_policies = [] - - if client == None: - client = __utils__['vault.build_client']() - try: - remote_policies = self.get_remote_policies(client, ret) - local_policies = self.get_local_policies(policy_dir, ret) - self.push_policies(client, remote_policies, local_policies, ret) - self.cleanup_policies(client, remote_policies, local_policies, ret) - - ret['result'] = True - except Exception as e: - ret['result'] = False - log.exception(e) - log.info('-------------------------------------') - return ret + except Exception: + raise class VaultAuthManager(): @@ -465,7 +374,7 @@ def configure_auth_methods(self, client, remote_methods, local_methods, ret): 'Provisioning extra configurations for auth method "%s"', auth_method.type) # Get LDAP group mapping from vault ldap_list_group_response = client.auth.ldap.list_groups() - if ldap_list_group_response != None: + if ldap_list_group_response: ldap_groups = ldap_list_group_response["data"]["keys"] log.debug("LDAP groups from vault: %s", str(ldap_groups)) @@ -484,7 +393,7 @@ def configure_auth_methods(self, client, remote_methods, local_methods, ret): ) # Clean up LDAP group mapping - if 
ldap_groups != None: + if ldap_groups: for group in ldap_groups: if group in {k.lower(): v for k, v in local_config_groups.items()}: log.debug( @@ -492,9 +401,7 @@ def configure_auth_methods(self, client, remote_methods, local_methods, ret): else: log.info( 'LDAP group mapping ["%s"] does not exists in configuration, deleting...', group) - client.auth.ldap.delete_group( - name=group - ) + client.auth.ldap.delete_group(name=group) log.info( 'LDAP group mapping ["%s"] deleted.', group) else: @@ -505,7 +412,7 @@ def configure_auth_methods(self, client, remote_methods, local_methods, ret): ret['changes']['old'] = json.loads(json.dumps( [ob.type for ob in remote_methods])) - if len(new_auth_methods) > 0: + if new_auth_methods: ret['changes']['new'] = json.loads( json.dumps(new_auth_methods)) else: @@ -585,21 +492,21 @@ def populate_local_secrets_engines(self, configs, ret): secret_config = None extra_config = None + if 'config' in secret_engine: + if secret_engine["config"]: + config = OrderedDict( + sorted(secret_engine["config"].items())) + if 'secret_config' in secret_engine: - if secret_engine["secret_config"] != None: + if secret_engine["secret_config"]: secret_config = OrderedDict( sorted(secret_engine["secret_config"].items())) if 'extra_config' in secret_engine: - if secret_engine["extra_config"] != None: + if secret_engine["extra_config"]: extra_config = OrderedDict( sorted(secret_engine["extra_config"].items())) - if 'config' in secret_engine: - if secret_engine["config"] != None: - config = OrderedDict( - sorted(secret_engine["config"].items())) - local_secret_engines.append(VaultSecretEngine( type=secret_engine["type"], path=secret_engine["path"], @@ -653,7 +560,7 @@ def configure_secrets_engines(self, client, remote_engines, local_engines, ret): log.debug('Secret engine " % s" at path " % s" is enabled.', secret_engine.type, secret_engine.path) - if secret_engine.secret_config != None: + if secret_engine.secret_config: log.info( 'Provisioning specific configurations for "%s" secrets engine...', secret_engine.type) @@ -669,7 +576,7 @@ def configure_secrets_engines(self, client, remote_engines, local_engines, ret): log.info( 'Finished provisioning specific configurations for "%s" secrets engine...', secret_engine.type) - if secret_engine.extra_config != None: + if secret_engine.extra_config: log.info( 'Provisioning extra conifgurations for for "%s" secrets engine...', secret_engine.type) @@ -679,8 +586,8 @@ def configure_secrets_engines(self, client, remote_engines, local_engines, ret): try: existing_roles = client.secrets.activedirectory.list_roles() log.debug(existing_roles) - except Exception as e: - log.exception(e) + except Exception: + raise # Add new roles local_roles = secret_engine.extra_config['roles'] @@ -693,12 +600,11 @@ def configure_secrets_engines(self, client, remote_engines, local_engines, ret): service_account_name=local_roles[key]['service_account_name'], ttl=local_roles[key]['ttl'] ) - except Exception as e: - log.exception(e) - raise salt.exceptions.SaltInvocationError(e) + except Exception: + raise # Remove missing roles - if existing_roles != None: + if existing_roles: for role in existing_roles: if role in {k.lower(): v for k, v in local_roles.items()}: log.debug( @@ -724,7 +630,7 @@ def configure_secrets_engines(self, client, remote_engines, local_engines, ret): ret['changes']['old'] = json.loads(json.dumps([ "Type: {} - Path: {}".format(ob.type, ob.path) for ob in remote_engines])) - if len(new_secrets_engines) > 0: + if new_secrets_engines: 
ret['changes']['new'] = json.loads( json.dumps(new_secrets_engines)) else: @@ -775,6 +681,7 @@ def get_remote_audit_devices(self, client, ret): devices = [] try: audit_devices_resp = client.sys.list_enabled_audit_devices() + log.debug(audit_devices_resp) for device in audit_devices_resp['data']: audit_device = audit_devices_resp[device] devices.append( @@ -783,7 +690,8 @@ def get_remote_audit_devices(self, client, ret): path=(audit_device["path"] if 'path' in audit_device else device), description=audit_device["description"], - options=json.dumps(audit_device["options"]) + config=OrderedDict( + sorted(audit_device["options"].items())) ) ) @@ -799,16 +707,18 @@ def get_local_audit_devices(self, configs, ret): if configs: try: for audit_device in configs: - if 'options' in audit_device: - options = json.dumps(audit_device["options"]) - log.debug(options) + config = None + if 'config' in audit_device: + if audit_device['config']: + config = OrderedDict( + sorted(audit_device["config"].items())) devices.append( VaultAuditDevice( type=audit_device["type"], path=audit_device["path"], description=audit_device["description"], - options=options + config=config ) ) @@ -840,7 +750,7 @@ def configure_audit_devices(self, client, remote_devices, local_devices, ret): device_type=audit_device.type, path=audit_device.path, description=audit_device.description, - options=json.loads(audit_device.options) + options=audit_device.config ) log.debug('Audit device "%s" at path "%s" is enabled.', audit_device.type, audit_device.path) @@ -850,7 +760,7 @@ def configure_audit_devices(self, client, remote_devices, local_devices, ret): ret['changes']['old'] = json.loads(json.dumps( [ob.type for ob in remote_devices])) - if len(new_audit_devices) > 0: + if new_audit_devices: ret['changes']['new'] = json.loads( json.dumps(new_audit_devices)) else: diff --git a/salt/_states/vault.py b/salt/_states/vault.py index 042cefc..c75a019 100644 --- a/salt/_states/vault.py +++ b/salt/_states/vault.py @@ -5,20 +5,24 @@ log = logging.getLogger(__name__) +DEPS_INSTALLED = False +IMPORT_ERROR = "" try: import hvac import boto3 DEPS_INSTALLED = True except ImportError as e: - log.debug('Unable to import the libraries.') - log.exception(e) - DEPS_INSTALLED = False + IMPORT_ERROR = e + pass __all__ = ['initialize'] def __virtual__(): - return DEPS_INSTALLED + if DEPS_INSTALLED: + return 'vault' + else: + return False, 'Missing required dependency. 
{}'.format(IMPORT_ERROR) def initialized(name, ssm_path, recovery_shares=5, recovery_threshold=3): diff --git a/salt/_utils/vault.py b/salt/_utils/vault.py index a74ce72..3ba284c 100644 --- a/salt/_utils/vault.py +++ b/salt/_utils/vault.py @@ -1,14 +1,8 @@ -from __future__ import absolute_import, print_function, unicode_literals +from __future__ import absolute_import import logging import os -import requests -import json -import yaml import hvac -from collections import OrderedDict -from functools import wraps - log = logging.getLogger(__name__) logging.getLogger("requests").setLevel(logging.WARNING) @@ -37,14 +31,3 @@ def get_vault_url(): url = "http://localhost" return "{}:{}".format(url, port) - - -def load_config_file(config_path): - configs = None - with open(os.path.join(config_path), 'r') as fd: - try: - configs = yaml.load(fd) - except yaml.YAMLError as e: - log.critical("Unable to load conf file: " + str(e)) - return False - return configs diff --git a/salt/vault/sync.sls b/salt/vault/sync.sls index 9b428a9..450e3ad 100644 --- a/salt/vault/sync.sls +++ b/salt/vault/sync.sls @@ -16,12 +16,6 @@ sync_secrets_engines: config: default_lease_ttl: 1800 max_lease_ttl: 1800 - - type: database - path: db1 - description: database secrets mount - config: - default_lease_ttl: 30m - max_lease_ttl: 60m sync_authentication_methods: vault.auth_methods_synced: @@ -39,7 +33,7 @@ sync_audit_devices: - type: file path: file_log description: first audit device - options: + config: file_path: /etc/vault/logs/audit.log sync_policies: From faca0a1b74ca51b9d6214cbd260ac705eefd26c8 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 25 Sep 2019 08:27:35 -0400 Subject: [PATCH 29/34] Adds support for super() in py2 --- salt/_modules/vault.py | 4 ++- salt/vault/maps/osfamilymap.yaml | 5 +++- scripts/appscript.sh | 45 ++++++++++++++++++++------------ 3 files changed, 36 insertions(+), 18 deletions(-) diff --git a/salt/_modules/vault.py b/salt/_modules/vault.py index 943ae68..1c92933 100644 --- a/salt/_modules/vault.py +++ b/salt/_modules/vault.py @@ -4,6 +4,7 @@ library. 
""" from __future__ import absolute_import +from builtins import super import logging import hashlib @@ -73,7 +74,7 @@ def get_audit_device_manager(): return VaultAuditManager() -class VaultConfigBase: +class VaultConfigBase(object): type = None path = None description = None @@ -151,6 +152,7 @@ class VaultAuditDevice(VaultConfigBase): def __init__(self, type, path, description, config=None): super().__init__(type, path, description, config) + class VaultPolicyManager(): """ Module for managing policies within Vault diff --git a/salt/vault/maps/osfamilymap.yaml b/salt/vault/maps/osfamilymap.yaml index fc8b25a..d5d1485 100644 --- a/salt/vault/maps/osfamilymap.yaml +++ b/salt/vault/maps/osfamilymap.yaml @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- - +# vim: ft=yaml +--- RedHat: platform: linux_amd64 gpg_pkg: gnupg2 @@ -13,6 +14,7 @@ RedHat: - openssl-devel pip_deps: - hvac + - future - testinfra - boto3 @@ -30,5 +32,6 @@ Debian: pip_deps: - pyopenssl - hvac + - future - testinfra - boto3 diff --git a/scripts/appscript.sh b/scripts/appscript.sh index 8ccbae4..ce66cf1 100644 --- a/scripts/appscript.sh +++ b/scripts/appscript.sh @@ -3,8 +3,11 @@ set -eu -o pipefail # Required vars SALT_ARCHIVE=${salt_content_archive} +PILLAR_ARCHIVE=${pillar_archive} SALT_DIR="/srv/salt" +PILLAR_DIR="/srv/pillar" ARCHIVE_FILE_NAME="salt_formula.zip" +PILLAR_FILE_NAME="pillar.zip" # Standard aws envs AWS_DEFAULT_REGION=$(curl -sSL http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/.$//') @@ -15,8 +18,8 @@ yum install unzip jq -y echo "[appscript]: Ensuring default salt srv location exists, $SALT_DIR..." mkdir -p $SALT_DIR -echo "[appscript]: Download salt formula archive file from s3://$SALT_ARCHIVE..." -aws s3 cp "s3://$SALT_ARCHIVE" $ARCHIVE_FILE_NAME +echo "[appscript]: Download salt formula archive file from $SALT_ARCHIVE..." +aws s3 cp $SALT_ARCHIVE $ARCHIVE_FILE_NAME echo "[appscript]: Unzip salt formula archive file to $SALT_DIR" unzip $ARCHIVE_FILE_NAME -d $SALT_DIR @@ -24,14 +27,20 @@ unzip $ARCHIVE_FILE_NAME -d $SALT_DIR echo "[appscript]: Remove salt formula archive file $ARCHIVE_FILE_NAME" rm $ARCHIVE_FILE_NAME -echo "[appscript]: Configuring salt to read ec2 metadata into grains..." -echo "metadata_server_grains: True" > /etc/salt/minion.d/metadata.conf +echo "[appscript]: Ensuring default pillar location exists, $PILLAR_DIR..." +mkdir -p $PILLAR_DIR -echo "[appscript]: Setting the required salt grains for vault..." -salt-call --local grains.setval vault ${salt_grains_json} +echo "[appscript]: Download pillar archive file from $PILLAR_ARCHIVE..." +aws s3 cp $PILLAR_ARCHIVE $PILLAR_FILE_NAME -echo "[appscript]: Update minion config to allow module.run..." -printf 'use_superseded:\n - module.run\n' >> /etc/salt/minion +echo "[appscript]: Unzip pillar archive file to $PILLAR_DIR" +unzip $PILLAR_FILE_NAME -d $PILLAR_DIR + +echo "[appscript]: Remove pillar archive file $PILLAR_FILE_NAME" +rm $PILLAR_FILE_NAME + +echo "[appscript]: Configuring salt to read ec2 metadata into grains..." +echo "metadata_server_grains: True" > /etc/salt/minion.d/metadata.conf echo "[appscript]: Print out salt versions report" salt-call --local --versions-report @@ -40,37 +49,41 @@ echo "[appscript]: Updating salt states/modules/utils/grains..." salt-call --local saltutil.sync_all echo "[appscript]: Retrieving path for directory storing log files..." 
-LOGS_DIR=$(salt-call --local grains.get 'vault:logs_path' --output=json | jq .[] -r) +LOGS_DIR=$(salt-call --local pillar.get 'vault:lookup:logs_dir' --output=json | jq .[] -r) export LOGS_DIR +echo "[appscript]: Retrieving logs path from pillar..." +LOGS_PATH=$(salt-call --local pillar.get 'vault:lookup:logs_path' --output=json | jq .[] -r) +export LOGS_PATH + echo "[appscript]: Ensuring logs dir location exists, $LOGS_DIR..." mkdir -p $LOGS_DIR echo "[appscript]: Installing vault and configuring service, firewall..." -salt-call --local --retcode-passthrough state.sls vault -l info 2>&1 | tee $LOGS_DIR/state.vault.log +salt-call --local --retcode-passthrough state.sls vault -l info 2>&1 | tee $LOGS_PATH.log echo "[appscript]: Initializing vault..." -salt-call --local --retcode-passthrough state.sls vault.initialize -l info 2>&1 | tee $LOGS_DIR/state.vault.initialize.log +salt-call --local --retcode-passthrough state.sls vault.initialize -l info 2>&1 | tee $LOGS_PATH.initialize.log echo "[appscript]: Sync configurations with the vault..." -SSM_PATH=$(salt-call --local grains.get 'vault:ssm_path' --output=json | jq .[] -r) +SSM_PATH=$(salt-call --local pillar.get 'vault:lookup:ssm_path' --output=json | jq .[] -r) export SSM_PATH VAULT_TOKEN=$(aws ssm get-parameter --name /"$SSM_PATH"/root_token --with-decryption --query 'Parameter.Value' | tr -d '"') export VAULT_TOKEN -salt-call --local --retcode-passthrough state.sls vault.sync -l info 2>&1 | tee $LOGS_DIR/state.vault.sync.log +salt-call --local --retcode-passthrough state.sls vault.sync -l info 2>&1 | tee $LOGS_PATH.sync.log echo "[appscript]: Retrieving Vault's status" # Get api port for vault server -API_PORT=$(salt-call --local grains.get 'vault:api_port' --output=json | jq .[]) +API_PORT=$(salt-call --local pillar.get 'vault:lookup:api_port' --output=json | jq .[]) export API_PORT -# Set up vault address +# Exports Vault's address VAULT_ADDR=http://127.0.0.1:$API_PORT export VAULT_ADDR -# Retrieve vault status +# Retrieves Vault's status vault status echo "[appscript]: Completed appscript vault successfully!" 
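Note on the pillar lookups above: the appscript now assumes the pillar archive unpacked to
/srv/pillar serves a vault:lookup dictionary (the pillar tree itself is rendered by the
Terraform template_dir resource added in the next patch). A minimal sketch of the shape the
script expects; the values shown here are illustrative placeholders, not rendered output:

    # /srv/pillar/vault/init.sls (sketch; values are placeholders)
    vault:
      lookup:
        api_port: 8200
        logs_dir: /var/log/vault
        logs_path: /var/log/vault/state.vault
        ssm_path: vault/dev/000000000000/vault

With a pillar like this in place, `salt-call --local pillar.get 'vault:lookup:api_port'`
prints 8200, which the script uses to build VAULT_ADDR for the final `vault status` check.
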
From 3e1005192b3e377ee87072a61fa409deb830ca3e Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 25 Sep 2019 09:17:22 -0400 Subject: [PATCH 30/34] Switch out grains and use pillar for configurations --- main.tf | 179 +++++++------- salt/_modules/vault.py | 324 ++++++++++++++++---------- salt/_states/vault.py | 50 ++-- salt/_utils/vault.py | 24 +- salt/vault/map.jinja | 6 +- salt/vault/maps/defaults.yaml | 8 +- salt/vault/maps/initfamilymap.yaml | 4 +- salt/vault/sync.sls | 36 +-- tests/Gopkg.lock | 285 ---------------------- tests/Gopkg.toml | 34 --- tests/vault-py2/main.tf | 2 +- tests/vault-py2/pillar/top.sls | 3 + tests/vault-py2/pillar/vault/init.sls | 48 ++++ variables.tf | 11 +- 14 files changed, 400 insertions(+), 614 deletions(-) delete mode 100644 tests/Gopkg.lock delete mode 100644 tests/Gopkg.toml create mode 100644 tests/vault-py2/pillar/top.sls create mode 100644 tests/vault-py2/pillar/vault/init.sls diff --git a/main.tf b/main.tf index 878da56..b88bb5d 100644 --- a/main.tf +++ b/main.tf @@ -11,38 +11,42 @@ terraform { ### locals { - vpc_id = element(data.aws_subnet.lb.*.vpc_id, 0) - archive_file_name = "salt.zip" - configs_file_name = "configs.zip" - appscript_file_name = "appscript.sh" - config_dir_path = "/etc/vault/configs" - logs_path = "/var/log/vault" - default_enabled_repos = ["epel"] - default_inbound_cdirs = ["10.0.0.0/16", "10.0.0.0/8"] - s3_bucket_name = join("-", [var.name, random_string.this.result]) - appscript_url = join("/", [module.s3_bucket.id, local.appscript_file_name]) - archive_dir_path = join("/", [path.module, ".files"]) - appscript_dir_path = join("/", [path.module, "scripts"]) - role_name = join("-", [upper(var.name), "INSTANCE", data.aws_caller_identity.current.account_id]) - ssm_root_path = join("/", ["vault", var.environment, data.aws_caller_identity.current.account_id, var.name]) - s3_salt_vault_content = join("/", [module.s3_bucket.id, local.archive_file_name]) - s3_vault_configuration = var.vault_configs_path == null ? "" : join("/", [module.s3_bucket.id, local.configs_file_name]) - dynamodb_table = var.dynamodb_table == null ? join("", aws_dynamodb_table.this.*.id) : var.dynamodb_table - kms_key_id = var.kms_key_id == null ? join("", aws_kms_key.this.*.id) : var.kms_key_id - certificate_arn = var.certificate_arn == null ? join("", aws_acm_certificate.this.*.id) : var.certificate_arn - vault_url = var.vault_url == null ? 
join(".", [var.name, var.domain_name]) : var.vault_url + vpc_id = data.aws_subnet.lb[0].vpc_id + bucket_name = "${var.name}-${random_string.this.result}" + archive_file_name = "salt.zip" + configs_file_name = "configs.zip" + appscript_file_name = "appscript.sh" + pillar_file_name = "pillar.zip" + logs_dir = "/var/log/vault" + logs_path = "${local.logs_dir}/state.vault" + enabled_repos = "epel" + default_inbound_cdirs = ["10.0.0.0/16"] + s3_appscript_url = "s3://${module.s3_bucket.this_s3_bucket_id}/${local.appscript_file_name}" + s3_salt_vault_content = "s3://${module.s3_bucket.this_s3_bucket_id}/${local.archive_file_name}" + s3_pillar_url = "s3://${module.s3_bucket.this_s3_bucket_id}/${local.pillar_file_name}" + archive_path = join("/", [path.module, ".files", local.archive_file_name]) + pillar_path = join("/", [path.cwd, ".files", local.pillar_file_name]) + appscript_path = join("/", [path.module, "scripts", local.appscript_file_name]) + ssm_root_path = join("/", ["vault", var.environment, data.aws_caller_identity.current.account_id, var.name]) + role_name = join("-", [upper(var.name), "INSTANCE", data.aws_caller_identity.current.account_id]) + dynamodb_table = var.dynamodb_table == null ? aws_dynamodb_table.this[0].id : var.dynamodb_table + kms_key_id = var.kms_key_id == null ? aws_kms_key.this[0].id : var.kms_key_id + certificate_arn = var.certificate_arn == null ? aws_acm_certificate.this[0].id : var.certificate_arn + vault_url = var.vault_url == null ? join(".", [var.name, var.domain_name]) : var.vault_url # Logs files to be streamed to CloudWatch Logs logs = [ - join("/", [local.logs_path, "state.vault.log"]), - join("/", [local.logs_path, "state.vault.initialize.log"]), - join("/", [local.logs_path, "state.vault.sync.log"]) + "${local.logs_path}.log", + "${local.logs_path}.initialize.log", + "${local.logs_path}.sync.log", ] - tags = merge(var.tags, + tags = merge( { + Name = var.name, Environment = var.environment - } + }, + var.tags ) } @@ -76,51 +80,49 @@ data "aws_subnet" "lb" { id = var.lb_subnet_ids[count.index] } -data "archive_file" "salt" { - type = "zip" - source_dir = join("/", [path.module, "salt"]) - output_path = join("/", [local.archive_dir_path, local.archive_file_name]) +# Manage vault pillar +resource "template_dir" "pillar" { + source_dir = var.vault_pillar_path + destination_dir = "${path.cwd}/.files/pillar" + + vars = { + api_port = var.api_port + cluster_port = var.cluster_port + dynamodb_table = local.dynamodb_table + inbound_cidrs = jsonencode(concat(var.inbound_cidrs, local.default_inbound_cdirs)) + kms_key_id = local.kms_key_id + logs_dir = local.logs_dir + logs_path = local.logs_path + region = data.aws_region.current.name + ssm_path = local.ssm_root_path + vault_version = var.vault_version + } } -data "archive_file" "configs" { - count = var.vault_configs_path == null ? 
0 : 1 +data "archive_file" "pillar" { type = "zip" - source_dir = var.vault_configs_path - output_path = join("/", [local.archive_dir_path, local.configs_file_name]) + source_dir = template_dir.pillar.destination_dir + output_path = local.pillar_path } -data "template_file" "appscript" { - template = file(join("/", [local.appscript_dir_path, local.appscript_file_name])) - - vars = { - salt_content_archive = local.s3_salt_vault_content - - salt_grains_json = join("", ["'", jsonencode({ - api_port = var.api_port - cluster_port = var.cluster_port - dynamodb_table = local.dynamodb_table - inbound_cidrs = concat(var.inbound_cidrs, local.default_inbound_cdirs) - kms_key_id = local.kms_key_id - logs_path = local.logs_path - region = data.aws_region.current.name - ssm_path = local.ssm_root_path - version = var.vault_version - }), "'"]) - } +resource "aws_s3_bucket_object" "pillar" { + bucket = module.s3_bucket.this_s3_bucket_id + key = local.pillar_file_name + source = local.pillar_path + etag = data.archive_file.pillar.output_md5 } # Manage S3 bucket module module "s3_bucket" { - source = "terraform-aws-modules/s3-bucket/aws" - version = "0.0.1" + source = "git::https://github.com/terraform-aws-modules/terraform-aws-s3-bucket.git?ref=v0.1.0" - bucket = local.s3_bucket_name + bucket = local.bucket_name } resource "aws_s3_bucket_policy" "this" { - bucket = module.s3_bucket.id - policy = templatefile("${path.module}/policies/bucket_policy.json", { bucket_arn = module.s3_bucket.arn }) + bucket = module.s3_bucket.this_s3_bucket_id + policy = templatefile("${path.module}/policies/bucket_policy.json", { bucket_arn = module.s3_bucket.this_s3_bucket_arn }) } # Manage IAM module @@ -129,7 +131,7 @@ module "iam" { role_name = local.role_name policy_vars = { - bucket_name = module.s3_bucket.id + bucket_name = module.s3_bucket.this_s3_bucket_id dynamodb_table = local.dynamodb_table kms_key_id = local.kms_key_id stack_name = var.name @@ -144,24 +146,32 @@ resource "random_string" "this" { upper = false } -# Manage archive and appscript files +# Manage archive, appscript, pillar files + +data "archive_file" "salt" { + type = "zip" + source_dir = "${path.module}/salt" + output_path = local.archive_path +} + resource "aws_s3_bucket_object" "salt_zip" { - bucket = module.s3_bucket.id + bucket = module.s3_bucket.this_s3_bucket_id key = local.archive_file_name - source = join("/", [local.archive_dir_path, local.archive_file_name]) + source = local.archive_path etag = data.archive_file.salt.output_md5 } -resource "aws_s3_bucket_object" "configs_zip" { - count = var.vault_configs_path == null ? 0 : 1 - bucket = module.s3_bucket.id - key = local.configs_file_name - source = join("/", [local.archive_dir_path, local.configs_file_name]) - etag = data.archive_file.configs[count.index].output_md5 +data "template_file" "appscript" { + template = file(local.appscript_path) + + vars = { + salt_content_archive = local.s3_salt_vault_content + pillar_archive = local.s3_pillar_url + } } resource "aws_s3_bucket_object" "app_script" { - bucket = module.s3_bucket.id + bucket = module.s3_bucket.this_s3_bucket_id key = local.appscript_file_name content = data.template_file.appscript.rendered etag = md5(data.template_file.appscript.rendered) @@ -171,14 +181,14 @@ resource "aws_s3_bucket_object" "app_script" { resource "aws_kms_alias" "this" { count = var.kms_key_id == null ? 
1 : 0 name = "alias/${var.name}" - target_key_id = join("", aws_kms_key.this.*.key_id) + target_key_id = aws_kms_key.this[0].key_id } resource "aws_kms_key" "this" { count = var.kms_key_id == null ? 1 : 0 description = "KMS Key for ${var.name}" - tags = merge({ Name = var.name }, local.tags) + tags = local.tags } # Manage domain record @@ -201,7 +211,7 @@ resource "aws_acm_certificate" "this" { domain_name = local.vault_url validation_method = "DNS" - tags = merge({ Name = var.name }, local.tags) + tags = local.tags lifecycle { create_before_destroy = true @@ -211,18 +221,18 @@ resource "aws_acm_certificate" "this" { resource "aws_route53_record" "cert_validation" { count = var.certificate_arn == null ? 1 : 0 - name = join("", aws_acm_certificate.this.*.domain_validation_options.0.resource_record_name) - type = join("", aws_acm_certificate.this.*.domain_validation_options.0.resource_record_type) + name = aws_acm_certificate.this[0].domain_validation_options[0].resource_record_name + type = aws_acm_certificate.this[0].domain_validation_options[0].resource_record_type zone_id = var.route53_zone_id - records = ["${join("", aws_acm_certificate.this.*.domain_validation_options.0.resource_record_value)}"] + records = aws_acm_certificate.this[*].domain_validation_options[0].resource_record_value ttl = 60 } resource "aws_acm_certificate_validation" "this" { count = var.certificate_arn == null ? 1 : 0 - certificate_arn = join("", aws_acm_certificate.this.*.arn) - validation_record_fqdns = ["${join("", aws_route53_record.cert_validation.*.fqdn)}"] + certificate_arn = aws_acm_certificate.this[0].arn + validation_record_fqdns = aws_route53_record.cert_validation[*].fqdn } # Manage load balancer @@ -232,7 +242,7 @@ resource "aws_lb" "this" { security_groups = [aws_security_group.lb.id] subnets = var.lb_subnet_ids - tags = merge({ Name = var.name }, local.tags) + tags = local.tags } resource "aws_lb_listener" "http" { @@ -287,7 +297,7 @@ resource "aws_lb_target_group" "this" { unhealthy_threshold = "2" } - tags = merge({ Name = var.name }, local.tags) + tags = local.tags } # Manage security groups @@ -371,7 +381,7 @@ resource "aws_dynamodb_table" "this" { type = "S" } - tags = merge({ Name = var.name }, local.tags) + tags = local.tags } resource "aws_appautoscaling_target" "this" { @@ -387,11 +397,11 @@ resource "aws_appautoscaling_target" "this" { resource "aws_appautoscaling_policy" "this" { count = var.dynamodb_table == null ? 
1 : 0 - name = join(":", ["DynamoDBReadCapacityUtilization", join("", aws_appautoscaling_target.this.*.resource_id)]) + name = join(":", ["DynamoDBReadCapacityUtilization", aws_appautoscaling_target.this[0].resource_id]) policy_type = "TargetTrackingScaling" - resource_id = join("", aws_appautoscaling_target.this.*.resource_id) - scalable_dimension = join("", aws_appautoscaling_target.this.*.scalable_dimension) - service_namespace = join("", aws_appautoscaling_target.this.*.service_namespace) + resource_id = aws_appautoscaling_target.this[0].resource_id + scalable_dimension = aws_appautoscaling_target.this[0].scalable_dimension + service_namespace = aws_appautoscaling_target.this[0].service_namespace target_tracking_scaling_policy_configuration { predefined_metric_specification { @@ -412,7 +422,7 @@ module "autoscaling_group" { AmiId = data.aws_ami.this.id AmiDistro = "CentOS" - AppScriptUrl = join("", ["s3://", local.appscript_url]) + AppScriptUrl = local.s3_appscript_url CfnBootstrapUtilsUrl = var.cfn_bootstrap_utils_url CfnEndpointUrl = var.cfn_endpoint_url @@ -440,6 +450,5 @@ module "autoscaling_group" { MinCapacity = var.min_capacity MaxCapacity = var.max_capacity - EnableRepos = join(" ", concat(var.enabled_repos, local.default_enabled_repos)) - + EnableRepos = local.enabled_repos } diff --git a/salt/_modules/vault.py b/salt/_modules/vault.py index 1c92933..6783bd8 100644 --- a/salt/_modules/vault.py +++ b/salt/_modules/vault.py @@ -35,41 +35,37 @@ def __virtual__(): def get_policies_manager(): - """ - Retrieve an object containing helper methods for the policy manager + """Retrieve an object containing helper methods for the policy manager Returns: - [VaultPolicyManager] -- Policy Manager + [VaultPolicyManager] -- Policy Manager """ return VaultPolicyManager() def get_secret_engines_manager(): - """ - Retrieve an object containing helper methods for the secrets engines manager + """Retrieve an object containing helper methods for the secrets engines manager Returns: - [VaultSecretsManager] -- Secrets Engines Manager + [VaultSecretsManager] -- Secrets Engines Manager """ return VaultSecretsManager() def get_auth_methods_manager(): - """[summary] - Retrieve an object containing helper methods for the auth methods manager + """Retrieve an object containing helper methods for the auth methods manager Returns: - [VaultAuthManager] -- Auth Methods Manager + [VaultAuthManager] -- Auth Methods Manager """ return VaultAuthManager() def get_audit_device_manager(): - """[summary] - Retrieve an object containing helper methods for the audit device manager + """Retrieve an object containing helper methods for the audit device manager Returns: - [VaultAuditManager] -- Audit Device Manager + [VaultAuditManager] -- Audit Device Manager """ return VaultAuditManager() @@ -81,7 +77,7 @@ class VaultConfigBase(object): config = None def __init__(self, type, path, description, config): - """[summary] + """Initialize classs Arguments: type {string} -- The type of the config @@ -97,8 +93,7 @@ def __init__(self, type, path, description, config): self.config = {k: v for k, v in config.items() if v != ''} def get_unique_id(self): - """ - Return a unique hash of the config by only using the type and path + """Return a unique hash of the config by only using the type and path Returns: string -- unique hash of the type and path @@ -106,11 +101,10 @@ def get_unique_id(self): return self.hash_value(self.type + self.path) def get_tuning_hash(self): - """ - Return a unique ID per tuning configuration + """Return a 
unique ID per tuning configuration Returns: - string -- unique hash of the configuration + string -- unique hash of the configuration """ return self.hash_value(self.description + str(self.config)) @@ -154,16 +148,23 @@ def __init__(self, type, path, description, config=None): class VaultPolicyManager(): - """ - Module for managing policies within Vault + """Module for handling Vault Policies """ def __init__(self): - log.info("Initializing Vault Policy Manager...") + """Initialize Vault Policies Manager + """ + log.info("Initializing Vault Policies Manager...") def get_remote_policies(self, client, ret): - """ - Reading policies from configs folder + """Retrieve policies from remote vault server + + Arguments: + client {hvac} -- hvac client + ret {dict} -- salt state result + + Returns: + [list] -- policies """ log.info('Retrieving policies from vault...') polices = [] @@ -174,43 +175,21 @@ def get_remote_policies(self, client, ret): if not (policy == 'root' or policy == 'default'): polices.append(policy) - log.debug('Current policies: %s' % - ', '.join(polices)) - log.info('Finished retrieving policies from vault.') - except Exception: raise - return polices - - def load_local_policies(self, policy_dir, ret): - """ - Reading policies from configs folder - """ - log.info('Loading policies from local config folder...') - policies = [] - try: - for policy_file in glob.iglob(os.path.join(policy_dir, "*.hcl")): - name = os.path.splitext(os.path.basename(policy_file))[0] - prefix = policy_file.split(os.sep)[-2] - log.debug("Local policy %s - prefix: %s - name: %s found" - % (policy_file, prefix, name)) - - with open(policy_file, 'r') as fd: - policies.append({ - "name": name, - "content": fd.read() - }) - - log.info('Finished loading policies local config folder.') - except Exception: - raise + log.info('Finished retrieving policies from vault.') - return policies + return polices def push_policies(self, client, remote_policies, local_policies, ret): - """ - Sync policies from configs folder to vault + """Push policies from local config to remote vault server + + Arguments: + client {hvac} -- hvac client + remote_policies {list} -- policies from the remote vault server + local_policies {list} -- policies from local config + ret {dict} -- salt state result """ log.info('Pushing policies from local config folder to vault...') new_policies = [] @@ -226,20 +205,22 @@ def push_policies(self, client, remote_policies, local_policies, ret): new_policies.append(policy["name"]) log.debug('Policy "%s" has been created.', policy["name"]) - log.info('Finished pushing policies local config folder to vault.') - # Build return object ret['changes']['old'] = remote_policies - if new_policies: - ret['changes']['new'] = json.loads(json.dumps(new_policies)) - else: - ret['changes']['new'] = "No changes" + ret['changes']['new'] = new_policies or "No changes" except Exception: raise + log.info('Finished pushing policies local config folder to vault.') + def cleanup_policies(self, client, remote_policies, local_policies, ret): - """ - Cleaning up policies + """Removes policies that are not present in the local config + + Arguments: + client {hvac} -- hvac client + remote_policies {list} -- policies current on the remote vault server + local_policies {list} --policies from local config + ret {dict} -- salt state result """ log.info('Cleaning up vault policies...') has_change = False @@ -253,25 +234,31 @@ def cleanup_policies(self, client, remote_policies, local_policies, ret): log.debug('"%s" is removed.', policy) if 
has_change: - ret['change']['new'] = json.loads(json.dumps( - [ob['name'] for ob in local_policies])) - - log.info('Finished cleaning up vault policies.') + ret['changes']['new'] = [ob['name'] for ob in local_policies] except Exception: raise + log.info('Finished cleaning up vault policies.') + class VaultAuthManager(): - """ - Module for managing Vault Authentication Methods + """Module for managing Vault Authentication Methods """ def __init__(self): - log.info("Initializing Vault Auth Manager...") + """Initialize Authentication Manager + """ + log.info("Initializing Vault Auth Manager...") def get_remote_auth_methods(self, client, ret): - """ - Retrieve auth methods from vault + """Retrieve authentication methods from remote vault server + + Arguments: + client {hvac} -- hvac client + ret {dict} -- result from state + + Returns: + list -- auth methods """ log.info('Retrieving auth methods from Vault...') auth_resp = client.sys.list_auth_methods() @@ -293,9 +280,19 @@ def get_remote_auth_methods(self, client, ret): raise log.info('Finished retrieving auth methods from vault.') + return auth_methods def populate_local_auth_methods(self, configs, ret): + """Get auth methods from local config + + Arguments: + configs {list} -- auth methods information + ret {dict} -- salt state result + + Returns: + list -- auth methods + """ log.info('Populating local auth methods...') auth_methods = [] @@ -323,13 +320,22 @@ def populate_local_auth_methods(self, configs, ret): extra_config=extra_config ) ) - log.info('Finished populating local auth methods.') except Exception: raise + log.info('Finished populating local auth methods.') + return auth_methods def configure_auth_methods(self, client, remote_methods, local_methods, ret): + """Compare and configure local authentication methods with remote vault server + + Arguments: + client {hvac} -- hvac client + remote_methods {list} -- auth methods from remote vault server + local_methods {list} -- auth methods from local config + ret {dict} -- salt state result + """ log.info('Processing and configuring auth methods...') new_auth_methods = [] @@ -401,30 +407,34 @@ def configure_auth_methods(self, client, remote_methods, local_methods, ret): log.debug( 'LDAP group mapping ["%s"] exists in configuration, no cleanup necessary', group) else: - log.info( - 'LDAP group mapping ["%s"] does not exists in configuration, deleting...', group) + log.debug( + 'LDAP group mapping ["%s"] does not exist in configuration, deleting...', group) client.auth.ldap.delete_group(name=group) - log.info( + log.debug( 'LDAP group mapping ["%s"] deleted.', group) else: log.debug( 'Auth method "%s" does not contain any extra configurations.', auth_method.type ) - # Build return object - ret['changes']['old'] = json.loads(json.dumps( - [ob.type for ob in remote_methods])) - if new_auth_methods: - ret['changes']['new'] = json.loads( - json.dumps(new_auth_methods)) - else: - ret['changes']['new'] = "No changes" + # Build return object + ret['changes']['old'] =[ob.type for ob in remote_methods] + ret['changes']['new'] = new_auth_methods or "No changes" - log.info('Finished processing and configuring auth methods...') except Exception: raise + log.info('Finished processing and configuring auth methods...') + def cleanup_auth_methods(self, client, remote_methods, local_methods, ret): + """Disabling any auth methods not present in the local config + + Arguments: + client {hvac} -- hvac client + remote_methods {list} -- auth methods from remote vault server + local_methods {list} -- auth 
methods from local config
+            ret {dict} -- salt state result
+        """
         log.info('Cleaning up auth methods...')
         has_change = False
@@ -432,37 +442,44 @@ def cleanup_auth_methods(self, client, remote_methods, local_methods, ret):
         for auth_method in remote_methods:
             if auth_method not in local_methods:
                 has_change = True
-                log.info(
+                log.debug(
                     'Auth method "%s" does not exist in configuration. Disabling...', auth_method.type)
                 client.sys.disable_auth_method(
                     path=auth_method.path
                 )
-                log.info('Auth method "%s" is disabled.', auth_method.type)
+                log.debug('Auth method "%s" is disabled.', auth_method.type)
 
-        log.info('Finished cleaning up auth methods.')
         if has_change:
-            ret['changes']['new'] = json.loads(json.dumps(
-                [ob.type for ob in local_methods]))
+            ret['changes']['new'] = [ob.type for ob in local_methods]
 
     except Exception:
         raise
 
+    log.info('Finished cleaning up auth methods.')
+
 
 class VaultSecretsManager():
     """
-    Module for handling Vault secret engines
+    Module for handling Vault Secrets Engines
     """
 
     def __init__(self):
+        """Initialize Vault Secrets Manager
+        """
         log.info("Initializing Vault Secret Manager...")
 
     def get_remote_secrets_engines(self, client, ret):
-        """
-        Retrieve secret engines from vault server
+        """Retrieve secrets engines from remote vault server
+
+        Arguments:
+            client {hvac} -- hvac client
+            ret {dict} -- salt state result
+
+        Returns:
+            list -- secrets engines
         """
         log.info('Retrieving secrets engines from Vault')
         remote_secret_engines = []
         try:
-            log.info(client)
             secrets_engines_resp = client.sys.list_mounted_secrets_engines()
             for engine in secrets_engines_resp['data']:
                 remote_secret_engines.append(
@@ -483,8 +500,14 @@ def get_remote_secrets_engines(self, client, ret):
         return remote_secret_engines
 
     def populate_local_secrets_engines(self, configs, ret):
-        """
-        Retrieving secret engines from local config file
+        """Retrieve secrets engines from local config
+
+        Arguments:
+            configs {list} -- local secrets engines information
+            ret {dict} -- salt state result
+
+        Returns:
+            list -- secrets engines
         """
         log.info('Populating local secret engines...')
         local_secret_engines = []
@@ -526,6 +549,14 @@ def populate_local_secrets_engines(self, configs, ret):
         return local_secret_engines
 
     def configure_secrets_engines(self, client, remote_engines, local_engines, ret):
+        """Compare and configure local secrets engines config against the remote vault server
+
+        Arguments:
+            client {hvac} -- hvac client
+            remote_engines {list} -- secrets engines from remote vault server
+            local_engines {list} -- secrets engines from local vault config
+            ret {dict} -- salt state result
+        """
         log.info('Processing and configuring secrets engines...')
         new_secrets_engines = []
         try:
@@ -552,14 +583,17 @@ def configure_secrets_engines(self, client, remote_engines, local_engines, ret):
                 'Secret engine "%s" at path "%s" is not enabled.
Enabling now...', secret_engine.type, secret_engine.path) - new_secrets_engines.append(secret_engine.type) + client.sys.enable_secrets_engine( backend_type=secret_engine.type, path=secret_engine.path, description=secret_engine.description, config=secret_engine.config ) - log.debug('Secret engine " % s" at path " % s" is enabled.', + + new_secrets_engines.append("type: {} - path: {}".format(secret_engine.type, secret_engine.path)) + + log.debug('Secret engine "%s" at path "%s" is enabled.', secret_engine.type, secret_engine.path) if secret_engine.secret_config: @@ -612,12 +646,12 @@ def configure_secrets_engines(self, client, remote_engines, local_engines, ret): log.debug( 'AD role ["%s"] exists in configuration, no cleanup necessary', role) else: - log.info( + log.debug( 'Ad role ["%s"] does not exists in configuration, deleting...', role) client.secrets.activedirectory.delete_role( name=role ) - log.info( + log.debug( 'AD role has been ["%s"] deleted.', role) else: log.debug( @@ -626,19 +660,21 @@ def configure_secrets_engines(self, client, remote_engines, local_engines, ret): except Exception: raise - log.info('Finished proccessing and configuring secrets engines.') - # Build return object - ret['changes']['old'] = json.loads(json.dumps([ - "Type: {} - Path: {}".format(ob.type, ob.path) for ob in remote_engines])) + ret['changes']['old'] = ["type: {} - path: {}".format(ob.type, ob.path) for ob in remote_engines] + ret['changes']['new'] = new_secrets_engines or "No changes" - if new_secrets_engines: - ret['changes']['new'] = json.loads( - json.dumps(new_secrets_engines)) - else: - ret['changes']['new'] = "No changes" + log.info('Finished proccessing and configuring secrets engines.') def cleanup_secrets_engines(self, client, remote_engines, local_engines, ret): + """Disabling any secrets engines that are not present in the local config + + Arguments: + client {hvac} -- hvac client + remote_engines {list} -- secrets engines from remote vault server + local_engines {list} -- secrets engines from local config + ret {dict} -- salt state result + """ log.info('Cleaning up secrets engines...') has_changes = False @@ -658,27 +694,37 @@ def cleanup_secrets_engines(self, client, remote_engines, local_engines, ret): client.sys.disable_secrets_engine( path=secret_engine.path ) - log.info('Secrets engine "%s" at path "%s" is disabled.', + log.debug('Secrets engine "%s" at path "%s" is disabled.', secret_engine.type, secret_engine.type) except Exception: raise - log.info('Finished cleaning up secrets engines.') - if has_changes: - ret['changes']['new'] = json.loads(json.dumps([ - "Type: {} - Path: {}".format(ob.type, ob.path) for ob in local_engines])) + ret['changes']['new'] = ["type: {} - path: {}".format(ob.type, ob.path) for ob in local_engines] + + log.info('Finished cleaning up secrets engines.') class VaultAuditManager(): """ - Module for handling Vault audit devices + Module for handling Vault Audit Devices """ def __init__(self): + """Initialize Vault Audit Managers + """ log.info("Initializing Vault Audit Manager...") def get_remote_audit_devices(self, client, ret): + """Get audit devices information from remote vault server + + Arguments: + client {hvac} -- hvac client + ret {dict} -- salt state result + + Returns: + list -- audit devices + """ log.info("Retrieving audit devices from vault...") devices = [] try: @@ -704,6 +750,15 @@ def get_remote_audit_devices(self, client, ret): return devices def get_local_audit_devices(self, configs, ret): + """Get audit device inforamtion from local 
config file + + Arguments: + configs {list} -- audit devices + ret {dict} -- salt state result + + Returns: + list -- audit devices + """ log.info("Loading audit devices from local config...") devices = [] if configs: @@ -723,14 +778,22 @@ def get_local_audit_devices(self, configs, ret): config=config ) ) - - log.info('Finished loading audit devices from local config.') except Exception: raise + log.info('Finished loading audit devices from local config.') + return devices def configure_audit_devices(self, client, remote_devices, local_devices, ret): + """Compare and configure audit devices + + Arguments: + client {hvac} -- hvac client + remote_devices {list} -- audit devices from remote vault server + local_devices {list} -- audit devices from local vault config file + ret {dict} -- salt state result + """ log.info('Processing and configuring audit devices...') new_audit_devices = [] try: @@ -757,21 +820,23 @@ def configure_audit_devices(self, client, remote_devices, local_devices, ret): log.debug('Audit device "%s" at path "%s" is enabled.', audit_device.type, audit_device.path) - log.info('Finished processing audit devices.') # Build return object - ret['changes']['old'] = json.loads(json.dumps( - [ob.type for ob in remote_devices])) - - if new_audit_devices: - ret['changes']['new'] = json.loads( - json.dumps(new_audit_devices)) - else: - ret['changes']['new'] = "No changes" - + ret['changes']['old'] = [ob.type for ob in remote_devices] + ret['changes']['new'] = new_audit_devices or "No changes" except Exception: raise + log.info('Finished processing audit devices.') + def cleanup_audit_devices(self, client, remote_devices, local_devices, ret): + """Disabling any audit devices not present in the local config file + + Arguments: + client {hvac} -- hvac client + remote_devices {list} -- list of remote audit devices + local_devices {list} -- list of local audit devices + ret {dict} -- salt state result + """ log.info('Cleaning up audit devices...') has_changes = False try: @@ -783,10 +848,11 @@ def cleanup_audit_devices(self, client, remote_devices, local_devices, ret): client.sys.disable_audit_device( path=audit_device.path ) - log.info('Finished cleaning up audit devices.') if has_changes: - ret['changes']['new'] = json.loads(json.dumps( - [ob.type for ob in local_devices])) + ret['changes']['new'] = [ob.type for ob in local_devices] + except Exception: raise + + log.info('Finished cleaning up audit devices.') diff --git a/salt/_states/vault.py b/salt/_states/vault.py index c75a019..2909fbc 100644 --- a/salt/_states/vault.py +++ b/salt/_states/vault.py @@ -26,20 +26,19 @@ def __virtual__(): def initialized(name, ssm_path, recovery_shares=5, recovery_threshold=3): - """ - Ensure that the vault instance has been initialized and run the - initialization if it has not. Storing the root token to SSM parameter + """Ensure that the vault instance has been initialized and run the + initialization if it has not. Storing the root token to SSM parameter store. Arguments: - name {string} -- The id used for the state definition - ssm_path {string} -- The path to SSM parameter that will store the root token + name {string} -- The id used for the state definition + ssm_path {string} -- The path to SSM parameter that will store the root token Keyword Arguments: - recovery_shares {int} -- Specifies the number of shares to split the recovery key into. (default: {5}) - recovery_threshold {int} -- Specifies the number of shares required to reconstruct the recovery key. 
This must be less than or equal to recovery_shares. (default: {3}) + recovery_shares {int} -- Specifies the number of shares to split the recovery key into. (default: {5}) + recovery_threshold {int} -- Specifies the number of shares required to reconstruct the recovery key. This must be less than or equal to recovery_shares. (default: {3}) Returns: - ret {dict} -- Result of the execution + ret {dict} -- Result of the execution """ ret = {'name': name, 'comment': '', @@ -98,17 +97,16 @@ def initialized(name, ssm_path, recovery_shares=5, recovery_threshold=3): def secret_engines_synced(name, configs=[]): - """ - Ensure secrets engines are synced with Vault + """Ensure secrets engines are synced with Vault Arguments: - name {string} -- The id used for the state definition + name {string} -- The id used for the state definition Keyword Arguments: - configs {list} -- A list of configuration rules that defined the secrets engines (default: []) + configs {list} -- A list of configuration rules that defined the secrets engines (default: []) Returns: - ret {dict} -- Result of the execution + ret {dict} -- Result of the execution """ client = __utils__['vault.build_client']() @@ -159,12 +157,12 @@ def auth_methods_synced(name, configs=[]): Ensure authentication methods are synced with Vault Arguments: - name {string} -- The id used for the state definition + name {string} -- The id used for the state definition Keyword Arguments: - configs {list} -- A list of configuration rules that defined the authentication methods (default: []) + configs {list} -- A list of configuration rules that defined the authentication methods (default: []) Returns: - ret {dict} -- Result of the execution + ret {dict} -- Result of the execution """ client = __utils__['vault.build_client']() @@ -208,16 +206,15 @@ def auth_methods_synced(name, configs=[]): def policies_synced(name, policies=[]): - """ - Ensure policies are synced with Vault + """Ensure policies are synced with Vault Arguments: - name {string} -- The id used for the state definition + name {string} -- The id used for the state definition Keyword Arguments: - policies {list} -- A list of policies to by synced with Vault (default: []) + policies {list} -- A list of policies to by synced with Vault (default: []) Returns: - ret {dict} -- Result of the execution + ret {dict} -- Result of the execution """ client = __utils__['vault.build_client']() @@ -234,7 +231,7 @@ def policies_synced(name, policies=[]): try: remote_policies = policiesManager.get_remote_policies(client, ret) - local_policies = json.loads(json.dumps(policies)) + local_policies = policies policiesManager.push_policies( client, remote_policies, local_policies, ret) policiesManager.cleanup_policies( @@ -248,16 +245,15 @@ def policies_synced(name, policies=[]): def audit_devices_synced(name, configs=[]): - """ - Ensures audit devices are synced with Vault + """Ensure audit devices are synced with Vault Arguments: - name {string} -- The id used for the state definition + name {string} -- The id used for the state definition Keyword Arguments: - configs {list} -- A list of configuration rules that defined the audit devices (default: []) + configs {list} -- A list of configuration rules that defined the audit devices (default: []) Returns: - ret {dict} -- Result of the execution + ret {dict} -- Result of the execution """ client = __utils__['vault.build_client']() diff --git a/salt/_utils/vault.py b/salt/_utils/vault.py index 3ba284c..95704d9 100644 --- a/salt/_utils/vault.py +++ b/salt/_utils/vault.py 
@@ -9,7 +9,6 @@ def build_client(url=None, token=None): - vault_url = url if url != None else get_vault_url() client = hvac.Client( url=vault_url, @@ -23,11 +22,18 @@ def build_client(url=None, token=None): def get_vault_url(): - ''' - Returns a string consist of url and port number - ''' - port = __grains__['vault']['api_port'] if __grains__[ - 'vault']['api_port'] != None else 8200 - url = "http://localhost" - - return "{}:{}".format(url, port) + """Construct Vault server's URL + + Returns: + string -- URL of the the vault server + """ + + # default port for vault server is 8200 + port = 8200 + + try: + port = __pillar__['vault']['lookup']['api_port'] + except Exception: + pass + + return "http://localhost:{}".format(port) diff --git a/salt/vault/map.jinja b/salt/vault/map.jinja index 0866d6e..7eef7d8 100644 --- a/salt/vault/map.jinja +++ b/salt/vault/map.jinja @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +# vim: ft=sls syntax=yaml softtabstop=2 tabstop=2 shiftwidth=2 expandtab autoindent {% import_yaml "vault/maps/defaults.yaml" or {} as defaults %} {% import_yaml "vault/maps/osfamilymap.yaml" or {} as osfamilymap %} @@ -8,7 +9,10 @@ default='vault', merge=salt.grains.filter_by(osfamilymap, grain='os_family', merge=salt.grains.filter_by(initfamilymap, grain='init', - merge=salt.grains.get('vault', default={}) + merge=salt.pillar.get('vault:lookup', default={}) ) ) ) %} + +{#- Merge the vault pillar #} +{%- set vault = salt.pillar.get('vault', default=vault, merge=True) %} diff --git a/salt/vault/maps/defaults.yaml b/salt/vault/maps/defaults.yaml index db1b28b..d61ba97 100644 --- a/salt/vault/maps/defaults.yaml +++ b/salt/vault/maps/defaults.yaml @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- - +# vim: ft=yaml +--- vault: - version: 1.1.3 + version: 1.2.0 repo_base_url: "https://releases.hashicorp.com/vault" dev_mode: False dev_configs: "" @@ -15,7 +15,7 @@ vault: max_lease_ttl: 192h #one week recovery_shares: 5 recovery_threshold: 3 - config_dir_path: /srv/salt/vault/configs region: "" dynamodb_table: "" kms_key_id: "" + ssm_path: "" diff --git a/salt/vault/maps/initfamilymap.yaml b/salt/vault/maps/initfamilymap.yaml index acd8dad..1a64a77 100644 --- a/salt/vault/maps/initfamilymap.yaml +++ b/salt/vault/maps/initfamilymap.yaml @@ -1,4 +1,6 @@ -# -*- coding: utf-8 -*-] +# -*- coding: utf-8 -*- +# vim: ft=yaml +--- systemd: service: path: /etc/systemd/system/vault.service diff --git a/salt/vault/sync.sls b/salt/vault/sync.sls index 450e3ad..5b54fe0 100644 --- a/salt/vault/sync.sls +++ b/salt/vault/sync.sls @@ -9,44 +9,16 @@ vault_logs_dir: sync_secrets_engines: vault.secret_engines_synced: - - configs: - - type: kv - path: services - description: Sevices specific folders - config: - default_lease_ttl: 1800 - max_lease_ttl: 1800 + - configs: {{ vault.secrets_engines }} sync_authentication_methods: vault.auth_methods_synced: - - configs: - - type: token - path: token - description: token based credentials - config: - default_lease_ttl: 0 - max_lease_ttl: 0 + - configs: {{ vault.auth_methods }} sync_audit_devices: vault.audit_devices_synced: - - configs: - - type: file - path: file_log - description: first audit device - config: - file_path: /etc/vault/logs/audit.log + - configs: {{ vault.audit_devices }} sync_policies: vault.policies_synced: - - policies: - - name: xyz_admin - content: - path: - '*': {capabilities: [read, create]} - 'stage/*': {capabilities: [read, create, update, delete, list]} - - - name: abc_admin - content: - path: - '*': {capabilities: [read, create]} - 'stage/*': {capabilities: 
[read, create]} + - policies: {{ vault.policies }} diff --git a/tests/Gopkg.lock b/tests/Gopkg.lock deleted file mode 100644 index 20009c3..0000000 --- a/tests/Gopkg.lock +++ /dev/null @@ -1,285 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:e4f5819333ac698d294fe04dbf640f84719658d5c7ce195b10060cc37292ce79" - name = "github.com/golang/snappy" - packages = ["."] - pruneopts = "UT" - revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a" - version = "v0.0.1" - -[[projects]] - digest = "1:9a125bb28b817431abc860b051cf10f41febe7830749e6d460826c4e253994be" - name = "github.com/gruntwork-io/terratest" - packages = [ - "modules/collections", - "modules/customerrors", - "modules/files", - "modules/logger", - "modules/retry", - "modules/shell", - "modules/ssh", - "modules/terraform", - ] - pruneopts = "UT" - revision = "367843c5fa8429d84d2e9b78402546316b54ee91" - version = "v0.17.6" - -[[projects]] - digest = "1:0ade334594e69404d80d9d323445d2297ff8161637f9b2d347cc6973d2d6f05b" - name = "github.com/hashicorp/errwrap" - packages = ["."] - pruneopts = "UT" - revision = "8a6fb523712970c966eefc6b39ed2c5e74880354" - version = "v1.0.0" - -[[projects]] - digest = "1:af105c7c5dc0b4ae41991f122cae860b9600f7d226072c2a83127048c991660c" - name = "github.com/hashicorp/go-cleanhttp" - packages = ["."] - pruneopts = "UT" - revision = "eda1e5db218aad1db63ca4642c8906b26bcf2744" - version = "v0.5.1" - -[[projects]] - digest = "1:cf6b61e1b4c26b0c7526cee4a0cee6d8302b17798af4b2a56a90eedac0aef11a" - name = "github.com/hashicorp/go-hclog" - packages = ["."] - pruneopts = "UT" - revision = "5ccdce08c75b6c7b37af61159f13f6a4f5e2e928" - version = "v0.9.2" - -[[projects]] - digest = "1:f668349b83f7d779567c880550534addeca7ebadfdcf44b0b9c39be61864b4b7" - name = "github.com/hashicorp/go-multierror" - packages = ["."] - pruneopts = "UT" - revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1" - version = "v1.0.0" - -[[projects]] - digest = "1:bc4393e7d030ef4a548d9643997e2ae9064ed93d7ed140569b27336ee3b77464" - name = "github.com/hashicorp/go-retryablehttp" - packages = ["."] - pruneopts = "UT" - revision = "a83ad44d6a5fc343d7c4babf601092b3c189f402" - version = "v0.6.2" - -[[projects]] - digest = "1:7b893c9e1181e224506c523777dea0d16f4bd20a7627b100cc800e14229f405c" - name = "github.com/hashicorp/go-rootcerts" - packages = ["."] - pruneopts = "UT" - revision = "df8e78a645e18d56ed7bb9ae10ffb8174ab892e2" - version = "v1.0.1" - -[[projects]] - digest = "1:8abc57884881876d02f467bb7d4ed7ce3a58dac3f8f7ba60579ce4ffc6afd7e1" - name = "github.com/hashicorp/go-sockaddr" - packages = ["."] - pruneopts = "UT" - revision = "c7188e74f6acae5a989bdc959aa779f8b9f42faf" - version = "v1.0.2" - -[[projects]] - digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8" - name = "github.com/hashicorp/hcl" - packages = [ - ".", - "hcl/ast", - "hcl/parser", - "hcl/scanner", - "hcl/strconv", - "hcl/token", - "json/parser", - "json/scanner", - "json/token", - ] - pruneopts = "UT" - revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" - version = "v1.0.0" - -[[projects]] - digest = "1:d22f47ec7404382d5d99152ae979ca0cc7a740839b1520c645747acb0b4c5391" - name = "github.com/hashicorp/vault" - packages = [ - "api", - 
"sdk/helper/compressutil", - "sdk/helper/consts", - "sdk/helper/hclutil", - "sdk/helper/jsonutil", - "sdk/helper/parseutil", - "sdk/helper/strutil", - ] - pruneopts = "UT" - revision = "a1a5f0d798d4181778259403fae0802fff46915a" - version = "v1.2.2" - -[[projects]] - digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" - name = "github.com/mitchellh/go-homedir" - packages = ["."] - pruneopts = "UT" - revision = "af06845cf3004701891bf4fdb884bfe4920b3727" - version = "v1.1.0" - -[[projects]] - digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - pruneopts = "UT" - revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" - version = "v1.1.2" - -[[projects]] - digest = "1:b5e6db1f0095a7427712ba7f5890d6c0cb6b61d6f9d76bc5a4e3d2344461e652" - name = "github.com/pierrec/lz4" - packages = [ - ".", - "internal/xxh32", - ] - pruneopts = "UT" - revision = "8ef35db8296124c4969aab929c16c91c3cb2c8a0" - version = "v2.2.6" - -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "UT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - digest = "1:6baa565fe16f8657cf93469b2b8a6c61a277827734400d27e44d589547297279" - name = "github.com/ryanuber/go-glob" - packages = ["."] - pruneopts = "UT" - revision = "51a8f68e6c24dc43f1e371749c89a267de4ebc53" - version = "v1.0.0" - -[[projects]] - digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require", - ] - pruneopts = "UT" - revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" - version = "v1.4.0" - -[[projects]] - branch = "master" - digest = "1:c4c38e643ce0e70332dd5e53265ba2eb5193173f3df83a418708af5b0f478a1b" - name = "golang.org/x/crypto" - packages = [ - "curve25519", - "ed25519", - "ed25519/internal/edwards25519", - "internal/chacha20", - "internal/subtle", - "pbkdf2", - "poly1305", - "ssh", - "ssh/agent", - ] - pruneopts = "UT" - revision = "60c769a6c58655dab1b9adac0d58967dd517cfba" - -[[projects]] - branch = "master" - digest = "1:d32feaee571ce6344189c3b3ff66c0619ecb3f2d7e1869164a6ea9e5a80a025f" - name = "golang.org/x/net" - packages = [ - "context", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - ] - pruneopts = "UT" - revision = "74dc4d7220e7acc4e100824340f3e66577424772" - -[[projects]] - branch = "master" - digest = "1:9289797869517c73400e7b5a93beefbfc86e06f56e3ab03f38fb0d575e685277" - name = "golang.org/x/sys" - packages = ["cpu"] - pruneopts = "UT" - revision = "fde4db37ae7ad8191b03d30d27f258b5291ae4e3" - -[[projects]] - digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/language", - "internal/language/compact", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" - version = "v0.3.2" - -[[projects]] - branch = "master" - digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" - name = "golang.org/x/time" - packages = ["rate"] - pruneopts = "UT" - revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef" 
-
-[[projects]]
-  digest = "1:9593bab40e981b1f90b7e07faeab0d09b75fe338880d08880f986a9d3283c53f"
-  name = "gopkg.in/square/go-jose.v2"
-  packages = [
-    ".",
-    "cipher",
-    "json",
-    "jwt",
-  ]
-  pruneopts = "UT"
-  revision = "730df5f748271903322feb182be83b43ebbbe27d"
-  version = "v2.3.1"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  input-imports = [
-    "github.com/gruntwork-io/terratest/modules/logger",
-    "github.com/gruntwork-io/terratest/modules/retry",
-    "github.com/gruntwork-io/terratest/modules/terraform",
-    "github.com/hashicorp/vault/api",
-  ]
-  solver-name = "gps-cdcl"
-  solver-version = 1
diff --git a/tests/Gopkg.toml b/tests/Gopkg.toml
deleted file mode 100644
index d951b36..0000000
--- a/tests/Gopkg.toml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Gopkg.toml example
-#
-# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-#   name = "github.com/user/project"
-#   version = "1.0.0"
-#
-# [[constraint]]
-#   name = "github.com/user/project2"
-#   branch = "dev"
-#   source = "github.com/myfork/project2"
-#
-# [[override]]
-#   name = "github.com/x/y"
-#   version = "2.4.0"
-#
-# [prune]
-#   non-go = false
-#   go-tests = true
-#   unused-packages = true
-
-
-[[constraint]]
-  name = "github.com/gruntwork-io/terratest"
-  version = "0.17.5"
-
-[prune]
-  go-tests = true
-  unused-packages = true
diff --git a/tests/vault-py2/main.tf b/tests/vault-py2/main.tf
index 71f5fa0..47dd9cd 100644
--- a/tests/vault-py2/main.tf
+++ b/tests/vault-py2/main.tf
@@ -34,7 +34,7 @@ module "base" {
   # Watchmaker settings
   watchmaker_config = var.watchmaker_config

-  toggle_update = "B"
+  toggle_update = "A"
 }

 output "cluster_url" {
diff --git a/tests/vault-py2/pillar/top.sls b/tests/vault-py2/pillar/top.sls
new file mode 100644
index 0000000..7e34ce6
--- /dev/null
+++ b/tests/vault-py2/pillar/top.sls
@@ -0,0 +1,3 @@
+base:
+  "*":
+    - vault
diff --git a/tests/vault-py2/pillar/vault/init.sls b/tests/vault-py2/pillar/vault/init.sls
new file mode 100644
index 0000000..f3a2a97
--- /dev/null
+++ b/tests/vault-py2/pillar/vault/init.sls
@@ -0,0 +1,48 @@
+vault:
+  lookup:
+    api_port: ${api_port}
+    cluster_port: ${cluster_port}
+    dynamodb_table: ${dynamodb_table}
+    inbound_cidrs: ${inbound_cidrs}
+    kms_key_id: ${kms_key_id}
+    logs_path: ${logs_path}
+    logs_dir: ${logs_dir}
+    region: ${region}
+    ssm_path: ${ssm_path}
+    version: ${vault_version}
+
+  secrets_engines:
+    - type: kv
+      path: services
+      description: Services specific folders
+      config:
+        default_lease_ttl: 1800
+        max_lease_ttl: 1800
+
+  auth_methods:
+    - type: token
+      path: token
+      description: token based credentials
+      config:
+        default_lease_ttl: 0
+        max_lease_ttl: 0
+
+  audit_devices:
+    - type: file
+      path: file_log
+      description: first audit device
+      config:
+        file_path: /etc/vault/logs/audit.log
+
+  policies:
+    - name: xyz_admin
+      content:
+        path:
+          '*': {capabilities: [read, create]}
+          'stage/*': {capabilities: [read, create, update, delete, list]}
+
+    - name: abc_admin
+      content:
+        path:
+          '*': {capabilities: [read, create]}
+          'stage/*': {capabilities: [read, create]}
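Note: the ${...} tokens in the pillar above are not Salt syntax; they are placeholders presumably filled in by the test harness (e.g., via Terraform templating) before the pillar reaches the instances. The rendering step itself is not shown in this patch. A minimal sketch of how a test could hand the pillar to the module through the new vault_pillar_path input, modeled on the vault-py2 test; the source path and the pillar location are illustrative assumptions:

    module "base" {
      source = "../.."                             # assumption: module root relative to the test directory

      # Vault settings
      vault_version     = var.vault_version
      vault_pillar_path = "${path.module}/pillar"  # assumption: directory holding top.sls and vault/init.sls
      dynamodb_table    = var.dynamodb_table
    }
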
diff --git a/variables.tf b/variables.tf
index 73972c8..fe4c829 100644
--- a/variables.tf
+++ b/variables.tf
@@ -44,6 +44,11 @@ variable "vault_version" {
   description = "Version of Vault to be installed on servers"
 }

+variable "vault_pillar_path" {
+  type        = string
+  description = "Specify the path to vault pillar"
+}
+
 variable "vault_url" {
   type        = string
   description = "The DNS address that vault will be accessible at. Stack name will be used as the url when value is set to empty. Example: vault.domain.net"
@@ -88,12 +93,6 @@ variable "ami_name_regex" {
   default     = "spel-minimal-centos-7-hvm-\\d{4}\\.\\d{2}\\.\\d{1}\\.x86_64-gp2"
 }

-variable "vault_configs_path" {
-  type        = string
-  description = "(Optional) Path to directory that contains configuration files for vault"
-  default     = null
-}
-
 variable "instance_type" {
   type        = string
   description = "Amazon EC2 instance type"

From 7ade89a894354de0b090d93a522f95141909051a Mon Sep 17 00:00:00 2001
From: Triet Le
Date: Wed, 25 Sep 2019 09:18:12 -0400
Subject: [PATCH 31/34] Updates test cases

---
 Makefile                     |   8 +-
 outputs.tf                   |   2 +-
 tests/go.mod                 |  18 +++++
 tests/go.sum                 | 151 +++++++++++++++++++++++++++++++++++
 tests/vault-py2/main.tf      |   6 +-
 tests/vault-py2/variables.tf |   5 ++
 tests/vault-py3/main.tf      |   8 +-
 tests/vault-py3/variables.tf |  23 ++++++
 8 files changed, 211 insertions(+), 10 deletions(-)
 create mode 100644 tests/go.mod
 create mode 100644 tests/go.sum

diff --git a/Makefile b/Makefile
index 8bbadbd..e4eff0d 100755
--- a/Makefile
+++ b/Makefile
@@ -113,8 +113,10 @@ docs/generate: | guard/program/terraform-docs
 	cat $(README_PARTS) > $(README_FILE)
 	@ echo "[$@]: Documentation files creation complete!"

-terratest/install: | guard/program/go guard/program/dep
-	cd tests && dep ensure
+terratest/install: | guard/program/go
+	cd tests && go mod init terraform-aws-vault/tests
+	cd tests && go build ./...
+ cd tests && go mod tidy -terratest/test: | guard/program/go guard/program/dep +terratest/test: | guard/program/go cd tests && go test -v -timeout 40m diff --git a/outputs.tf b/outputs.tf index c3bd94c..1a02e21 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1,4 +1,4 @@ output "vault_url" { description = "URL to access Vault UI" - value = join("", ["https://", aws_route53_record.this.fqdn]) + value = "https://${aws_route53_record.this.fqdn}" } diff --git a/tests/go.mod b/tests/go.mod new file mode 100644 index 0000000..a9c1e77 --- /dev/null +++ b/tests/go.mod @@ -0,0 +1,18 @@ +module terraform-aws-vault/tests + +go 1.12 + +require ( + github.com/frankban/quicktest v1.5.0 // indirect + github.com/gruntwork-io/terratest v0.17.6 + github.com/hashicorp/go-retryablehttp v0.6.2 // indirect + github.com/hashicorp/vault/api v1.0.5-0.20190814205542-3b036e58e950 + github.com/magiconair/properties v1.8.1 // indirect + github.com/pierrec/lz4 v2.2.6+incompatible // indirect + github.com/stretchr/testify v1.4.0 // indirect + golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 // indirect + golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 // indirect + golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // indirect + golang.org/x/text v0.3.2 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect +) diff --git a/tests/go.sum b/tests/go.sum new file mode 100644 index 0000000..1e64734 --- /dev/null +++ b/tests/go.sum @@ -0,0 +1,151 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.5.0 h1:Tb4jWdSpdjKzTUicPnY61PZxKbDoGa7ABbrReT3gQVY= +github.com/frankban/quicktest v1.5.0/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
+github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/gruntwork-io/terratest v0.17.6 h1:efnWnoz3GvM6VGHvRebGaO41ne4ZTKMvMqMf8V5vY58= +github.com/gruntwork-io/terratest v0.17.6/go.mod h1:NjUn6YXA5Skxt8Rs20t3isYx5Rl+EgvGB8/+RRXddqk= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw= +github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= +github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.0.5-0.20190814205542-3b036e58e950 h1:UIUFPNdUvzyQVp3VxX3SyQ4WclDtso9/7LZFtHtQPlw= +github.com/hashicorp/vault/api v1.0.5-0.20190814205542-3b036e58e950/go.mod h1:t4IAg1Is4bLUtTq8cGgeUh0I8oDRBXPk2bM1Jvg/nWA= +github.com/hashicorp/vault/sdk v0.1.14-0.20190814205504-1cad00d1133b h1:uC3aN7xIG8gPNm9cbNY05OJ44cYfAv5Rn+QLSBsFq1s= +github.com/hashicorp/vault/sdk v0.1.14-0.20190814205504-1cad00d1133b/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux 
v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= +github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= +golang.org/x/crypto 
v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/tests/vault-py2/main.tf b/tests/vault-py2/main.tf index 47dd9cd..8ffdc2b 100644 --- a/tests/vault-py2/main.tf +++ b/tests/vault-py2/main.tf @@ -27,9 +27,9 @@ module "base" { certificate_arn = var.certificate_arn # Vault settings - vault_version = var.vault_version - vault_configs_path = "${path.module}/.configs" - dynamodb_table = var.dynamodb_table + vault_version = var.vault_version + vault_pillar_path = var.vault_pillar_path + dynamodb_table = var.dynamodb_table # Watchmaker settings watchmaker_config = var.watchmaker_config diff --git a/tests/vault-py2/variables.tf b/tests/vault-py2/variables.tf index 78ee17d..e529dd9 100644 --- a/tests/vault-py2/variables.tf +++ b/tests/vault-py2/variables.tf @@ -29,6 +29,11 @@ variable "vault_version" { type = string } +variable "vault_pillar_path" { + type = string + description = "Specify the path to vault pillar" +} + variable "kms_key_id" { description = "Id of an AWS KMS key use for auto unseal operation when vault is intialize" type = string diff --git a/tests/vault-py3/main.tf b/tests/vault-py3/main.tf index 210af4c..045c3fc 100644 --- a/tests/vault-py3/main.tf +++ b/tests/vault-py3/main.tf @@ -21,14 +21,16 @@ module "vault-py3" { ec2_subnet_ids = var.ec2_subnet_ids lb_subnet_ids = var.lb_subnet_ids - cloudwatch_agent_url = var.cloudwatch_agent_url + cloudwatch_agent_url = var.cloudwatch_agent_url + ec2_extra_security_group_ids 
= var.ec2_extra_security_group_ids

   domain_name     = var.domain_name
   route53_zone_id = var.route53_zone_id
+  certificate_arn = var.certificate_arn

   # Vault settings
-  vault_version  = "1.2.0"
-  dynamodb_table = null
+  vault_version  = var.vault_version
+  dynamodb_table = var.dynamodb_table

   # Watchmaker settings
   watchmaker_config = var.watchmaker_config
diff --git a/tests/vault-py3/variables.tf b/tests/vault-py3/variables.tf
index 0183b38..cf4bda8 100644
--- a/tests/vault-py3/variables.tf
+++ b/tests/vault-py3/variables.tf
@@ -45,3 +45,26 @@ variable "watchmaker_config" {
   description = "(Optional) URL to a Watchmaker config file"
   default     = ""
 }
+
+variable "vault_version" {
+  description = "Version of Vault to be installed on servers"
+  type        = string
+}
+
+variable "dynamodb_table" {
+  description = "Name of the Dynamodb to be used as storage backend for Vault"
+  type        = string
+  default     = null
+}
+
+variable "certificate_arn" {
+  type        = string
+  description = "The ARN of the default SSL server certificate to be used for the HTTPS LB listener."
+  default     = null
+}
+
+variable "ec2_extra_security_group_ids" {
+  type        = list(string)
+  description = "List of additional security groups to add to EC2 instances"
+  default     = []
+}

From cfdde2b242e88bdb1c3fe36863223092dd5f6657 Mon Sep 17 00:00:00 2001
From: Triet Le
Date: Wed, 25 Sep 2019 09:18:39 -0400
Subject: [PATCH 32/34] Updates Vagrantfile to pull the latest spel image

---
 Vagrantfile              | 31 +++++++++++++++++--------------
 salt/vault/configure.sls |  2 ++
 salt/vault/install.sls   |  2 +-
 3 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/Vagrantfile b/Vagrantfile
index 8cf14df..73f8601 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -5,8 +5,9 @@
 # configures the configuration version (we support older styles for
 # backwards compatibility). Please don't change it unless you know what
 # you're doing.
-BOX_IMAGE = "centos/7"
-NODE_COUNT = 1
+
+BOX_IMAGE = "plus3it/spel-minimal-centos-7"
+NODE_COUNT = 2

 Vagrant.configure("2") do |config|
   # The most common configuration options are documented and commented below.
@@ -80,12 +81,16 @@ Vagrant.configure("2") do |config|
     yum update -y && yum upgrade -y
     yum install -y curl unzip epel-release yum-utils jq

-    yum install -y https://repo.saltstack.com/py3/redhat/salt-py3-repo-2018.3.el7.noarch.rpm
+    # Python3
+    # yum install -y https://repo.saltstack.com/py3/redhat/salt-py3-repo-2018.3.el7.noarch.rpm
+
+    # Python2
+    # yum install -y https://repo.saltstack.com/yum/redhat/salt-repo-2018.3.el7.noarch.rpm

-    yum clean expire-cache
+    # yum clean expire-cache

-    yum install salt-master -y
-    yum install salt-minion -y
+    # yum install salt-master -y
+    # yum install salt-minion -y

     echo 'Change permission for dirs'
     chmod +x /usr/local/bin/
@@ -103,17 +108,15 @@ Vagrant.configure("2") do |config|
       echo 'export VAULT_TOKEN=root' >> /home/vagrant/.bash_profile
       echo 'alias l="ls -lah"' >> /home/vagrant/.bash_profile
     fi
-
-  SHELL
-  config.vm.provision "shell", inline: <<-SHELL
+  # config.vm.provision "shell", inline: <<-SHELL

-    echo "Setting the required salt grains for vault..."
-    salt-call --local grains.setval vault '{"dev_mode": true, "dev_configs": "-dev -dev-root-token-id=root", "api_port": 8200, "cluster_port": 8201}'
+    # echo "Setting the required salt grains for vault..."
+    # salt-call --local grains.setval vault '{"dev_mode": true, "dev_configs": "-dev -dev-root-token-id=root", "api_port": 8200, "cluster_port": 8201}'

-    echo "Updating salt states/modules/utils/grains..."
- salt-call --local saltutil.sync_all - SHELL + # echo "Updating salt states/modules/utils/grains..." + # salt-call --local saltutil.sync_all + # SHELL end diff --git a/salt/vault/configure.sls b/salt/vault/configure.sls index cccea3b..dbc7304 100644 --- a/salt/vault/configure.sls +++ b/salt/vault/configure.sls @@ -11,7 +11,9 @@ vault_configure_service_file: - name: /etc/vault/conf.d/server.hcl - template: jinja - defaults: +{%- if not vault.dev_mode %} ip_address: {{ grains.ip_interfaces.eth0.0 }} +{%- endif %} api_port: {{ vault.api_port }} cluster_port: {{ vault.cluster_port }} region: {{ vault.region }} diff --git a/salt/vault/install.sls b/salt/vault/install.sls index 236beb5..dbb0380 100644 --- a/salt/vault/install.sls +++ b/salt/vault/install.sls @@ -68,7 +68,7 @@ install_python_dependencies: {%- if salt.grains.get('pythonversion')[0] | int == 3 %} install_pip_module: pkg.installed: - - name: python36-pip + - name: python3-pip install_python_dependencies: pip.installed: From ac1b62bdaf1b026f7aa67db2e42f0af5f7522441 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Wed, 25 Sep 2019 09:27:56 -0400 Subject: [PATCH 33/34] Update readme to expose new vault_pillar_path variable --- .gitignore | 2 + README.md | 2 +- tests/go.mod | 18 ------ tests/go.sum | 151 --------------------------------------------------- 4 files changed, 3 insertions(+), 170 deletions(-) delete mode 100644 tests/go.mod delete mode 100644 tests/go.sum diff --git a/.gitignore b/.gitignore index 4be99ad..427191b 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,8 @@ # ignore go files vendor/ .configs/ +go.mod +go.sum #Vagrant related files diff --git a/README.md b/README.md index afb9732..8fea6ce 100755 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy | route53\_zone\_id | Hosted zone ID Route 53 hosted zone | string | n/a | yes | | tags | (Optional) list of tags to include with resource | map(string) | `` | no | | toggle\_update | (Optional) Toggle that triggers a stack update by modifying the launch config, resulting in new instances; must be one of: A or B | string | `"A"` | no | -| vault\_configs\_path | (Optional) Path to directory that contains configuration files for vault | string | `"null"` | no | +| vault\_pillar\_path | Specify the path to vault pillar | string | n/a | yes | | vault\_url | The DNS address that vault will be accessible at. Stack name will be used as the url when value is set to empty. 
Example: vault.domain.net | string | `"null"` | no | | vault\_version | Version of Vault to be installed on servers | string | n/a | yes | | watchmaker\_admin\_groups | (Optional) Colon-separated list of domain groups that should have admin permissions on the EC2 instance | string | `""` | no | diff --git a/tests/go.mod b/tests/go.mod deleted file mode 100644 index a9c1e77..0000000 --- a/tests/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module terraform-aws-vault/tests - -go 1.12 - -require ( - github.com/frankban/quicktest v1.5.0 // indirect - github.com/gruntwork-io/terratest v0.17.6 - github.com/hashicorp/go-retryablehttp v0.6.2 // indirect - github.com/hashicorp/vault/api v1.0.5-0.20190814205542-3b036e58e950 - github.com/magiconair/properties v1.8.1 // indirect - github.com/pierrec/lz4 v2.2.6+incompatible // indirect - github.com/stretchr/testify v1.4.0 // indirect - golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 // indirect - golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 // indirect - golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // indirect - golang.org/x/text v0.3.2 // indirect - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect -) diff --git a/tests/go.sum b/tests/go.sum deleted file mode 100644 index 1e64734..0000000 --- a/tests/go.sum +++ /dev/null @@ -1,151 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/frankban/quicktest v1.5.0 h1:Tb4jWdSpdjKzTUicPnY61PZxKbDoGa7ABbrReT3gQVY= -github.com/frankban/quicktest v1.5.0/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= -github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= -github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/gruntwork-io/terratest v0.17.6 h1:efnWnoz3GvM6VGHvRebGaO41ne4ZTKMvMqMf8V5vY58= -github.com/gruntwork-io/terratest v0.17.6/go.mod h1:NjUn6YXA5Skxt8Rs20t3isYx5Rl+EgvGB8/+RRXddqk= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw= -github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= -github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= -github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.0.5-0.20190814205542-3b036e58e950 h1:UIUFPNdUvzyQVp3VxX3SyQ4WclDtso9/7LZFtHtQPlw= -github.com/hashicorp/vault/api v1.0.5-0.20190814205542-3b036e58e950/go.mod h1:t4IAg1Is4bLUtTq8cGgeUh0I8oDRBXPk2bM1Jvg/nWA= -github.com/hashicorp/vault/sdk v0.1.14-0.20190814205504-1cad00d1133b h1:uC3aN7xIG8gPNm9cbNY05OJ44cYfAv5Rn+QLSBsFq1s= -github.com/hashicorp/vault/sdk v0.1.14-0.20190814205504-1cad00d1133b/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/kr/pretty v0.1.0 
h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= -github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 9f0b0706d43b4d2642a6da58d17cc9dc8b34eaeb Mon Sep 17 00:00:00 2001 From: Triet Le Date: Tue, 1 Oct 2019 16:05:59 -0400 Subject: [PATCH 34/34] Replace S3 bucket module with S3 Bucket Resource --- Makefile | 15 +- main.tf | 38 +- modules/iam/iam_policy.json | 6 +- salt/_modules/vault.py | 766 ++++++++++++-------------- salt/vault/map.jinja | 3 - salt/vault/service.sls | 22 +- scripts/terraform-docs.awk | 90 --- scripts/terraform-docs.sh | 14 - tests/module_test.go | 59 +- tests/vault-py2/main.tf | 2 +- tests/vault-py2/variables.tf | 2 +- tests/vault-py3/main.tf | 5 +- tests/vault-py3/pillar/top.sls | 3 + tests/vault-py3/pillar/vault/init.sls | 48 ++ tests/vault-py3/variables.tf | 5 + 15 files changed, 475 insertions(+), 603 deletions(-) delete mode 100644 scripts/terraform-docs.awk delete mode 100755 scripts/terraform-docs.sh create mode 100644 tests/vault-py3/pillar/top.sls create mode 100644 tests/vault-py3/pillar/vault/init.sls diff --git a/Makefile b/Makefile index e4eff0d..724b8ff 100755 --- a/Makefile +++ b/Makefile @@ -78,6 +78,11 @@ shellcheck/install: $(BIN_DIR) guard/program/xz rm -rf $(@D)-* $(@D) --version +tfdocs-awk/install: $(BIN_DIR) +tfdocs-awk/install: ARCHIVE := https://github.com/plus3it/tfdocs-awk/archive/master.tar.gz +tfdocs-awk/install: + $(CURL) $(ARCHIVE) | tar -C $(BIN_DIR) --strip-components=1 --wildcards '*.sh' --wildcards '*.awk' -xzvf - + terraform/lint: | guard/program/terraform @ echo "[$@]: Linting Terraform files..." 
	terraform fmt -check=true -diff=true
@@ -100,15 +105,15 @@ json/format: | guard/program/jq
 	$(FIND_JSON) | $(XARGS) bash -c 'echo "$$(jq --indent 4 -S . "{}")" > "{}"'
 	@ echo "[$@]: Successfully formatted JSON files!"

-docs/%: README_PARTS := _docs/MAIN.md <(echo) <(./scripts/terraform-docs.sh markdown table .)
+docs/%: README_PARTS := _docs/MAIN.md <(echo) <($(BIN_DIR)/terraform-docs.sh markdown table .)
 docs/%: README_FILE ?= README.md

-docs/lint: | guard/program/terraform-docs
+docs/lint: | guard/program/terraform-docs tfdocs-awk/install
 	@ echo "[$@]: Linting documentation files.."
 	diff $(README_FILE) <(cat $(README_PARTS))
 	@ echo "[$@]: Documentation files PASSED lint test!"

-docs/generate: | guard/program/terraform-docs
+docs/generate: | guard/program/terraform-docs tfdocs-awk/install
 	@ echo "[$@]: Creating documentation files.."
 	cat $(README_PARTS) > $(README_FILE)
 	@ echo "[$@]: Documentation files creation complete!"
@@ -119,4 +124,6 @@ terratest/install: | guard/program/go
 	cd tests && go mod tidy

 terratest/test: | guard/program/go
-	cd tests && go test -v -timeout 40m
+	cd tests && go test -count=1 -timeout 60m
+
+test: terratest/test
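For context before the main.tf hunks below: the commit's core change swaps the third-party S3 bucket module for the provider's native resource. Condensed into a before/after sketch (showing only the attributes visible in this diff), every downstream reference moves from the module's outputs to the resource's attributes:

    # Before: external module pinned to v0.1.0
    module "s3_bucket" {
      source = "git::https://github.com/terraform-aws-modules/terraform-aws-s3-bucket.git?ref=v0.1.0"
      bucket = local.bucket_name
    }

    # After: a plain resource; module.s3_bucket.this_s3_bucket_id becomes
    # aws_s3_bucket.this.id, and this_s3_bucket_arn becomes aws_s3_bucket.this.arn
    resource "aws_s3_bucket" "this" {
      bucket = local.bucket_name
      tags   = local.tags
    }
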
data.archive_file.salt.output_md5 @@ -171,7 +170,7 @@ data "template_file" "appscript" { } resource "aws_s3_bucket_object" "app_script" { - bucket = module.s3_bucket.this_s3_bucket_id + bucket = aws_s3_bucket.this.id key = local.appscript_file_name content = data.template_file.appscript.rendered etag = md5(data.template_file.appscript.rendered) @@ -285,7 +284,7 @@ resource "aws_lb_target_group" "this" { # /sys/health will return 200 only if the vault instance # is the leader. Meaning there will only ever be one healthy # instance, but a failure will cause a new instance to - # be healthy automatically. This healthceck path prevents + # be healthy automatically. This healthcheck path prevents # unnecessary redirect loops by not sending traffic to # followers, which always just route traffic to the master health_check { @@ -414,16 +413,15 @@ resource "aws_appautoscaling_policy" "this" { # Manage autoscaling group module "autoscaling_group" { - source = "git::https://github.com/plus3it/terraform-aws-watchmaker//modules/lx-autoscale?ref=1.15.7" + source = "git::https://github.com/plus3it/terraform-aws-watchmaker//modules/lx-autoscale?ref=2.0.0" Name = var.name OnFailureAction = "" DisableRollback = "true" - AmiId = data.aws_ami.this.id - AmiDistro = "CentOS" - AppScriptUrl = local.s3_appscript_url - CfnBootstrapUtilsUrl = var.cfn_bootstrap_utils_url + AmiId = data.aws_ami.this.id + AmiDistro = "CentOS" + AppScriptUrl = local.s3_appscript_url CfnEndpointUrl = var.cfn_endpoint_url CloudWatchAgentUrl = var.cloudwatch_agent_url diff --git a/modules/iam/iam_policy.json b/modules/iam/iam_policy.json index b8ef8ae..5f98608 100644 --- a/modules/iam/iam_policy.json +++ b/modules/iam/iam_policy.json @@ -7,7 +7,7 @@ ], "Effect": "Allow", "Resource": [ - "arn:${partition}:cloudformation:${region}:${account_id}:stack/${stack_name}*" + "arn:${partition}:cloudformation:${region}:${account_id}:stack/${stack_name}/*" ], "Sid": "CfnActions" }, @@ -63,9 +63,9 @@ ], "Effect": "Allow", "Resource": [ - "arn:${partition}:logs:${region}:${account_id}:log-group:/aws/ec2/lx/${stack_name}*" + "arn:${partition}:logs:${region}:${account_id}:log-group:/aws/ec2/lx/${stack_name}:log-stream:*" ], - "Sid": "CloudWatchLogActions" + "Sid": "CloudWatchLogStreamActions" }, { "Action": [ diff --git a/salt/_modules/vault.py b/salt/_modules/vault.py index 6783bd8..3aaa065 100644 --- a/salt/_modules/vault.py +++ b/salt/_modules/vault.py @@ -168,15 +168,11 @@ def get_remote_policies(self, client, ret): """ log.info('Retrieving policies from vault...') polices = [] - try: - policies_resp = client.sys.list_policies() + policies_resp = client.sys.list_policies() - for policy in policies_resp['data']['policies']: - if not (policy == 'root' or policy == 'default'): - polices.append(policy) - - except Exception: - raise + for policy in policies_resp['data']['policies']: + if not (policy == 'root' or policy == 'default'): + polices.append(policy) log.info('Finished retrieving policies from vault.') @@ -193,23 +189,20 @@ def push_policies(self, client, remote_policies, local_policies, ret): """ log.info('Pushing policies from local config folder to vault...') new_policies = [] - try: - for policy in local_policies: - client.sys.create_or_update_policy( - name=policy['name'], - policy=policy['content'] - ) - if policy['name'] in remote_policies: - log.debug('Policy "%s" has been updated.', policy["name"]) - else: - new_policies.append(policy["name"]) - log.debug('Policy "%s" has been created.', policy["name"]) + for policy in local_policies: + 
client.sys.create_or_update_policy( + name=policy['name'], + policy=policy['content'] + ) + if policy['name'] in remote_policies: + log.debug('Policy "%s" has been updated.', policy["name"]) + else: + new_policies.append(policy["name"]) + log.debug('Policy "%s" has been created.', policy["name"]) - # Build return object - ret['changes']['old'] = remote_policies - ret['changes']['new'] = new_policies or "No changes" - except Exception: - raise + # Build return object + ret['changes']['old'] = remote_policies + ret['changes']['new'] = new_policies or "No changes" log.info('Finished pushing policies local config folder to vault.') @@ -224,19 +217,16 @@ def cleanup_policies(self, client, remote_policies, local_policies, ret): """ log.info('Cleaning up vault policies...') has_change = False - try: - for policy in remote_policies: - if policy not in [pol['name'] for pol in local_policies]: - log.debug( - '"%s" is not found in configs folder. Removing it from vault...', policy) - has_change = True - client.sys.delete_policy(name=policy) - log.debug('"%s" is removed.', policy) - - if has_change: - ret['changes']['new'] = [ob['name'] for ob in local_policies] - except Exception: - raise + for policy in remote_policies: + if policy not in [pol['name'] for pol in local_policies]: + log.debug( + '"%s" is not found in configs folder. Removing it from vault...', policy) + has_change = True + client.sys.delete_policy(name=policy) + log.debug('"%s" is removed.', policy) + + if has_change: + ret['changes']['new'] = [ob['name'] for ob in local_policies] log.info('Finished cleaning up vault policies.') @@ -246,9 +236,9 @@ class VaultAuthManager(): """ def __init__(self): - """Initialize Authentication Manager - """ - log.info("Initializing Vault Auth Manager...") + """Initialize Authentication Manager + """ + log.info("Initializing Vault Auth Manager...") def get_remote_auth_methods(self, client, ret): """Retrieve authentication methods from remote vault server @@ -264,20 +254,17 @@ def get_remote_auth_methods(self, client, ret): auth_resp = client.sys.list_auth_methods() auth_methods = [] - try: - for auth_method in auth_resp['data']: - auth_methods.append( - VaultAuthMethod( - type=auth_resp[auth_method]['type'], - path=(auth_resp[auth_method]["path"] - if 'path' in auth_resp[auth_method] else auth_method), - description=auth_resp[auth_method]["description"], - config=OrderedDict( - sorted(auth_resp[auth_method]["config"].items())) - ) + for auth_method in auth_resp['data']: + auth_methods.append( + VaultAuthMethod( + type=auth_resp[auth_method]['type'], + path=(auth_resp[auth_method]["path"] + if 'path' in auth_resp[auth_method] else auth_method), + description=auth_resp[auth_method]["description"], + config=OrderedDict( + sorted(auth_resp[auth_method]["config"].items())) ) - except Exception: - raise + ) log.info('Finished retrieving auth methods from vault.') @@ -296,32 +283,29 @@ def populate_local_auth_methods(self, configs, ret): log.info('Populating local auth methods...') auth_methods = [] - try: - for auth_method in configs: - auth_config = None - extra_config = None - - if "auth_config" in auth_method: - auth_config = OrderedDict( - sorted(auth_method["auth_config"].items())) - - if "extra_config" in auth_method: - extra_config = OrderedDict( - sorted(auth_method["extra_config"].items())) - - auth_methods.append( - VaultAuthMethod( - type=auth_method["type"], - path=auth_method["path"], - description=auth_method["description"], - config=OrderedDict( - sorted(auth_method["config"].items())), - 
auth_config=auth_config, - extra_config=extra_config - ) + for auth_method in configs: + auth_config = None + extra_config = None + + if "auth_config" in auth_method: + auth_config = OrderedDict( + sorted(auth_method["auth_config"].items())) + + if "extra_config" in auth_method: + extra_config = OrderedDict( + sorted(auth_method["extra_config"].items())) + + auth_methods.append( + VaultAuthMethod( + type=auth_method["type"], + path=auth_method["path"], + description=auth_method["description"], + config=OrderedDict( + sorted(auth_method["config"].items())), + auth_config=auth_config, + extra_config=extra_config ) - except Exception: - raise + ) log.info('Finished populating local auth methods.') @@ -341,88 +325,84 @@ def configure_auth_methods(self, client, remote_methods, local_methods, ret): new_auth_methods = [] ldap_groups = [] - try: - for auth_method in local_methods: - log.debug('Checking if auth method "%s" is enabled...', - auth_method.path) - if auth_method in remote_methods: - log.debug( - 'Auth method "%s" is already enabled. Tuning...', auth_method.path) - client.sys.tune_auth_method( - path=auth_method.path, - description=auth_method.description, - default_lease_ttl=auth_method.config["default_lease_ttl"], - max_lease_ttl=auth_method.config["max_lease_ttl"] - ) - log.debug('Auth method "%s" is tuned.', auth_method.type) - else: - log.debug( - 'Auth method "%s" is not enabled. Enabling now...', auth_method.path) - client.sys.enable_auth_method( - method_type=auth_method.type, - path=auth_method.path, - description=auth_method.description, - config=auth_method.config - ) - log.debug('Auth method "%s" is enabled.', auth_method.type) - new_auth_methods.append(auth_method.type) - - # Provision config for specific auth method - if auth_method.auth_config: - if auth_method.type == "ldap": - log.debug('Provisioning configuration for LDAP...') - client.auth.ldap.configure(**auth_method.auth_config) - log.debug('Configuration for LDAP is provisioned.') - else: - log.debug( - 'Auth method "%s" does not contain any specific configurations.', auth_method.type) - - if auth_method.extra_config: - log.debug( - 'Provisioning extra configurations for auth method "%s"', auth_method.type) - # Get LDAP group mapping from vault - ldap_list_group_response = client.auth.ldap.list_groups() - if ldap_list_group_response: - ldap_groups = ldap_list_group_response["data"]["keys"] - - log.debug("LDAP groups from vault: %s", str(ldap_groups)) - - # Update LDAP group mapping - log.debug( - 'Writing LDAP group -> Policy mappings for "%s"', str(auth_method.path)) - local_config_groups = auth_method.extra_config["group_policy_map"] - for key in local_config_groups: - log.debug('LDAP Group ["%s"] -> Policies %s', - str(key), local_config_groups[key]) - - client.auth.ldap.create_or_update_group( - name=key, - policies=local_config_groups[key] - ) - - # Clean up LDAP group mapping - if ldap_groups: - for group in ldap_groups: - if group in {k.lower(): v for k, v in local_config_groups.items()}: - log.debug( - 'LDAP group mapping ["%s"] exists in configuration, no cleanup necessary', group) - else: - log.debug( - 'LDAP group mapping ["%s"] does not exist in configuration, deleting...', group) - client.auth.ldap.delete_group(name=group) - log.debug( - 'LDAP group mapping ["%s"] deleted.', group) - else: - log.debug( - 'Auth method "%s" does not contain any extra configurations.', auth_method.type + for auth_method in local_methods: + log.debug('Checking if auth method "%s" is enabled...', + auth_method.path) + if 
auth_method in remote_methods: + log.debug( + 'Auth method "%s" is already enabled. Tuning...', auth_method.path) + client.sys.tune_auth_method( + path=auth_method.path, + description=auth_method.description, + default_lease_ttl=auth_method.config["default_lease_ttl"], + max_lease_ttl=auth_method.config["max_lease_ttl"] + ) + log.debug('Auth method "%s" is tuned.', auth_method.type) + else: + log.debug( + 'Auth method "%s" is not enabled. Enabling now...', auth_method.path) + client.sys.enable_auth_method( + method_type=auth_method.type, + path=auth_method.path, + description=auth_method.description, + config=auth_method.config + ) + log.debug('Auth method "%s" is enabled.', auth_method.type) + new_auth_methods.append(auth_method.type) + + # Provision config for specific auth method + if auth_method.auth_config: + if auth_method.type == "ldap": + log.debug('Provisioning configuration for LDAP...') + client.auth.ldap.configure(**auth_method.auth_config) + log.debug('Configuration for LDAP is provisioned.') + else: + log.debug( + 'Auth method "%s" does not contain any specific configurations.', auth_method.type) + + if auth_method.extra_config: + log.debug( + 'Provisioning extra configurations for auth method "%s"', auth_method.type) + # Get LDAP group mapping from vault + ldap_list_group_response = client.auth.ldap.list_groups() + if ldap_list_group_response: + ldap_groups = ldap_list_group_response["data"]["keys"] + + log.debug("LDAP groups from vault: %s", str(ldap_groups)) + + # Update LDAP group mapping + log.debug( + 'Writing LDAP group -> Policy mappings for "%s"', str(auth_method.path)) + local_config_groups = auth_method.extra_config["group_policy_map"] + for key in local_config_groups: + log.debug('LDAP Group ["%s"] -> Policies %s', + str(key), local_config_groups[key]) + + client.auth.ldap.create_or_update_group( + name=key, + policies=local_config_groups[key] ) - # Build return object - ret['changes']['old'] =[ob.type for ob in remote_methods] - ret['changes']['new'] = new_auth_methods or "No changes" + # Clean up LDAP group mapping + if ldap_groups: + for group in ldap_groups: + if group in {k.lower(): v for k, v in local_config_groups.items()}: + log.debug( + 'LDAP group mapping ["%s"] exists in configuration, no cleanup necessary', group) + else: + log.debug( + 'LDAP group mapping ["%s"] does not exist in configuration, deleting...', group) + client.auth.ldap.delete_group(name=group) + log.debug( + 'LDAP group mapping ["%s"] deleted.', group) + else: + log.debug( + 'Auth method "%s" does not contain any extra configurations.', auth_method.type + ) - except Exception: - raise + # Build return object + ret['changes']['old'] = [ob.type for ob in remote_methods] + ret['changes']['new'] = new_auth_methods or "No changes" log.info('Finished processing and configuring auth methods...') @@ -437,22 +417,19 @@ def cleanup_auth_methods(self, client, remote_methods, local_methods, ret): """ log.info('Cleaning up auth methods...') has_change = False + for auth_method in remote_methods: + if auth_method not in local_methods: + has_change = True + log.debug( + 'Auth method "%s" does not exist in configuration. Disabling...', auth_method.type) + client.sys.disable_auth_method( + path=auth_method.path + ) + log.debug('Auth method "%s" is disabled.', + auth_method.type) - try: - for auth_method in remote_methods: - if auth_method not in local_methods: - has_change = True - log.debug( - 'Auth method "%s" does not exist in configuration. 
Disabling...', auth_method.type) - client.sys.disable_auth_method( - path=auth_method.path - ) - log.debug('Auth method "%s" is disabled.', auth_method.type) - - if has_change: - ret['changes']['new'] = [ob.type for ob in local_methods] - except Exception: - raise + if has_change: + ret['changes']['new'] = [ob.type for ob in local_methods] log.info('Finished cleaning up auth methods.') @@ -479,28 +456,25 @@ def get_remote_secrets_engines(self, client, ret): """ log.info('Retrieving secrets engines from Vault') remote_secret_engines = [] - try: - secrets_engines_resp = client.sys.list_mounted_secrets_engines() - for engine in secrets_engines_resp['data']: - remote_secret_engines.append( - VaultSecretEngine( - type=secrets_engines_resp[engine]['type'], - path=(secrets_engines_resp[engine]["path"] - if 'path' in secrets_engines_resp[engine] else engine), - description=secrets_engines_resp[engine]["description"], - config=OrderedDict( - sorted(secrets_engines_resp[engine]["config"].items())) - ) + secrets_engines_resp = client.sys.list_mounted_secrets_engines() + for engine in secrets_engines_resp['data']: + remote_secret_engines.append( + VaultSecretEngine( + type=secrets_engines_resp[engine]['type'], + path=(secrets_engines_resp[engine]["path"] + if 'path' in secrets_engines_resp[engine] else engine), + description=secrets_engines_resp[engine]["description"], + config=OrderedDict( + sorted(secrets_engines_resp[engine]["config"].items())) ) - remote_secret_engines.sort(key=lambda x: x.type) - except Exception: - raise + ) + remote_secret_engines.sort(key=lambda x: x.type) log.info('Finished retrieving secrets engines from vault.') return remote_secret_engines def populate_local_secrets_engines(self, configs, ret): - """Retriev secrets engines from local config + """Retrieve secrets engines from local config Arguments: configs {list} -- local secrets engines information @@ -511,39 +485,36 @@ def populate_local_secrets_engines(self, configs, ret): """ log.info('Populating local secret engines...') local_secret_engines = [] - try: - for secret_engine in configs: - config = None - secret_config = None - extra_config = None + for secret_engine in configs: + config = None + secret_config = None + extra_config = None + + if 'config' in secret_engine: + if secret_engine["config"]: + config = OrderedDict( + sorted(secret_engine["config"].items())) + + if 'secret_config' in secret_engine: + if secret_engine["secret_config"]: + secret_config = OrderedDict( + sorted(secret_engine["secret_config"].items())) + + if 'extra_config' in secret_engine: + if secret_engine["extra_config"]: + extra_config = OrderedDict( + sorted(secret_engine["extra_config"].items())) - if 'config' in secret_engine: - if secret_engine["config"]: - config = OrderedDict( - sorted(secret_engine["config"].items())) - - if 'secret_config' in secret_engine: - if secret_engine["secret_config"]: - secret_config = OrderedDict( - sorted(secret_engine["secret_config"].items())) - - if 'extra_config' in secret_engine: - if secret_engine["extra_config"]: - extra_config = OrderedDict( - sorted(secret_engine["extra_config"].items())) - - local_secret_engines.append(VaultSecretEngine( - type=secret_engine["type"], - path=secret_engine["path"], - description=secret_engine["description"], - config=config, - secret_config=secret_config, - extra_config=extra_config - )) + local_secret_engines.append(VaultSecretEngine( + type=secret_engine["type"], + path=secret_engine["path"], + description=secret_engine["description"], + config=config, + 
secret_config=secret_config, + extra_config=extra_config + )) - local_secret_engines.sort(key=lambda x: x.type) - except Exception: - raise + local_secret_engines.sort(key=lambda x: x.type) log.info('Finished populating local secret engines.') return local_secret_engines @@ -559,109 +530,102 @@ def configure_secrets_engines(self, client, remote_engines, local_engines, ret): """ log.info('Processing and configuring secrets engines...') new_secrets_engines = [] - try: - for secret_engine in local_engines: - log.debug('Checking if secret engine "%s" at path "%s" is enabled...', - secret_engine.type, - secret_engine.path) - if secret_engine in remote_engines: - log.debug( - 'Secret engine "%s" at path "%s" is already enabled. Tuning...', - secret_engine.type, - secret_engine.path) - - client.sys.tune_mount_configuration( - path=secret_engine.path, - description=secret_engine.description, - default_lease_ttl=secret_engine.config["default_lease_ttl"], - max_lease_ttl=secret_engine.config["max_lease_ttl"] + for secret_engine in local_engines: + log.debug('Checking if secret engine "%s" at path "%s" is enabled...', + secret_engine.type, + secret_engine.path) + if secret_engine in remote_engines: + log.debug( + 'Secret engine "%s" at path "%s" is already enabled. Tuning...', + secret_engine.type, + secret_engine.path) + + client.sys.tune_mount_configuration( + path=secret_engine.path, + description=secret_engine.description, + default_lease_ttl=secret_engine.config["default_lease_ttl"], + max_lease_ttl=secret_engine.config["max_lease_ttl"] + ) + log.debug('Secret engine "%s" at path "%s" is tuned.', + secret_engine.type, secret_engine.path) + else: + log.debug( + 'Secret engine "%s" at path "%s" is not enabled. Enabling now...', + secret_engine.type, + secret_engine.path) + + client.sys.enable_secrets_engine( + backend_type=secret_engine.type, + path=secret_engine.path, + description=secret_engine.description, + config=secret_engine.config + ) + + new_secrets_engines.append( + "type: {} - path: {}".format(secret_engine.type, secret_engine.path)) + + log.debug('Secret engine "%s" at path "%s" is enabled.', + secret_engine.type, secret_engine.path) + + if secret_engine.secret_config: + log.info( + 'Provisioning specific configurations for "%s" secrets engine...', secret_engine.type) + + if secret_engine.type == 'ad': + client.secrets.activedirectory.configure( + **secret_engine.secret_config ) - log.debug('Secret engine "%s" at path "%s" is tuned.', - secret_engine.type, secret_engine.path) - else: - log.debug( - 'Secret engine "%s" at path "%s" is not enabled. 
Enabling now...', - secret_engine.type, - secret_engine.path) - - client.sys.enable_secrets_engine( - backend_type=secret_engine.type, - path=secret_engine.path, - description=secret_engine.description, - config=secret_engine.config + if secret_engine.type == 'database': + client.secrets.database.configure( + **secret_engine.secret_config ) - new_secrets_engines.append("type: {} - path: {}".format(secret_engine.type, secret_engine.path)) + log.info( + 'Finished provisioning specific configurations for "%s" secrets engine...', secret_engine.type) - log.debug('Secret engine "%s" at path "%s" is enabled.', - secret_engine.type, secret_engine.path) + if secret_engine.extra_config: + log.info( + 'Provisioning extra conifgurations for for "%s" secrets engine...', secret_engine.type) - if secret_engine.secret_config: - log.info( - 'Provisioning specific configurations for "%s" secrets engine...', secret_engine.type) + if secret_engine.type == 'ad': + # Get roles from vault + existing_roles = None + existing_roles = client.secrets.activedirectory.list_roles() + log.debug(existing_roles) - if secret_engine.type == 'ad': - client.secrets.activedirectory.configure( - **secret_engine.secret_config - ) - if secret_engine.type == 'database': - client.secrets.database.configure( - **secret_engine.secret_config + # Add new roles + local_roles = secret_engine.extra_config['roles'] + for key in local_roles: + log.debug('AD Role ["%s"] -> Role %s', + str(key), local_roles[key]) + + client.secrets.activedirectory.create_or_update_role( + name=key, + service_account_name=local_roles[key]['service_account_name'], + ttl=local_roles[key]['ttl'] ) - log.info( - 'Finished provisioning specific configurations for "%s" secrets engine...', secret_engine.type) - - if secret_engine.extra_config: - log.info( - 'Provisioning extra conifgurations for for "%s" secrets engine...', secret_engine.type) - - if secret_engine.type == 'ad': - # Get roles from vault - existing_roles = None - try: - existing_roles = client.secrets.activedirectory.list_roles() - log.debug(existing_roles) - except Exception: - raise - - # Add new roles - local_roles = secret_engine.extra_config['roles'] - for key in local_roles: - log.debug('AD Role ["%s"] -> Role %s', - str(key), local_roles[key]) - try: - client.secrets.activedirectory.create_or_update_role( - name=key, - service_account_name=local_roles[key]['service_account_name'], - ttl=local_roles[key]['ttl'] + # Remove missing roles + if existing_roles: + for role in existing_roles: + if role in {k.lower(): v for k, v in local_roles.items()}: + log.debug( + 'AD role ["%s"] exists in configuration, no cleanup necessary', role) + else: + log.debug( + 'Ad role ["%s"] does not exists in configuration, deleting...', role) + client.secrets.activedirectory.delete_role( + name=role ) - except Exception: - raise - - # Remove missing roles - if existing_roles: - for role in existing_roles: - if role in {k.lower(): v for k, v in local_roles.items()}: - log.debug( - 'AD role ["%s"] exists in configuration, no cleanup necessary', role) - else: - log.debug( - 'Ad role ["%s"] does not exists in configuration, deleting...', role) - client.secrets.activedirectory.delete_role( - name=role - ) - log.debug( - 'AD role has been ["%s"] deleted.', role) - else: - log.debug( - 'Secret engine "%s" does not contain any extra configurations.', secret_engine.type - ) - except Exception: - raise - + log.debug( + 'AD role has been ["%s"] deleted.', role) + else: + log.debug( + 'Secret engine "%s" does not contain any extra 
configurations.', secret_engine.type + ) # Build return object - ret['changes']['old'] = ["type: {} - path: {}".format(ob.type, ob.path) for ob in remote_engines] + ret['changes']['old'] = [ + "type: {} - path: {}".format(ob.type, ob.path) for ob in remote_engines] ret['changes']['new'] = new_secrets_engines or "No changes" log.info('Finished proccessing and configuring secrets engines.') @@ -677,30 +641,27 @@ def cleanup_secrets_engines(self, client, remote_engines, local_engines, ret): """ log.info('Cleaning up secrets engines...') has_changes = False - - try: - for secret_engine in remote_engines: - if not (secret_engine.type == "system" or - secret_engine.type == "cubbyhole" or - secret_engine.type == "identity" or - secret_engine.type == "generic"): - if secret_engine in local_engines: - log.debug('Secrets engine "%s" at path "%s" exists in configuration, no cleanup necessary.', - secret_engine.type, secret_engine.path) - else: - log.debug('Secrets engine "%s" at path "%s" does not exist in configuration. Disabling...', - secret_engine.type, secret_engine.path) - has_changes = True - client.sys.disable_secrets_engine( - path=secret_engine.path - ) - log.debug('Secrets engine "%s" at path "%s" is disabled.', - secret_engine.type, secret_engine.type) - except Exception: - raise + for secret_engine in remote_engines: + if not (secret_engine.type == "system" or + secret_engine.type == "cubbyhole" or + secret_engine.type == "identity" or + secret_engine.type == "generic"): + if secret_engine in local_engines: + log.debug('Secrets engine "%s" at path "%s" exists in configuration, no cleanup necessary.', + secret_engine.type, secret_engine.path) + else: + log.debug('Secrets engine "%s" at path "%s" does not exist in configuration. Disabling...', + secret_engine.type, secret_engine.path) + has_changes = True + client.sys.disable_secrets_engine( + path=secret_engine.path + ) + log.debug('Secrets engine "%s" at path "%s" is disabled.', + secret_engine.type, secret_engine.type) if has_changes: - ret['changes']['new'] = ["type: {} - path: {}".format(ob.type, ob.path) for ob in local_engines] + ret['changes']['new'] = [ + "type: {} - path: {}".format(ob.type, ob.path) for ob in local_engines] log.info('Finished cleaning up secrets engines.') @@ -727,25 +688,22 @@ def get_remote_audit_devices(self, client, ret): """ log.info("Retrieving audit devices from vault...") devices = [] - try: - audit_devices_resp = client.sys.list_enabled_audit_devices() - log.debug(audit_devices_resp) - for device in audit_devices_resp['data']: - audit_device = audit_devices_resp[device] - devices.append( - VaultAuditDevice( - type=audit_device['type'], - path=(audit_device["path"] - if 'path' in audit_device else device), - description=audit_device["description"], - config=OrderedDict( - sorted(audit_device["options"].items())) - ) + audit_devices_resp = client.sys.list_enabled_audit_devices() + log.debug(audit_devices_resp) + for device in audit_devices_resp['data']: + audit_device = audit_devices_resp[device] + devices.append( + VaultAuditDevice( + type=audit_device['type'], + path=(audit_device["path"] + if 'path' in audit_device else device), + description=audit_device["description"], + config=OrderedDict( + sorted(audit_device["options"].items())) ) + ) - log.info('Finished retrieving audit devices from vault.') - except Exception: - raise + log.info('Finished retrieving audit devices from vault.') return devices @@ -762,24 +720,21 @@ def get_local_audit_devices(self, configs, ret): log.info("Loading audit devices 
from local config...") devices = [] if configs: - try: - for audit_device in configs: - config = None - if 'config' in audit_device: - if audit_device['config']: - config = OrderedDict( - sorted(audit_device["config"].items())) - - devices.append( - VaultAuditDevice( - type=audit_device["type"], - path=audit_device["path"], - description=audit_device["description"], - config=config - ) + for audit_device in configs: + config = None + if 'config' in audit_device: + if audit_device['config']: + config = OrderedDict( + sorted(audit_device["config"].items())) + + devices.append( + VaultAuditDevice( + type=audit_device["type"], + path=audit_device["path"], + description=audit_device["description"], + config=config ) - except Exception: - raise + ) log.info('Finished loading audit devices from local config.') @@ -796,35 +751,32 @@ def configure_audit_devices(self, client, remote_devices, local_devices, ret): """ log.info('Processing and configuring audit devices...') new_audit_devices = [] - try: - for audit_device in local_devices: - log.debug('Checking if audit device "%s" at path "%s" is enabled...', - audit_device.type, audit_device.path) + for audit_device in local_devices: + log.debug('Checking if audit device "%s" at path "%s" is enabled...', + audit_device.type, audit_device.path) - if audit_device in remote_devices: - log.debug('Audit device "%s" at path "%s" is already enabled.', - audit_device.type, audit_device.path) - else: - log.debug( - 'Audit device "%s" at path "%s" is not enabled. Enabling now...', - audit_device.type, - audit_device.path - ) - new_audit_devices.append(audit_device.type) - client.sys.enable_audit_device( - device_type=audit_device.type, - path=audit_device.path, - description=audit_device.description, - options=audit_device.config - ) - log.debug('Audit device "%s" at path "%s" is enabled.', - audit_device.type, audit_device.path) + if audit_device in remote_devices: + log.debug('Audit device "%s" at path "%s" is already enabled.', + audit_device.type, audit_device.path) + else: + log.debug( + 'Audit device "%s" at path "%s" is not enabled. 
Enabling now...', + audit_device.type, + audit_device.path + ) + new_audit_devices.append(audit_device.type) + client.sys.enable_audit_device( + device_type=audit_device.type, + path=audit_device.path, + description=audit_device.description, + options=audit_device.config + ) + log.debug('Audit device "%s" at path "%s" is enabled.', + audit_device.type, audit_device.path) - # Build return object - ret['changes']['old'] = [ob.type for ob in remote_devices] - ret['changes']['new'] = new_audit_devices or "No changes" - except Exception: - raise + # Build return object + ret['changes']['old'] = [ob.type for ob in remote_devices] + ret['changes']['new'] = new_audit_devices or "No changes" log.info('Finished processing audit devices.') @@ -839,20 +791,16 @@ def cleanup_audit_devices(self, client, remote_devices, local_devices, ret): """ log.info('Cleaning up audit devices...') has_changes = False - try: - for audit_device in remote_devices: - if audit_device not in local_devices: - log.info('Disabling audit device "%s" at path "%s"...', - audit_device.type, audit_device.path) - has_changes = True - client.sys.disable_audit_device( - path=audit_device.path - ) - - if has_changes: - ret['changes']['new'] = [ob.type for ob in local_devices] + for audit_device in remote_devices: + if audit_device not in local_devices: + log.info('Disabling audit device "%s" at path "%s"...', + audit_device.type, audit_device.path) + has_changes = True + client.sys.disable_audit_device( + path=audit_device.path + ) - except Exception: - raise + if has_changes: + ret['changes']['new'] = [ob.type for ob in local_devices] log.info('Finished cleaning up audit devices.') diff --git a/salt/vault/map.jinja b/salt/vault/map.jinja index 7eef7d8..1ed99cd 100644 --- a/salt/vault/map.jinja +++ b/salt/vault/map.jinja @@ -13,6 +13,3 @@ ) ) ) %} - -{#- Merge the vault pillar #} -{%- set vault = salt.pillar.get('vault', default=vault, merge=True) %} diff --git a/salt/vault/service.sls b/salt/vault/service.sls index 57e1ca3..bc1f460 100644 --- a/salt/vault/service.sls +++ b/salt/vault/service.sls @@ -8,26 +8,16 @@ vault_service_init_file_managed: - defaults: dev_configs: {{ vault.dev_configs }} -{%- if not vault.dev_mode %} - -manage_selinux_mode: - selinux.mode: - - name: permissive - -vault_service_running: - service.running: - - name: vault - - enable: True - - reload: True - - require: - - selinux: manage_selinux_mode - -{%- else %} - vault_service_running: service.running: - name: vault - enable: True - reload: True +{%- if not vault.dev_mode %} +manage_selinux_mode: + selinux.mode: + - name: permissive + - require_in: + - service: vault_service_running {%- endif %} diff --git a/scripts/terraform-docs.awk b/scripts/terraform-docs.awk deleted file mode 100644 index bd6b2b7..0000000 --- a/scripts/terraform-docs.awk +++ /dev/null @@ -1,90 +0,0 @@ -# This script converts Terraform 0.12 variables/outputs to something suitable for `terraform-docs` -# As of terraform-docs v0.6.0, HCL2 is not supported. This script is a *dirty hack* to get around it. 
-# https://github.com/segmentio/terraform-docs/ -# https://github.com/segmentio/terraform-docs/issues/62 - -{ - if ( $0 ~ /\{/ ) { - braceCnt++ - } - - if ( $0 ~ /\}/ ) { - braceCnt-- - } - - # [START] variable or output block started - if ($0 ~ /^[[:space:]]*(variable|output)[[:space:]][[:space:]]*"(.*?)"/) { - # Normalize the braceCnt (should be 1 now) - braceCnt = 1 - # [CLOSE] "default" block - if (blockDefCnt > 0) { - blockDefCnt = 0 - } - blockCnt++ - print $0 - } - - # [START] multiline default statement started - if (blockCnt > 0) { - if ($0 ~ /^[[:space:]][[:space:]]*(default)[[:space:]][[:space:]]*=/) { - if ($3 ~ "null") { - print " default = \"null\"" - } else { - print $0 - blockDefCnt++ - blockDefStart=1 - } - } - } - - # [PRINT] single line "description" - if (blockCnt > 0) { - if (blockDefCnt == 0) { - if ($0 ~ /^[[:space:]][[:space:]]*description[[:space:]][[:space:]]*=/) { - # [CLOSE] "default" block - if (blockDefCnt > 0) { - blockDefCnt = 0 - } - print $0 - } - } - } - - # [PRINT] single line "type" - if (blockCnt > 0) { - if ($0 ~ /^[[:space:]][[:space:]]*type[[:space:]][[:space:]]*=/ ) { - # [CLOSE] "default" block - if (blockDefCnt > 0) { - blockDefCnt = 0 - } - type=$3 - if (type ~ "object") { - print " type = \"object\"" - } else { - # legacy quoted types: "string", "list", and "map" - if ($3 ~ /^[[:space:]]*"(.*?)"[[:space:]]*$/) { - print " type = " $3 - } else { - print " type = \"" $3 "\"" - } - } - } - } - - # [CLOSE] variable/output block - if (blockCnt > 0) { - if (braceCnt == 0 && blockCnt > 0) { - blockCnt-- - print $0 - } - } - - # [PRINT] Multiline "default" statement - if (blockCnt > 0 && blockDefCnt > 0) { - if (blockDefStart == 1) { - blockDefStart = 0 - } else { - print $0 - } - } -} diff --git a/scripts/terraform-docs.sh b/scripts/terraform-docs.sh deleted file mode 100755 index ecdbea7..0000000 --- a/scripts/terraform-docs.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -which awk 2>&1 >/dev/null || ( echo "awk not available"; exit 1) -which terraform 2>&1 >/dev/null || ( echo "terraform not available"; exit 1) -which terraform-docs 2>&1 >/dev/null || ( echo "terraform-docs not available"; exit 1) - -if [[ "`terraform version | head -1`" =~ 0\.12 ]]; then - TMP_FILE="$(mktemp /tmp/terraform-docs-XXXXXXXXXX)" - awk -f scripts/terraform-docs.awk *.tf > ${TMP_FILE} - terraform-docs $1 ${TMP_FILE} - rm -f ${TMP_FILE} -else - terraform-docs $1 $2 -fi diff --git a/tests/module_test.go b/tests/module_test.go index ab00874..720e08f 100644 --- a/tests/module_test.go +++ b/tests/module_test.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "log" "net/http" + "os" "testing" "time" @@ -17,62 +18,40 @@ import ( func TestModule(t *testing.T) { files, err := ioutil.ReadDir("./") + if err != nil { log.Fatal(err) } for _, f := range files { // look for directories with test cases in it - if f.IsDir() { - if f.Name() != "vendor" { - testFiles, testErr := ioutil.ReadDir(f.Name()) - if testErr != nil { - log.Fatal(testErr) - } - - // see if a prereq directory exists - for _, testF := range testFiles { - if testF.IsDir() { - if testF.Name() == "prereq" { - directory := f.Name() + "/" + testF.Name() - runTerraformPreReq(t, directory) - } - } + if f.IsDir() && f.Name() != "vendor" { + t.Run(f.Name(), func(t *testing.T) { + // check if a prereq directory exists + prereqDir := f.Name() + "/prereq/" + if _, err := os.Stat(prereqDir); err == nil { + prereqOptions := createTerraformOptions(prereqDir) + defer terraform.Destroy(t, prereqOptions) + terraform.InitAndApply(t, 
prereqOptions) } - // run terraform code - runTerraform(t, f.Name()) - } + // run terraform code for test case + terraformOptions := createTerraformOptions(f.Name()) + defer terraform.Destroy(t, terraformOptions) + terraform.InitAndApply(t, terraformOptions) + testVaultViaAlb(t, terraformOptions) + }) } } } -// The prequisite function runs the terraform code but doesn't destroy it afterwards so that the state can be used for further testing -func runTerraformPreReq(t *testing.T, directory string) { +func createTerraformOptions(directory string) *terraform.Options { terraformOptions := &terraform.Options{ TerraformDir: directory, NoColor: true, } - // This will run `terraform init` and `terraform apply` and fail the test if there are any errors - terraform.InitAndApply(t, terraformOptions) -} - -func runTerraform(t *testing.T, directory string) { - terraformOptions := &terraform.Options{ - // The path to where your Terraform code is located - TerraformDir: directory, - // Disable color output - NoColor: true, - } - - // At the end of the test, run `terraform destroy` to clean up any resources that were created - defer terraform.Destroy(t, terraformOptions) - - // This will run `terraform init` and `terraform apply` and fail the test if there are any errors - terraform.InitAndApply(t, terraformOptions) - - testVaultViaAlb(t, terraformOptions) + return terraformOptions } // Use the Vault client to connect to the Vault via the ALB, via the route53 record, and make sure it works without @@ -82,7 +61,7 @@ func testVaultViaAlb(t *testing.T, terraformOptions *terraform.Options) { description := fmt.Sprintf("Testing Vault via ALB at cluster URL %s", clusterURL) logger.Logf(t, description) - maxRetries := 30 + maxRetries := 3 sleepBetweenRetries := 10 * time.Second vaultClient := createVaultClient(t, clusterURL) diff --git a/tests/vault-py2/main.tf b/tests/vault-py2/main.tf index 8ffdc2b..b59fc85 100644 --- a/tests/vault-py2/main.tf +++ b/tests/vault-py2/main.tf @@ -34,7 +34,7 @@ module "base" { # Watchmaker settings watchmaker_config = var.watchmaker_config - toggle_update = "A" + toggle_update = "B" } output "cluster_url" { diff --git a/tests/vault-py2/variables.tf b/tests/vault-py2/variables.tf index e529dd9..d7743a7 100644 --- a/tests/vault-py2/variables.tf +++ b/tests/vault-py2/variables.tf @@ -30,7 +30,7 @@ variable "vault_version" { } variable "vault_pillar_path" { - type = string + type = string description = "Specify the path to vault pillar" } diff --git a/tests/vault-py3/main.tf b/tests/vault-py3/main.tf index 045c3fc..15f4f34 100644 --- a/tests/vault-py3/main.tf +++ b/tests/vault-py3/main.tf @@ -29,8 +29,9 @@ module "vault-py3" { certificate_arn = var.certificate_arn # Vault settings - vault_version = var.vault_version - dynamodb_table = var.dynamodb_table + vault_version = var.vault_version + vault_pillar_path = var.vault_pillar_path + dynamodb_table = var.dynamodb_table # Watchmaker settings watchmaker_config = var.watchmaker_config diff --git a/tests/vault-py3/pillar/top.sls b/tests/vault-py3/pillar/top.sls new file mode 100644 index 0000000..7e34ce6 --- /dev/null +++ b/tests/vault-py3/pillar/top.sls @@ -0,0 +1,3 @@ +base: + "*": + - vault diff --git a/tests/vault-py3/pillar/vault/init.sls b/tests/vault-py3/pillar/vault/init.sls new file mode 100644 index 0000000..f3a2a97 --- /dev/null +++ b/tests/vault-py3/pillar/vault/init.sls @@ -0,0 +1,48 @@ +vault: + lookup: + api_port: ${api_port} + cluster_port: ${cluster_port} + dynamodb_table: ${dynamodb_table} + inbound_cidrs: 
${inbound_cidrs}
+    kms_key_id: ${kms_key_id}
+    logs_path: ${logs_path}
+    logs_dir: ${logs_dir}
+    region: ${region}
+    ssm_path: ${ssm_path}
+    version: ${vault_version}
+
+  secrets_engines:
+    - type: kv
+      path: services
+      description: Services specific folders
+      config:
+        default_lease_ttl: 1800
+        max_lease_ttl: 1800
+
+  auth_methods:
+    - type: token
+      path: token
+      description: token-based credentials
+      config:
+        default_lease_ttl: 0
+        max_lease_ttl: 0
+
+  audit_devices:
+    - type: file
+      path: file_log
+      description: first audit device
+      config:
+        file_path: /etc/vault/logs/audit.log
+
+  policies:
+    - name: xyz_admin
+      content:
+        path:
+          '*': {capabilities: [read, create]}
+          'stage/*': {capabilities: [read, create, update, delete, list]}
+
+    - name: abc_admin
+      content:
+        path:
+          '*': {capabilities: [read, create]}
+          'stage/*': {capabilities: [read, create]}
diff --git a/tests/vault-py3/variables.tf b/tests/vault-py3/variables.tf
index cf4bda8..c7377e3 100644
--- a/tests/vault-py3/variables.tf
+++ b/tests/vault-py3/variables.tf
@@ -51,6 +51,11 @@ variable "vault_version" {
   type        = string
 }
 
+variable "vault_pillar_path" {
+  type        = string
+  description = "Specify the path to vault pillar"
+}
+
 variable "dynamodb_table" {
   description = "Name of the Dynamodb to be used as storage backend for Vault"
   type        = string
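
A note on the refactored salt/_modules/vault.py in this patch: every manager method now follows the same reconcile shape (list the remote objects, apply the local pillar definitions, and record the before/after view under ret['changes']), with exceptions left to propagate to the caller rather than being caught and blindly re-raised. Below is a minimal standalone sketch of that shape for policies only, driven directly by the hvac client; the VAULT_ADDR/VAULT_TOKEN environment lookup and the inline policy bodies are illustrative assumptions, not part of the patch.

    import os

    import hvac

    # Local definitions mirroring the ``policies`` section of the pillar
    # above; the HCL policy bodies here are illustrative only.
    local_policies = [
        {"name": "xyz_admin",
         "content": 'path "*" { capabilities = ["read", "create"] }'},
        {"name": "abc_admin",
         "content": 'path "*" { capabilities = ["read", "create"] }'},
    ]

    # Assumes a reachable Vault and a token with policy-management rights.
    client = hvac.Client(
        url=os.environ.get("VAULT_ADDR", "http://127.0.0.1:8200"),
        token=os.environ.get("VAULT_TOKEN"),
    )

    ret = {"changes": {}, "result": True}

    # Remote view, minus the built-in policies that are never reconciled.
    remote_policies = [
        p for p in client.sys.list_policies()["data"]["policies"]
        if p not in ("root", "default")
    ]

    # Push every local policy; record only the names that were not already
    # present remotely, matching what push_policies() reports above.
    new_policies = []
    for policy in local_policies:
        client.sys.create_or_update_policy(
            name=policy["name"], policy=policy["content"])
        if policy["name"] not in remote_policies:
            new_policies.append(policy["name"])

    ret["changes"]["old"] = remote_policies
    ret["changes"]["new"] = new_policies or "No changes"
    print(ret)

Run twice against a dev-mode Vault (vault server -dev), the sketch also shows why the managers report "No changes" on a converged cluster: the second pass finds both policies in the remote list and leaves ret['changes']['new'] at its placeholder value.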