From 175c0c1592dac3093f7b0122af95edb8afa94763 Mon Sep 17 00:00:00 2001 From: Triet Le Date: Tue, 1 Oct 2019 16:05:59 -0400 Subject: [PATCH] Replace S3 bucket module with S3 Bucket Resource --- Makefile | 15 +- README.md | 1 - main.tf | 38 +- modules/iam/iam_policy.json | 6 +- salt/_modules/vault.py | 766 ++++++++++++-------------- salt/vault/map.jinja | 3 - salt/vault/service.sls | 22 +- tests/module_test.go | 59 +- tests/vault-py2/main.tf | 2 +- tests/vault-py2/variables.tf | 2 +- tests/vault-py3/main.tf | 5 +- tests/vault-py3/pillar/top.sls | 3 + tests/vault-py3/pillar/vault/init.sls | 48 ++ tests/vault-py3/variables.tf | 5 + 14 files changed, 475 insertions(+), 500 deletions(-) create mode 100644 tests/vault-py3/pillar/top.sls create mode 100644 tests/vault-py3/pillar/vault/init.sls diff --git a/Makefile b/Makefile index e4eff0d..724b8ff 100755 --- a/Makefile +++ b/Makefile @@ -78,6 +78,11 @@ shellcheck/install: $(BIN_DIR) guard/program/xz rm -rf $(@D)-* $(@D) --version +tfdocs-awk/install: $(BIN_DIR) +tfdocs-awk/install: ARCHIVE := https://github.com/plus3it/tfdocs-awk/archive/master.tar.gz +tfdocs-awk/install: + $(CURL) $(ARCHIVE) | tar -C $(BIN_DIR) --strip-components=1 --wildcards '*.sh' --wildcards '*.awk' -xzvf - + terraform/lint: | guard/program/terraform @ echo "[$@]: Linting Terraform files..." terraform fmt -check=true -diff=true @@ -100,15 +105,15 @@ json/format: | guard/program/jq $(FIND_JSON) | $(XARGS) bash -c 'echo "$$(jq --indent 4 -S . "{}")" > "{}"' @ echo "[$@]: Successfully formatted JSON files!" -docs/%: README_PARTS := _docs/MAIN.md <(echo) <(./scripts/terraform-docs.sh markdown table .) +docs/%: README_PARTS := _docs/MAIN.md <(echo) <($(BIN_DIR)/terraform-docs.sh markdown table .) docs/%: README_FILE ?= README.md -docs/lint: | guard/program/terraform-docs +docs/lint: | guard/program/terraform-docs tfdocs-awk/install @ echo "[$@]: Linting documentation files.." 
diff $(README_FILE) <(cat $(README_PARTS)) @ echo "[$@]: Documentation files PASSED lint test!" -docs/generate: | guard/program/terraform-docs +docs/generate: | guard/program/terraform-docs tfdocs-awk/install @ echo "[$@]: Creating documentation files.." cat $(README_PARTS) > $(README_FILE) @ echo "[$@]: Documentation files creation complete!" @@ -119,4 +124,6 @@ terratest/install: | guard/program/go cd tests && go mod tidy terratest/test: | guard/program/go - cd tests && go test -v -timeout 40m + cd tests && go test -count=1 -timeout 60m + +test: terratest/test diff --git a/README.md b/README.md index 8fea6ce..2f8cfdc 100755 --- a/README.md +++ b/README.md @@ -53,4 +53,3 @@ Terraform module that installs and configures Hashicorp Vault cluster with HA Dy | Name | Description | |------|-------------| | vault\_url | URL to access Vault UI | - diff --git a/main.tf b/main.tf index b88bb5d..33d8c92 100644 --- a/main.tf +++ b/main.tf @@ -21,9 +21,9 @@ locals { logs_path = "${local.logs_dir}/state.vault" enabled_repos = "epel" default_inbound_cdirs = ["10.0.0.0/16"] - s3_appscript_url = "s3://${module.s3_bucket.this_s3_bucket_id}/${local.appscript_file_name}" - s3_salt_vault_content = "s3://${module.s3_bucket.this_s3_bucket_id}/${local.archive_file_name}" - s3_pillar_url = "s3://${module.s3_bucket.this_s3_bucket_id}/${local.pillar_file_name}" + s3_appscript_url = "s3://${aws_s3_bucket.this.id}/${local.appscript_file_name}" + s3_salt_vault_content = "s3://${aws_s3_bucket.this.id}/${local.archive_file_name}" + s3_pillar_url = "s3://${aws_s3_bucket.this.id}/${local.pillar_file_name}" archive_path = join("/", [path.module, ".files", local.archive_file_name]) pillar_path = join("/", [path.cwd, ".files", local.pillar_file_name]) appscript_path = join("/", [path.module, "scripts", local.appscript_file_name]) @@ -106,23 +106,22 @@ data "archive_file" "pillar" { } resource "aws_s3_bucket_object" "pillar" { - bucket = module.s3_bucket.this_s3_bucket_id + bucket = 
aws_s3_bucket.this.id key = local.pillar_file_name source = local.pillar_path etag = data.archive_file.pillar.output_md5 } -# Manage S3 bucket module -module "s3_bucket" { - source = "git::https://github.com/terraform-aws-modules/terraform-aws-s3-bucket.git?ref=v0.1.0" - +# Manage S3 bucket +resource "aws_s3_bucket" "this" { bucket = local.bucket_name -} + tags = local.tags +} resource "aws_s3_bucket_policy" "this" { - bucket = module.s3_bucket.this_s3_bucket_id - policy = templatefile("${path.module}/policies/bucket_policy.json", { bucket_arn = module.s3_bucket.this_s3_bucket_arn }) + bucket = aws_s3_bucket.this.id + policy = templatefile("${path.module}/policies/bucket_policy.json", { bucket_arn = aws_s3_bucket.this.arn }) } # Manage IAM module @@ -131,7 +130,7 @@ module "iam" { role_name = local.role_name policy_vars = { - bucket_name = module.s3_bucket.this_s3_bucket_id + bucket_name = aws_s3_bucket.this.id dynamodb_table = local.dynamodb_table kms_key_id = local.kms_key_id stack_name = var.name @@ -155,7 +154,7 @@ data "archive_file" "salt" { } resource "aws_s3_bucket_object" "salt_zip" { - bucket = module.s3_bucket.this_s3_bucket_id + bucket = aws_s3_bucket.this.id key = local.archive_file_name source = local.archive_path etag = data.archive_file.salt.output_md5 @@ -171,7 +170,7 @@ data "template_file" "appscript" { } resource "aws_s3_bucket_object" "app_script" { - bucket = module.s3_bucket.this_s3_bucket_id + bucket = aws_s3_bucket.this.id key = local.appscript_file_name content = data.template_file.appscript.rendered etag = md5(data.template_file.appscript.rendered) @@ -285,7 +284,7 @@ resource "aws_lb_target_group" "this" { # /sys/health will return 200 only if the vault instance # is the leader. Meaning there will only ever be one healthy # instance, but a failure will cause a new instance to - # be healthy automatically. This healthceck path prevents + # be healthy automatically. 
This healthcheck path prevents # unnecessary redirect loops by not sending traffic to # followers, which always just route traffic to the master health_check { @@ -414,16 +413,15 @@ resource "aws_appautoscaling_policy" "this" { # Manage autoscaling group module "autoscaling_group" { - source = "git::https://github.com/plus3it/terraform-aws-watchmaker//modules/lx-autoscale?ref=1.15.7" + source = "git::https://github.com/plus3it/terraform-aws-watchmaker//modules/lx-autoscale?ref=2.0.0" Name = var.name OnFailureAction = "" DisableRollback = "true" - AmiId = data.aws_ami.this.id - AmiDistro = "CentOS" - AppScriptUrl = local.s3_appscript_url - CfnBootstrapUtilsUrl = var.cfn_bootstrap_utils_url + AmiId = data.aws_ami.this.id + AmiDistro = "CentOS" + AppScriptUrl = local.s3_appscript_url CfnEndpointUrl = var.cfn_endpoint_url CloudWatchAgentUrl = var.cloudwatch_agent_url diff --git a/modules/iam/iam_policy.json b/modules/iam/iam_policy.json index b8ef8ae..5f98608 100644 --- a/modules/iam/iam_policy.json +++ b/modules/iam/iam_policy.json @@ -7,7 +7,7 @@ ], "Effect": "Allow", "Resource": [ - "arn:${partition}:cloudformation:${region}:${account_id}:stack/${stack_name}*" + "arn:${partition}:cloudformation:${region}:${account_id}:stack/${stack_name}/*" ], "Sid": "CfnActions" }, @@ -63,9 +63,9 @@ ], "Effect": "Allow", "Resource": [ - "arn:${partition}:logs:${region}:${account_id}:log-group:/aws/ec2/lx/${stack_name}*" + "arn:${partition}:logs:${region}:${account_id}:log-group:/aws/ec2/lx/${stack_name}:log-stream:*" ], - "Sid": "CloudWatchLogActions" + "Sid": "CloudWatchLogStreamActions" }, { "Action": [ diff --git a/salt/_modules/vault.py b/salt/_modules/vault.py index 6783bd8..3aaa065 100644 --- a/salt/_modules/vault.py +++ b/salt/_modules/vault.py @@ -168,15 +168,11 @@ def get_remote_policies(self, client, ret): """ log.info('Retrieving policies from vault...') polices = [] - try: - policies_resp = client.sys.list_policies() + policies_resp = client.sys.list_policies() - for 
policy in policies_resp['data']['policies']: - if not (policy == 'root' or policy == 'default'): - polices.append(policy) - - except Exception: - raise + for policy in policies_resp['data']['policies']: + if not (policy == 'root' or policy == 'default'): + polices.append(policy) log.info('Finished retrieving policies from vault.') @@ -193,23 +189,20 @@ def push_policies(self, client, remote_policies, local_policies, ret): """ log.info('Pushing policies from local config folder to vault...') new_policies = [] - try: - for policy in local_policies: - client.sys.create_or_update_policy( - name=policy['name'], - policy=policy['content'] - ) - if policy['name'] in remote_policies: - log.debug('Policy "%s" has been updated.', policy["name"]) - else: - new_policies.append(policy["name"]) - log.debug('Policy "%s" has been created.', policy["name"]) + for policy in local_policies: + client.sys.create_or_update_policy( + name=policy['name'], + policy=policy['content'] + ) + if policy['name'] in remote_policies: + log.debug('Policy "%s" has been updated.', policy["name"]) + else: + new_policies.append(policy["name"]) + log.debug('Policy "%s" has been created.', policy["name"]) - # Build return object - ret['changes']['old'] = remote_policies - ret['changes']['new'] = new_policies or "No changes" - except Exception: - raise + # Build return object + ret['changes']['old'] = remote_policies + ret['changes']['new'] = new_policies or "No changes" log.info('Finished pushing policies local config folder to vault.') @@ -224,19 +217,16 @@ def cleanup_policies(self, client, remote_policies, local_policies, ret): """ log.info('Cleaning up vault policies...') has_change = False - try: - for policy in remote_policies: - if policy not in [pol['name'] for pol in local_policies]: - log.debug( - '"%s" is not found in configs folder. 
Removing it from vault...', policy) - has_change = True - client.sys.delete_policy(name=policy) - log.debug('"%s" is removed.', policy) - - if has_change: - ret['changes']['new'] = [ob['name'] for ob in local_policies] - except Exception: - raise + for policy in remote_policies: + if policy not in [pol['name'] for pol in local_policies]: + log.debug( + '"%s" is not found in configs folder. Removing it from vault...', policy) + has_change = True + client.sys.delete_policy(name=policy) + log.debug('"%s" is removed.', policy) + + if has_change: + ret['changes']['new'] = [ob['name'] for ob in local_policies] log.info('Finished cleaning up vault policies.') @@ -246,9 +236,9 @@ class VaultAuthManager(): """ def __init__(self): - """Initialize Authentication Manager - """ - log.info("Initializing Vault Auth Manager...") + """Initialize Authentication Manager + """ + log.info("Initializing Vault Auth Manager...") def get_remote_auth_methods(self, client, ret): """Retrieve authentication methods from remote vault server @@ -264,20 +254,17 @@ def get_remote_auth_methods(self, client, ret): auth_resp = client.sys.list_auth_methods() auth_methods = [] - try: - for auth_method in auth_resp['data']: - auth_methods.append( - VaultAuthMethod( - type=auth_resp[auth_method]['type'], - path=(auth_resp[auth_method]["path"] - if 'path' in auth_resp[auth_method] else auth_method), - description=auth_resp[auth_method]["description"], - config=OrderedDict( - sorted(auth_resp[auth_method]["config"].items())) - ) + for auth_method in auth_resp['data']: + auth_methods.append( + VaultAuthMethod( + type=auth_resp[auth_method]['type'], + path=(auth_resp[auth_method]["path"] + if 'path' in auth_resp[auth_method] else auth_method), + description=auth_resp[auth_method]["description"], + config=OrderedDict( + sorted(auth_resp[auth_method]["config"].items())) ) - except Exception: - raise + ) log.info('Finished retrieving auth methods from vault.') @@ -296,32 +283,29 @@ def 
populate_local_auth_methods(self, configs, ret): log.info('Populating local auth methods...') auth_methods = [] - try: - for auth_method in configs: - auth_config = None - extra_config = None - - if "auth_config" in auth_method: - auth_config = OrderedDict( - sorted(auth_method["auth_config"].items())) - - if "extra_config" in auth_method: - extra_config = OrderedDict( - sorted(auth_method["extra_config"].items())) - - auth_methods.append( - VaultAuthMethod( - type=auth_method["type"], - path=auth_method["path"], - description=auth_method["description"], - config=OrderedDict( - sorted(auth_method["config"].items())), - auth_config=auth_config, - extra_config=extra_config - ) + for auth_method in configs: + auth_config = None + extra_config = None + + if "auth_config" in auth_method: + auth_config = OrderedDict( + sorted(auth_method["auth_config"].items())) + + if "extra_config" in auth_method: + extra_config = OrderedDict( + sorted(auth_method["extra_config"].items())) + + auth_methods.append( + VaultAuthMethod( + type=auth_method["type"], + path=auth_method["path"], + description=auth_method["description"], + config=OrderedDict( + sorted(auth_method["config"].items())), + auth_config=auth_config, + extra_config=extra_config ) - except Exception: - raise + ) log.info('Finished populating local auth methods.') @@ -341,88 +325,84 @@ def configure_auth_methods(self, client, remote_methods, local_methods, ret): new_auth_methods = [] ldap_groups = [] - try: - for auth_method in local_methods: - log.debug('Checking if auth method "%s" is enabled...', - auth_method.path) - if auth_method in remote_methods: - log.debug( - 'Auth method "%s" is already enabled. 
Tuning...', auth_method.path) - client.sys.tune_auth_method( - path=auth_method.path, - description=auth_method.description, - default_lease_ttl=auth_method.config["default_lease_ttl"], - max_lease_ttl=auth_method.config["max_lease_ttl"] - ) - log.debug('Auth method "%s" is tuned.', auth_method.type) - else: - log.debug( - 'Auth method "%s" is not enabled. Enabling now...', auth_method.path) - client.sys.enable_auth_method( - method_type=auth_method.type, - path=auth_method.path, - description=auth_method.description, - config=auth_method.config - ) - log.debug('Auth method "%s" is enabled.', auth_method.type) - new_auth_methods.append(auth_method.type) - - # Provision config for specific auth method - if auth_method.auth_config: - if auth_method.type == "ldap": - log.debug('Provisioning configuration for LDAP...') - client.auth.ldap.configure(**auth_method.auth_config) - log.debug('Configuration for LDAP is provisioned.') - else: - log.debug( - 'Auth method "%s" does not contain any specific configurations.', auth_method.type) - - if auth_method.extra_config: - log.debug( - 'Provisioning extra configurations for auth method "%s"', auth_method.type) - # Get LDAP group mapping from vault - ldap_list_group_response = client.auth.ldap.list_groups() - if ldap_list_group_response: - ldap_groups = ldap_list_group_response["data"]["keys"] - - log.debug("LDAP groups from vault: %s", str(ldap_groups)) - - # Update LDAP group mapping - log.debug( - 'Writing LDAP group -> Policy mappings for "%s"', str(auth_method.path)) - local_config_groups = auth_method.extra_config["group_policy_map"] - for key in local_config_groups: - log.debug('LDAP Group ["%s"] -> Policies %s', - str(key), local_config_groups[key]) - - client.auth.ldap.create_or_update_group( - name=key, - policies=local_config_groups[key] - ) - - # Clean up LDAP group mapping - if ldap_groups: - for group in ldap_groups: - if group in {k.lower(): v for k, v in local_config_groups.items()}: - log.debug( - 'LDAP group 
mapping ["%s"] exists in configuration, no cleanup necessary', group) - else: - log.debug( - 'LDAP group mapping ["%s"] does not exist in configuration, deleting...', group) - client.auth.ldap.delete_group(name=group) - log.debug( - 'LDAP group mapping ["%s"] deleted.', group) - else: - log.debug( - 'Auth method "%s" does not contain any extra configurations.', auth_method.type + for auth_method in local_methods: + log.debug('Checking if auth method "%s" is enabled...', + auth_method.path) + if auth_method in remote_methods: + log.debug( + 'Auth method "%s" is already enabled. Tuning...', auth_method.path) + client.sys.tune_auth_method( + path=auth_method.path, + description=auth_method.description, + default_lease_ttl=auth_method.config["default_lease_ttl"], + max_lease_ttl=auth_method.config["max_lease_ttl"] + ) + log.debug('Auth method "%s" is tuned.', auth_method.type) + else: + log.debug( + 'Auth method "%s" is not enabled. Enabling now...', auth_method.path) + client.sys.enable_auth_method( + method_type=auth_method.type, + path=auth_method.path, + description=auth_method.description, + config=auth_method.config + ) + log.debug('Auth method "%s" is enabled.', auth_method.type) + new_auth_methods.append(auth_method.type) + + # Provision config for specific auth method + if auth_method.auth_config: + if auth_method.type == "ldap": + log.debug('Provisioning configuration for LDAP...') + client.auth.ldap.configure(**auth_method.auth_config) + log.debug('Configuration for LDAP is provisioned.') + else: + log.debug( + 'Auth method "%s" does not contain any specific configurations.', auth_method.type) + + if auth_method.extra_config: + log.debug( + 'Provisioning extra configurations for auth method "%s"', auth_method.type) + # Get LDAP group mapping from vault + ldap_list_group_response = client.auth.ldap.list_groups() + if ldap_list_group_response: + ldap_groups = ldap_list_group_response["data"]["keys"] + + log.debug("LDAP groups from vault: %s", str(ldap_groups)) 
+ + # Update LDAP group mapping + log.debug( + 'Writing LDAP group -> Policy mappings for "%s"', str(auth_method.path)) + local_config_groups = auth_method.extra_config["group_policy_map"] + for key in local_config_groups: + log.debug('LDAP Group ["%s"] -> Policies %s', + str(key), local_config_groups[key]) + + client.auth.ldap.create_or_update_group( + name=key, + policies=local_config_groups[key] ) - # Build return object - ret['changes']['old'] =[ob.type for ob in remote_methods] - ret['changes']['new'] = new_auth_methods or "No changes" + # Clean up LDAP group mapping + if ldap_groups: + for group in ldap_groups: + if group in {k.lower(): v for k, v in local_config_groups.items()}: + log.debug( + 'LDAP group mapping ["%s"] exists in configuration, no cleanup necessary', group) + else: + log.debug( + 'LDAP group mapping ["%s"] does not exist in configuration, deleting...', group) + client.auth.ldap.delete_group(name=group) + log.debug( + 'LDAP group mapping ["%s"] deleted.', group) + else: + log.debug( + 'Auth method "%s" does not contain any extra configurations.', auth_method.type + ) - except Exception: - raise + # Build return object + ret['changes']['old'] = [ob.type for ob in remote_methods] + ret['changes']['new'] = new_auth_methods or "No changes" log.info('Finished processing and configuring auth methods...') @@ -437,22 +417,19 @@ def cleanup_auth_methods(self, client, remote_methods, local_methods, ret): """ log.info('Cleaning up auth methods...') has_change = False + for auth_method in remote_methods: + if auth_method not in local_methods: + has_change = True + log.debug( + 'Auth method "%s" does not exist in configuration. Disabling...', auth_method.type) + client.sys.disable_auth_method( + path=auth_method.path + ) + log.debug('Auth method "%s" is disabled.', + auth_method.type) - try: - for auth_method in remote_methods: - if auth_method not in local_methods: - has_change = True - log.debug( - 'Auth method "%s" does not exist in configuration. 
Disabling...', auth_method.type) - client.sys.disable_auth_method( - path=auth_method.path - ) - log.debug('Auth method "%s" is disabled.', auth_method.type) - - if has_change: - ret['changes']['new'] = [ob.type for ob in local_methods] - except Exception: - raise + if has_change: + ret['changes']['new'] = [ob.type for ob in local_methods] log.info('Finished cleaning up auth methods.') @@ -479,28 +456,25 @@ def get_remote_secrets_engines(self, client, ret): """ log.info('Retrieving secrets engines from Vault') remote_secret_engines = [] - try: - secrets_engines_resp = client.sys.list_mounted_secrets_engines() - for engine in secrets_engines_resp['data']: - remote_secret_engines.append( - VaultSecretEngine( - type=secrets_engines_resp[engine]['type'], - path=(secrets_engines_resp[engine]["path"] - if 'path' in secrets_engines_resp[engine] else engine), - description=secrets_engines_resp[engine]["description"], - config=OrderedDict( - sorted(secrets_engines_resp[engine]["config"].items())) - ) + secrets_engines_resp = client.sys.list_mounted_secrets_engines() + for engine in secrets_engines_resp['data']: + remote_secret_engines.append( + VaultSecretEngine( + type=secrets_engines_resp[engine]['type'], + path=(secrets_engines_resp[engine]["path"] + if 'path' in secrets_engines_resp[engine] else engine), + description=secrets_engines_resp[engine]["description"], + config=OrderedDict( + sorted(secrets_engines_resp[engine]["config"].items())) ) - remote_secret_engines.sort(key=lambda x: x.type) - except Exception: - raise + ) + remote_secret_engines.sort(key=lambda x: x.type) log.info('Finished retrieving secrets engines from vault.') return remote_secret_engines def populate_local_secrets_engines(self, configs, ret): - """Retriev secrets engines from local config + """Retrieve secrets engines from local config Arguments: configs {list} -- local secrets engines information @@ -511,39 +485,36 @@ def populate_local_secrets_engines(self, configs, ret): """ 
log.info('Populating local secret engines...') local_secret_engines = [] - try: - for secret_engine in configs: - config = None - secret_config = None - extra_config = None + for secret_engine in configs: + config = None + secret_config = None + extra_config = None + + if 'config' in secret_engine: + if secret_engine["config"]: + config = OrderedDict( + sorted(secret_engine["config"].items())) + + if 'secret_config' in secret_engine: + if secret_engine["secret_config"]: + secret_config = OrderedDict( + sorted(secret_engine["secret_config"].items())) + + if 'extra_config' in secret_engine: + if secret_engine["extra_config"]: + extra_config = OrderedDict( + sorted(secret_engine["extra_config"].items())) - if 'config' in secret_engine: - if secret_engine["config"]: - config = OrderedDict( - sorted(secret_engine["config"].items())) - - if 'secret_config' in secret_engine: - if secret_engine["secret_config"]: - secret_config = OrderedDict( - sorted(secret_engine["secret_config"].items())) - - if 'extra_config' in secret_engine: - if secret_engine["extra_config"]: - extra_config = OrderedDict( - sorted(secret_engine["extra_config"].items())) - - local_secret_engines.append(VaultSecretEngine( - type=secret_engine["type"], - path=secret_engine["path"], - description=secret_engine["description"], - config=config, - secret_config=secret_config, - extra_config=extra_config - )) + local_secret_engines.append(VaultSecretEngine( + type=secret_engine["type"], + path=secret_engine["path"], + description=secret_engine["description"], + config=config, + secret_config=secret_config, + extra_config=extra_config + )) - local_secret_engines.sort(key=lambda x: x.type) - except Exception: - raise + local_secret_engines.sort(key=lambda x: x.type) log.info('Finished populating local secret engines.') return local_secret_engines @@ -559,109 +530,102 @@ def configure_secrets_engines(self, client, remote_engines, local_engines, ret): """ log.info('Processing and configuring secrets 
engines...') new_secrets_engines = [] - try: - for secret_engine in local_engines: - log.debug('Checking if secret engine "%s" at path "%s" is enabled...', - secret_engine.type, - secret_engine.path) - if secret_engine in remote_engines: - log.debug( - 'Secret engine "%s" at path "%s" is already enabled. Tuning...', - secret_engine.type, - secret_engine.path) - - client.sys.tune_mount_configuration( - path=secret_engine.path, - description=secret_engine.description, - default_lease_ttl=secret_engine.config["default_lease_ttl"], - max_lease_ttl=secret_engine.config["max_lease_ttl"] + for secret_engine in local_engines: + log.debug('Checking if secret engine "%s" at path "%s" is enabled...', + secret_engine.type, + secret_engine.path) + if secret_engine in remote_engines: + log.debug( + 'Secret engine "%s" at path "%s" is already enabled. Tuning...', + secret_engine.type, + secret_engine.path) + + client.sys.tune_mount_configuration( + path=secret_engine.path, + description=secret_engine.description, + default_lease_ttl=secret_engine.config["default_lease_ttl"], + max_lease_ttl=secret_engine.config["max_lease_ttl"] + ) + log.debug('Secret engine "%s" at path "%s" is tuned.', + secret_engine.type, secret_engine.path) + else: + log.debug( + 'Secret engine "%s" at path "%s" is not enabled. 
Enabling now...', + secret_engine.type, + secret_engine.path) + + client.sys.enable_secrets_engine( + backend_type=secret_engine.type, + path=secret_engine.path, + description=secret_engine.description, + config=secret_engine.config + ) + + new_secrets_engines.append( + "type: {} - path: {}".format(secret_engine.type, secret_engine.path)) + + log.debug('Secret engine "%s" at path "%s" is enabled.', + secret_engine.type, secret_engine.path) + + if secret_engine.secret_config: + log.info( + 'Provisioning specific configurations for "%s" secrets engine...', secret_engine.type) + + if secret_engine.type == 'ad': + client.secrets.activedirectory.configure( + **secret_engine.secret_config ) - log.debug('Secret engine "%s" at path "%s" is tuned.', - secret_engine.type, secret_engine.path) - else: - log.debug( - 'Secret engine "%s" at path "%s" is not enabled. Enabling now...', - secret_engine.type, - secret_engine.path) - - client.sys.enable_secrets_engine( - backend_type=secret_engine.type, - path=secret_engine.path, - description=secret_engine.description, - config=secret_engine.config + if secret_engine.type == 'database': + client.secrets.database.configure( + **secret_engine.secret_config ) - new_secrets_engines.append("type: {} - path: {}".format(secret_engine.type, secret_engine.path)) + log.info( + 'Finished provisioning specific configurations for "%s" secrets engine...', secret_engine.type) - log.debug('Secret engine "%s" at path "%s" is enabled.', - secret_engine.type, secret_engine.path) + if secret_engine.extra_config: + log.info( + 'Provisioning extra conifgurations for for "%s" secrets engine...', secret_engine.type) - if secret_engine.secret_config: - log.info( - 'Provisioning specific configurations for "%s" secrets engine...', secret_engine.type) + if secret_engine.type == 'ad': + # Get roles from vault + existing_roles = None + existing_roles = client.secrets.activedirectory.list_roles() + log.debug(existing_roles) - if secret_engine.type == 'ad': - 
client.secrets.activedirectory.configure( - **secret_engine.secret_config - ) - if secret_engine.type == 'database': - client.secrets.database.configure( - **secret_engine.secret_config + # Add new roles + local_roles = secret_engine.extra_config['roles'] + for key in local_roles: + log.debug('AD Role ["%s"] -> Role %s', + str(key), local_roles[key]) + + client.secrets.activedirectory.create_or_update_role( + name=key, + service_account_name=local_roles[key]['service_account_name'], + ttl=local_roles[key]['ttl'] ) - log.info( - 'Finished provisioning specific configurations for "%s" secrets engine...', secret_engine.type) - - if secret_engine.extra_config: - log.info( - 'Provisioning extra conifgurations for for "%s" secrets engine...', secret_engine.type) - - if secret_engine.type == 'ad': - # Get roles from vault - existing_roles = None - try: - existing_roles = client.secrets.activedirectory.list_roles() - log.debug(existing_roles) - except Exception: - raise - - # Add new roles - local_roles = secret_engine.extra_config['roles'] - for key in local_roles: - log.debug('AD Role ["%s"] -> Role %s', - str(key), local_roles[key]) - try: - client.secrets.activedirectory.create_or_update_role( - name=key, - service_account_name=local_roles[key]['service_account_name'], - ttl=local_roles[key]['ttl'] + # Remove missing roles + if existing_roles: + for role in existing_roles: + if role in {k.lower(): v for k, v in local_roles.items()}: + log.debug( + 'AD role ["%s"] exists in configuration, no cleanup necessary', role) + else: + log.debug( + 'Ad role ["%s"] does not exists in configuration, deleting...', role) + client.secrets.activedirectory.delete_role( + name=role ) - except Exception: - raise - - # Remove missing roles - if existing_roles: - for role in existing_roles: - if role in {k.lower(): v for k, v in local_roles.items()}: - log.debug( - 'AD role ["%s"] exists in configuration, no cleanup necessary', role) - else: - log.debug( - 'Ad role ["%s"] does not exists 
in configuration, deleting...', role) - client.secrets.activedirectory.delete_role( - name=role - ) - log.debug( - 'AD role has been ["%s"] deleted.', role) - else: - log.debug( - 'Secret engine "%s" does not contain any extra configurations.', secret_engine.type - ) - except Exception: - raise - + log.debug( + 'AD role has been ["%s"] deleted.', role) + else: + log.debug( + 'Secret engine "%s" does not contain any extra configurations.', secret_engine.type + ) # Build return object - ret['changes']['old'] = ["type: {} - path: {}".format(ob.type, ob.path) for ob in remote_engines] + ret['changes']['old'] = [ + "type: {} - path: {}".format(ob.type, ob.path) for ob in remote_engines] ret['changes']['new'] = new_secrets_engines or "No changes" log.info('Finished proccessing and configuring secrets engines.') @@ -677,30 +641,27 @@ def cleanup_secrets_engines(self, client, remote_engines, local_engines, ret): """ log.info('Cleaning up secrets engines...') has_changes = False - - try: - for secret_engine in remote_engines: - if not (secret_engine.type == "system" or - secret_engine.type == "cubbyhole" or - secret_engine.type == "identity" or - secret_engine.type == "generic"): - if secret_engine in local_engines: - log.debug('Secrets engine "%s" at path "%s" exists in configuration, no cleanup necessary.', - secret_engine.type, secret_engine.path) - else: - log.debug('Secrets engine "%s" at path "%s" does not exist in configuration. 
Disabling...', - secret_engine.type, secret_engine.path) - has_changes = True - client.sys.disable_secrets_engine( - path=secret_engine.path - ) - log.debug('Secrets engine "%s" at path "%s" is disabled.', - secret_engine.type, secret_engine.type) - except Exception: - raise + for secret_engine in remote_engines: + if not (secret_engine.type == "system" or + secret_engine.type == "cubbyhole" or + secret_engine.type == "identity" or + secret_engine.type == "generic"): + if secret_engine in local_engines: + log.debug('Secrets engine "%s" at path "%s" exists in configuration, no cleanup necessary.', + secret_engine.type, secret_engine.path) + else: + log.debug('Secrets engine "%s" at path "%s" does not exist in configuration. Disabling...', + secret_engine.type, secret_engine.path) + has_changes = True + client.sys.disable_secrets_engine( + path=secret_engine.path + ) + log.debug('Secrets engine "%s" at path "%s" is disabled.', + secret_engine.type, secret_engine.type) if has_changes: - ret['changes']['new'] = ["type: {} - path: {}".format(ob.type, ob.path) for ob in local_engines] + ret['changes']['new'] = [ + "type: {} - path: {}".format(ob.type, ob.path) for ob in local_engines] log.info('Finished cleaning up secrets engines.') @@ -727,25 +688,22 @@ def get_remote_audit_devices(self, client, ret): """ log.info("Retrieving audit devices from vault...") devices = [] - try: - audit_devices_resp = client.sys.list_enabled_audit_devices() - log.debug(audit_devices_resp) - for device in audit_devices_resp['data']: - audit_device = audit_devices_resp[device] - devices.append( - VaultAuditDevice( - type=audit_device['type'], - path=(audit_device["path"] - if 'path' in audit_device else device), - description=audit_device["description"], - config=OrderedDict( - sorted(audit_device["options"].items())) - ) + audit_devices_resp = client.sys.list_enabled_audit_devices() + log.debug(audit_devices_resp) + for device in audit_devices_resp['data']: + audit_device = 
audit_devices_resp[device] + devices.append( + VaultAuditDevice( + type=audit_device['type'], + path=(audit_device["path"] + if 'path' in audit_device else device), + description=audit_device["description"], + config=OrderedDict( + sorted(audit_device["options"].items())) ) + ) - log.info('Finished retrieving audit devices from vault.') - except Exception: - raise + log.info('Finished retrieving audit devices from vault.') return devices @@ -762,24 +720,21 @@ def get_local_audit_devices(self, configs, ret): log.info("Loading audit devices from local config...") devices = [] if configs: - try: - for audit_device in configs: - config = None - if 'config' in audit_device: - if audit_device['config']: - config = OrderedDict( - sorted(audit_device["config"].items())) - - devices.append( - VaultAuditDevice( - type=audit_device["type"], - path=audit_device["path"], - description=audit_device["description"], - config=config - ) + for audit_device in configs: + config = None + if 'config' in audit_device: + if audit_device['config']: + config = OrderedDict( + sorted(audit_device["config"].items())) + + devices.append( + VaultAuditDevice( + type=audit_device["type"], + path=audit_device["path"], + description=audit_device["description"], + config=config ) - except Exception: - raise + ) log.info('Finished loading audit devices from local config.') @@ -796,35 +751,32 @@ def configure_audit_devices(self, client, remote_devices, local_devices, ret): """ log.info('Processing and configuring audit devices...') new_audit_devices = [] - try: - for audit_device in local_devices: - log.debug('Checking if audit device "%s" at path "%s" is enabled...', - audit_device.type, audit_device.path) + for audit_device in local_devices: + log.debug('Checking if audit device "%s" at path "%s" is enabled...', + audit_device.type, audit_device.path) - if audit_device in remote_devices: - log.debug('Audit device "%s" at path "%s" is already enabled.', - audit_device.type, audit_device.path) - else: 
- log.debug( - 'Audit device "%s" at path "%s" is not enabled. Enabling now...', - audit_device.type, - audit_device.path - ) - new_audit_devices.append(audit_device.type) - client.sys.enable_audit_device( - device_type=audit_device.type, - path=audit_device.path, - description=audit_device.description, - options=audit_device.config - ) - log.debug('Audit device "%s" at path "%s" is enabled.', - audit_device.type, audit_device.path) + if audit_device in remote_devices: + log.debug('Audit device "%s" at path "%s" is already enabled.', + audit_device.type, audit_device.path) + else: + log.debug( + 'Audit device "%s" at path "%s" is not enabled. Enabling now...', + audit_device.type, + audit_device.path + ) + new_audit_devices.append(audit_device.type) + client.sys.enable_audit_device( + device_type=audit_device.type, + path=audit_device.path, + description=audit_device.description, + options=audit_device.config + ) + log.debug('Audit device "%s" at path "%s" is enabled.', + audit_device.type, audit_device.path) - # Build return object - ret['changes']['old'] = [ob.type for ob in remote_devices] - ret['changes']['new'] = new_audit_devices or "No changes" - except Exception: - raise + # Build return object + ret['changes']['old'] = [ob.type for ob in remote_devices] + ret['changes']['new'] = new_audit_devices or "No changes" log.info('Finished processing audit devices.') @@ -839,20 +791,16 @@ def cleanup_audit_devices(self, client, remote_devices, local_devices, ret): """ log.info('Cleaning up audit devices...') has_changes = False - try: - for audit_device in remote_devices: - if audit_device not in local_devices: - log.info('Disabling audit device "%s" at path "%s"...', - audit_device.type, audit_device.path) - has_changes = True - client.sys.disable_audit_device( - path=audit_device.path - ) - - if has_changes: - ret['changes']['new'] = [ob.type for ob in local_devices] + for audit_device in remote_devices: + if audit_device not in local_devices: + 
log.info('Disabling audit device "%s" at path "%s"...', + audit_device.type, audit_device.path) + has_changes = True + client.sys.disable_audit_device( + path=audit_device.path + ) - except Exception: - raise + if has_changes: + ret['changes']['new'] = [ob.type for ob in local_devices] log.info('Finished cleaning up audit devices.') diff --git a/salt/vault/map.jinja b/salt/vault/map.jinja index 7eef7d8..1ed99cd 100644 --- a/salt/vault/map.jinja +++ b/salt/vault/map.jinja @@ -13,6 +13,3 @@ ) ) ) %} - -{#- Merge the vault pillar #} -{%- set vault = salt.pillar.get('vault', default=vault, merge=True) %} diff --git a/salt/vault/service.sls b/salt/vault/service.sls index 57e1ca3..4ae2d9d 100644 --- a/salt/vault/service.sls +++ b/salt/vault/service.sls @@ -8,26 +8,16 @@ vault_service_init_file_managed: - defaults: dev_configs: {{ vault.dev_configs }} -{%- if not vault.dev_mode %} - -manage_selinux_mode: - selinux.mode: - - name: permissive - -vault_service_running: - service.running: - - name: vault - - enable: True - - reload: True - - require: - - selinux: manage_selinux_mode - -{%- else %} - vault_service_running: service.running: - name: vault - enable: True - reload: True +{%- if not vault.dev_mode %} +manage_selinux_mode: + selinux.mode: + - name: permissive + - required_in: + - service: vault_service_running {%- endif %} diff --git a/tests/module_test.go b/tests/module_test.go index ab00874..720e08f 100644 --- a/tests/module_test.go +++ b/tests/module_test.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "log" "net/http" + "os" "testing" "time" @@ -17,62 +18,40 @@ import ( func TestModule(t *testing.T) { files, err := ioutil.ReadDir("./") + if err != nil { log.Fatal(err) } for _, f := range files { // look for directories with test cases in it - if f.IsDir() { - if f.Name() != "vendor" { - testFiles, testErr := ioutil.ReadDir(f.Name()) - if testErr != nil { - log.Fatal(testErr) - } - - // see if a prereq directory exists - for _, testF := range testFiles { - if 
testF.IsDir() { - if testF.Name() == "prereq" { - directory := f.Name() + "/" + testF.Name() - runTerraformPreReq(t, directory) - } - } + if f.IsDir() && f.Name() != "vendor" { + t.Run(f.Name(), func(t *testing.T) { + // check if a prereq directory exists + prereqDir := f.Name() + "/prereq/" + if _, err := os.Stat(prereqDir); err == nil { + prereqOptions := createTerraformOptions(prereqDir) + defer terraform.Destroy(t, prereqOptions) + terraform.InitAndApply(t, prereqOptions) } - // run terraform code - runTerraform(t, f.Name()) - } + // run terraform code for test case + terraformOptions := createTerraformOptions(f.Name()) + defer terraform.Destroy(t, terraformOptions) + terraform.InitAndApply(t, terraformOptions) + testVaultViaAlb(t, terraformOptions) + }) } } } -// The prequisite function runs the terraform code but doesn't destroy it afterwards so that the state can be used for further testing -func runTerraformPreReq(t *testing.T, directory string) { +func createTerraformOptions(directory string) *terraform.Options { terraformOptions := &terraform.Options{ TerraformDir: directory, NoColor: true, } - // This will run `terraform init` and `terraform apply` and fail the test if there are any errors - terraform.InitAndApply(t, terraformOptions) -} - -func runTerraform(t *testing.T, directory string) { - terraformOptions := &terraform.Options{ - // The path to where your Terraform code is located - TerraformDir: directory, - // Disable color output - NoColor: true, - } - - // At the end of the test, run `terraform destroy` to clean up any resources that were created - defer terraform.Destroy(t, terraformOptions) - - // This will run `terraform init` and `terraform apply` and fail the test if there are any errors - terraform.InitAndApply(t, terraformOptions) - - testVaultViaAlb(t, terraformOptions) + return terraformOptions } // Use the Vault client to connect to the Vault via the ALB, via the route53 record, and make sure it works without @@ -82,7 +61,7 @@ func 
testVaultViaAlb(t *testing.T, terraformOptions *terraform.Options) { description := fmt.Sprintf("Testing Vault via ALB at cluster URL %s", clusterURL) logger.Logf(t, description) - maxRetries := 30 + maxRetries := 3 sleepBetweenRetries := 10 * time.Second vaultClient := createVaultClient(t, clusterURL) diff --git a/tests/vault-py2/main.tf b/tests/vault-py2/main.tf index 8ffdc2b..b59fc85 100644 --- a/tests/vault-py2/main.tf +++ b/tests/vault-py2/main.tf @@ -34,7 +34,7 @@ module "base" { # Watchmaker settings watchmaker_config = var.watchmaker_config - toggle_update = "A" + toggle_update = "B" } output "cluster_url" { diff --git a/tests/vault-py2/variables.tf b/tests/vault-py2/variables.tf index e529dd9..d7743a7 100644 --- a/tests/vault-py2/variables.tf +++ b/tests/vault-py2/variables.tf @@ -30,7 +30,7 @@ variable "vault_version" { } variable "vault_pillar_path" { - type = string + type = string description = "Specify the path to vault pillar" } diff --git a/tests/vault-py3/main.tf b/tests/vault-py3/main.tf index 045c3fc..15f4f34 100644 --- a/tests/vault-py3/main.tf +++ b/tests/vault-py3/main.tf @@ -29,8 +29,9 @@ module "vault-py3" { certificate_arn = var.certificate_arn # Vault settings - vault_version = var.vault_version - dynamodb_table = var.dynamodb_table + vault_version = var.vault_version + vault_pillar_path = var.vault_pillar_path + dynamodb_table = var.dynamodb_table # Watchmaker settings watchmaker_config = var.watchmaker_config diff --git a/tests/vault-py3/pillar/top.sls b/tests/vault-py3/pillar/top.sls new file mode 100644 index 0000000..7e34ce6 --- /dev/null +++ b/tests/vault-py3/pillar/top.sls @@ -0,0 +1,3 @@ +base: + "*": + - vault diff --git a/tests/vault-py3/pillar/vault/init.sls b/tests/vault-py3/pillar/vault/init.sls new file mode 100644 index 0000000..f3a2a97 --- /dev/null +++ b/tests/vault-py3/pillar/vault/init.sls @@ -0,0 +1,48 @@ +vault: + lookup: + api_port: ${api_port} + cluster_port: ${cluster_port} + dynamodb_table: ${dynamodb_table} + 
inbound_cidrs: ${inbound_cidrs} + kms_key_id: ${kms_key_id} + logs_path: ${logs_path} + logs_dir: ${logs_dir} + region: ${region} + ssm_path: ${ssm_path} + version: ${vault_version} + + secrets_engines: + - type: kv + path: services + description: Services specific folders + config: + default_lease_ttl: 1800 + max_lease_ttl: 1800 + + auth_methods: + - type: token + path: token + description: token based credentials + config: + default_lease_ttl: 0 + max_lease_ttl: 0 + + audit_devices: + - type: file + path: file_log + description: first audit device + config: + file_path: /etc/vault/logs/audit.log + + policies: + - name: xyz_admin + content: + path: + '*': {capabilities: [read, create]} + 'stage/*': {capabilities: [read, create, update, delete, list]} + + - name: abc_admin + content: + path: + '*': {capabilities: [read, create]} + 'stage/*': {capabilities: [read, create]} diff --git a/tests/vault-py3/variables.tf b/tests/vault-py3/variables.tf index cf4bda8..c7377e3 100644 --- a/tests/vault-py3/variables.tf +++ b/tests/vault-py3/variables.tf @@ -51,6 +51,11 @@ variable "vault_version" { type = string } +variable "vault_pillar_path" { + type = string + description = "Specify the path to vault pillar" +} + variable "dynamodb_table" { description = "Name of the Dynamodb to be used as storage backend for Vault" type = string