From 9b6d7c4912ae61d2b15ed0146a1ef630271b9935 Mon Sep 17 00:00:00 2001 From: Brian Hannafious Date: Fri, 19 Jan 2018 09:44:57 -0800 Subject: [PATCH] Streamlined deployment --- .travis.yml | 10 +- Makefile | 7 + README.md | 111 ++----- chalice/Makefile | 1 + configure.py | 107 +++++++ daemons/Makefile | 3 - deployment/.gitignore | 11 + deployment/Makefile | 51 +++ deployment/active | 1 + .../dev/application_secrets.json.enc | Bin deployment/dev/buckets/backend.tf | 11 + deployment/dev/buckets/dss_variables.tf | 1 + deployment/dev/buckets/gs_buckets.tf | 56 ++++ deployment/dev/buckets/local_variables.tf | 1 + deployment/dev/buckets/providers.tf | 1 + deployment/dev/buckets/s3_buckets.tf | 46 +++ deployment/dev/buckets/variables.tf | 1 + deployment/dev/domain/backend.tf | 11 + deployment/dev/domain/domain.tf | 31 ++ deployment/dev/domain/dss_variables.tf | 1 + deployment/dev/domain/local_variables.tf | 1 + deployment/dev/domain/providers.tf | 1 + deployment/dev/domain/variables.tf | 1 + deployment/dev/dss_variables.tf | 88 ++++++ deployment/dev/elasticsearch/backend.tf | 11 + deployment/dev/elasticsearch/dss_variables.tf | 1 + deployment/dev/elasticsearch/elasticsearch.tf | 82 +++++ .../dev/elasticsearch/local_variables.tf | 1 + deployment/dev/elasticsearch/providers.tf | 1 + deployment/dev/elasticsearch/variables.tf | 1 + .../dev/gcp-credentials.json.enc | Bin deployment/dev/local_variables.tf | 12 + deployment/dev/providers.tf | 17 + deployment/dev/variables.tf | 24 ++ deployment/prod/backend_config.hcl | 4 + deployment/prod/buckets/backend.tf | 11 + deployment/prod/buckets/dss_variables.tf | 1 + deployment/prod/buckets/gs_buckets.tf | 65 ++++ deployment/prod/buckets/local_variables.tf | 1 + deployment/prod/buckets/providers.tf | 1 + deployment/prod/buckets/s3_buckets.tf | 51 +++ deployment/prod/buckets/variables.tf | 1 + deployment/prod/domain/backend.tf | 13 + deployment/prod/domain/domain.tf | 29 ++ deployment/prod/domain/dss_variables.tf | 1 + deployment/prod/domain/local_variables.tf | 1 + deployment/prod/domain/providers.tf | 1 + deployment/prod/domain/variables.tf | 1 + deployment/prod/dss_variables.tf | 88 ++++++ deployment/prod/elasticsearch/backend.tf | 13 + .../prod/elasticsearch/dss_variables.tf | 1 + .../prod/elasticsearch/elasticsearch.tf | 118 +++++++ .../prod/elasticsearch/local_variables.tf | 1 + deployment/prod/elasticsearch/providers.tf | 1 + deployment/prod/elasticsearch/variables.tf | 1 + deployment/prod/providers.tf | 23 ++ deployment/prod/variables.tf | 24 ++ deployment/staging/buckets/backend.tf | 11 + deployment/staging/buckets/dss_variables.tf | 1 + deployment/staging/buckets/gs_buckets.tf | 23 ++ deployment/staging/buckets/s3_buckets.tf | 41 +++ deployment/staging/dss_variables.tf | 55 ++++ deployment/staging/elasticsearch/backend.tf | 11 + .../staging/elasticsearch/dss_variables.tf | 1 + .../staging/elasticsearch/elasticsearch.tf | 23 ++ .../staging/gs_service_account/backend.tf | 11 + .../gs_service_account/dss_variables.tf | 1 + .../gs_service_account/service_account.tf | 19 ++ dss_deployment/__init__.py | 190 +++++++++++ .../stage_template/backend_config.hcl | 1 + .../stage_template/buckets/gs_buckets.tf | 65 ++++ .../stage_template/buckets/s3_buckets.tf | 51 +++ .../stage_template/domain/domain.tf | 38 +++ .../stage_template/dss_variables.tf | 88 ++++++ .../elasticsearch/elasticsearch.tf | 102 ++++++ .../event_relay_user/event_relay_user.tf | 24 ++ .../stage_template/local_variables.tf | 12 + dss_deployment/stage_template/providers.tf | 22 ++ 
dss_deployment/stage_template/variables.tf | 24 ++ environment | 38 +-- scripts/create_aws_event_relay_user.py | 10 +- scripts/create_config_gs_service_account.sh | 33 ++ scripts/deploy_checkout_lifecycle.py | 22 -- scripts/destructor.py | 298 ++++++++++++++++++ scripts/enable_gs_services.sh | 10 + scripts/set_apigateway_base_path_mapping.py | 40 +++ scripts/set_event_relay_parameters.py | 67 ++-- 87 files changed, 2313 insertions(+), 173 deletions(-) create mode 100755 configure.py create mode 100644 deployment/.gitignore create mode 100644 deployment/Makefile create mode 120000 deployment/active rename application_secrets.json.enc => deployment/dev/application_secrets.json.enc (100%) create mode 100644 deployment/dev/buckets/backend.tf create mode 120000 deployment/dev/buckets/dss_variables.tf create mode 100644 deployment/dev/buckets/gs_buckets.tf create mode 120000 deployment/dev/buckets/local_variables.tf create mode 120000 deployment/dev/buckets/providers.tf create mode 100644 deployment/dev/buckets/s3_buckets.tf create mode 120000 deployment/dev/buckets/variables.tf create mode 100644 deployment/dev/domain/backend.tf create mode 100644 deployment/dev/domain/domain.tf create mode 120000 deployment/dev/domain/dss_variables.tf create mode 120000 deployment/dev/domain/local_variables.tf create mode 120000 deployment/dev/domain/providers.tf create mode 120000 deployment/dev/domain/variables.tf create mode 100644 deployment/dev/dss_variables.tf create mode 100644 deployment/dev/elasticsearch/backend.tf create mode 120000 deployment/dev/elasticsearch/dss_variables.tf create mode 100644 deployment/dev/elasticsearch/elasticsearch.tf create mode 120000 deployment/dev/elasticsearch/local_variables.tf create mode 120000 deployment/dev/elasticsearch/providers.tf create mode 120000 deployment/dev/elasticsearch/variables.tf rename gcp-credentials.json.enc => deployment/dev/gcp-credentials.json.enc (100%) create mode 100644 deployment/dev/local_variables.tf create mode 100644 deployment/dev/providers.tf create mode 100644 deployment/dev/variables.tf create mode 100644 deployment/prod/backend_config.hcl create mode 100644 deployment/prod/buckets/backend.tf create mode 120000 deployment/prod/buckets/dss_variables.tf create mode 100644 deployment/prod/buckets/gs_buckets.tf create mode 120000 deployment/prod/buckets/local_variables.tf create mode 120000 deployment/prod/buckets/providers.tf create mode 100644 deployment/prod/buckets/s3_buckets.tf create mode 120000 deployment/prod/buckets/variables.tf create mode 100644 deployment/prod/domain/backend.tf create mode 100644 deployment/prod/domain/domain.tf create mode 120000 deployment/prod/domain/dss_variables.tf create mode 120000 deployment/prod/domain/local_variables.tf create mode 120000 deployment/prod/domain/providers.tf create mode 120000 deployment/prod/domain/variables.tf create mode 100644 deployment/prod/dss_variables.tf create mode 100644 deployment/prod/elasticsearch/backend.tf create mode 120000 deployment/prod/elasticsearch/dss_variables.tf create mode 100644 deployment/prod/elasticsearch/elasticsearch.tf create mode 120000 deployment/prod/elasticsearch/local_variables.tf create mode 120000 deployment/prod/elasticsearch/providers.tf create mode 120000 deployment/prod/elasticsearch/variables.tf create mode 100644 deployment/prod/providers.tf create mode 100644 deployment/prod/variables.tf create mode 100644 deployment/staging/buckets/backend.tf create mode 120000 deployment/staging/buckets/dss_variables.tf create mode 100644 
deployment/staging/buckets/gs_buckets.tf create mode 100644 deployment/staging/buckets/s3_buckets.tf create mode 100644 deployment/staging/dss_variables.tf create mode 100644 deployment/staging/elasticsearch/backend.tf create mode 120000 deployment/staging/elasticsearch/dss_variables.tf create mode 100644 deployment/staging/elasticsearch/elasticsearch.tf create mode 100644 deployment/staging/gs_service_account/backend.tf create mode 120000 deployment/staging/gs_service_account/dss_variables.tf create mode 100644 deployment/staging/gs_service_account/service_account.tf create mode 100644 dss_deployment/__init__.py create mode 100644 dss_deployment/stage_template/backend_config.hcl create mode 100644 dss_deployment/stage_template/buckets/gs_buckets.tf create mode 100644 dss_deployment/stage_template/buckets/s3_buckets.tf create mode 100644 dss_deployment/stage_template/domain/domain.tf create mode 100644 dss_deployment/stage_template/dss_variables.tf create mode 100644 dss_deployment/stage_template/elasticsearch/elasticsearch.tf create mode 100644 dss_deployment/stage_template/event_relay_user/event_relay_user.tf create mode 100644 dss_deployment/stage_template/local_variables.tf create mode 100644 dss_deployment/stage_template/providers.tf create mode 100644 dss_deployment/stage_template/variables.tf create mode 100755 scripts/create_config_gs_service_account.sh delete mode 100755 scripts/deploy_checkout_lifecycle.py create mode 100755 scripts/destructor.py create mode 100755 scripts/enable_gs_services.sh create mode 100755 scripts/set_apigateway_base_path_mapping.py diff --git a/.travis.yml b/.travis.yml index 22d4188741..544f8d73c9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,10 +19,12 @@ addons: - gettext before_install: -- openssl aes-256-cbc -K $encrypted_ead445d7a1e2_key -iv $encrypted_ead445d7a1e2_iv - -in gcp-credentials.json.enc -out gcp-credentials.json -d -- openssl aes-256-cbc -K $encrypted_ead445d7a1e2_key -iv $encrypted_ead445d7a1e2_iv - -in application_secrets.json.enc -out application_secrets.json -d +- (cd deployment/dev ; openssl aes-256-cbc -K $encrypted_ead445d7a1e2_key -iv $encrypted_ead445d7a1e2_iv + -in gcp-credentials.json.enc -out gcp-credentials.json -d) +- (cd deployment/dev ; openssl aes-256-cbc -K $encrypted_ead445d7a1e2_key -iv $encrypted_ead445d7a1e2_iv + -in application_secrets.json.enc -out application_secrets.json -d) +- rm deployment/active +- (cd deployment ; ln -s dev active) - source environment install: diff --git a/Makefile b/Makefile index bf63e35533..66f5a13129 100644 --- a/Makefile +++ b/Makefile @@ -61,8 +61,15 @@ scaletest: deploy: deploy-chalice deploy-daemons +components := $(shell basename $(shell ls -d deployment/active/*/)) +deploy-infra: + scripts/enable_gs_services.sh + $(MAKE) -C deployment apply + scripts/set_event_relay_parameters.py + deploy-chalice: $(MAKE) -C chalice deploy + scripts/set_apigateway_base_path_mapping.py deploy-daemons: deploy-daemons-serial deploy-daemons-parallel diff --git a/README.md b/README.md index ab8ec82016..202f6e1a42 100644 --- a/README.md +++ b/README.md @@ -34,15 +34,6 @@ The tests require certain node.js packages. They must be installed using `npm`, Tests also use data from the data-bundle-examples subrepository. Run: `git submodule update --init` -#### Environment Variables - -Environment variables are required for test and deployment. The required environment variables and their default values -are in the file `environment`. To customize the values of these environment variables: - -1. 
Copy `environment.local.example` to `environment.local` -2. Edit `environment.local` to add custom entries that override the default values in `environment` - -Run `source environment` now and whenever these environment files are modified. #### Configuring cloud-specific access credentials @@ -51,47 +42,34 @@ Run `source environment` now and whenever these environment files are modified. ##### AWS 1. Follow the instructions in http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html to get the `aws` command line utility. -2. Create an S3 bucket that you want DSS to use and in `environment.local`, set the environment variable `DSS_S3_BUCKET` - to the name of that bucket. Make sure the bucket region is consistent with `AWS_DEFAULT_REGION` in - `environment.local`. - -3. Repeat the previous step for - - * DSS_S3_CHECKOUT_BUCKET - * DSS_S3_CHECKOUT_BUCKET_TEST - * DSS_S3_CHECKOUT_BUCKET_TEST_FIXTURES - -4. If you wish to run the unit tests, you must create two more S3 buckets, one for test data and another for test - fixtures, and set the environment variables `DSS_S3_BUCKET_TEST` and `DSS_S3_BUCKET_TEST_FIXTURES` to the names of - those buckets. - -Hint: To create S3 buckets from the command line, use `aws s3 mb --region REGION s3://BUCKET_NAME/`. +2. To configure your account credentials and named profiles for the `aws` CLI, see + https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html and + https://docs.aws.amazon.com/cli/latest/userguide/cli-multiple-profiles.html ##### GCP 1. Follow the instructions in https://cloud.google.com/sdk/downloads to get the `gcloud` command line utility. -2. In the [Google Cloud Console](https://console.cloud.google.com/), select the correct Google user account on the top - right and the correct GCP project in the drop down in the top center. Go to "IAM & Admin", then "Service accounts", - then click "Create service account" and select "Furnish a new private key". Under "Roles" select "Project – Owner", - "Service Accounts – Service Account User" and "Cloud Functions – Cloud Function Developer". Create the account and - download the service account key JSON file. +2. Run `gcloud auth login` to authorize the `gcloud` CLI. + +#### Terraform -3. In `environment.local`, set the environment variable `GOOGLE_APPLICATION_CREDENTIALS` to the path of the service - account key JSON file. +Some cloud assets are managed by Terraform, including the storage buckets and Elasticsearch domain. -4. Choose a region that has support for Cloud Functions and set `GCP_DEFAULT_REGION` to that region. See - https://cloud.google.com/about/locations/ for a list of supported regions. +1. Follow the instructions in https://www.terraform.io/intro/getting-started/install.html to get the + `terraform` command line utility. -5. Run `gcloud auth activate-service-account --key-file=/path/to/service-account.json`. +2. Run `configure.py` to prepare the deployment. -6. Run `gcloud config set project PROJECT_ID` where PROJECT_ID is the ID, not the name (!) of the GCP project you - selected earlier. +3. Infrastructure deployment definitions may be further customized by editing the Terraform scripts in + the `deployment/active` subdirectories. -7. Enable required APIs: `gcloud services enable cloudfunctions.googleapis.com`; `gcloud services - enable runtimeconfig.googleapis.com` +Now you may deploy the cloud assets with: + make deploy-infra -8. Generate OAuth application secrets to be used for your instance: +##### GCP Application Secrets + +1. 
Generate OAuth application secrets to be used for your instance: 1) Go to https://console.developers.google.com/apis/credentials (you may have to select Organization and Project again) @@ -107,22 +85,14 @@ Hint: To create S3 buckets from the command line, use `aws s3 mb --region REGION 6) Click the edit icon for the new credentials and click *Download JSON* - 7) Place the downloaded JSON file into the project root as `application_secrets.json` - -9. Create a Google Cloud Storage bucket and in `environment.local`, set the environment variable `DSS_GS_BUCKET` to the - name of that bucket. Make sure the bucket region is consistent with `GCP_DEFAULT_REGION` in `environment.local`. - -10. Repeat the previous step for + 7) Place the downloaded JSON file into active stage root as `deployment/active/application_secrets.json` - * DSS_GS_CHECKOUT_BUCKET - * DSS_GS_CHECKOUT_BUCKET_TEST - * DSS_GS_CHECKOUT_BUCKET_TEST_FIXTURES +#### Environment Variables -11. If you wish to run the unit tests, you must create two more buckets, one for test data and another for test - fixtures, and set the environment variables `DSS_GS_BUCKET_TEST` and `DSS_GS_BUCKET_TEST_FIXTURES` to the names of - those buckets. +Environment variables are required for test and deployment. The required environment variables and their default values +are in the file `environment`. To customize the values of these environment variables, run `configure.py`. -Hint: To create GCS buckets from the command line, use `gsutil mb -c regional -l REGION gs://BUCKET_NAME/`. +Run `source environment` now and whenever `configure.py` is executed. ##### Azure @@ -158,47 +128,10 @@ Run `make test` in the top-level `data-store` directory. Assuming the tests have passed above, the next step is to manually deploy. See the section below for information on CI/CD with Travis if continuous deployment is your goal. -The AWS Elasticsearch Service is used for metadata indexing. Currently, the AWS Elasticsearch Service must be configured -manually. The AWS Elasticsearch Service domain name must either: - -* have the value `dss-index-$DSS_DEPLOYMENT_STAGE` - -* or, the environment variable `DSS_ES_DOMAIN` must be set to the domain name of the AWS Elasticsearch Service instance - to be used. - -For typical development deployments the t2.small.elasticsearch instance type is more than sufficient. - Now deploy using make: make deploy -Set up AWS API Gateway. The gateway is automatically set up for you and associated with the Lambda. However, to get a -friendly domain name, you need to follow the -directions [here](http://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-custom-domains.html). In summary: - -1. Generate a HTTPS certificate via AWS Certificate Manager (ACM). See note below on choosing a region for the - certificate. - -2. Set up the custom domain name in the API gateway console. See note below on the DNS record type. - -3. In Amazon Route 53 point the domain to the API gateway - -4. In the API Gateway, fill in the endpoints for the custom domain name e.g. Path=`/`, Destination=`dss` and - `dev`. These might be different based on the profile used (dev, stage, etc). - -5. Set the environment variable `API_DOMAIN_NAME` to your domain name in the `environment.local` file. - -Note: The certificate should be in the same region as the API gateway or, if that's not possible, in `us-east-1`. If the -ACM certificate's region is `us-east-1` and the API gateway is in another region, the type of the custom domain name -must be *Edge Optimized*. 
Provisioning such a domain name typically takes up to 40 minutes because the certificate needs -to be replicated to all involved CloudFront edge servers. The corresponding record set in Route 53 needs to be an -**alias** A record, not a CNAME or a regular A record, and it must point to the CloudFront host name associated with the -edge-optimized domain name. Starting November 2017, API gateway supports regional certificates i.e., certificates in -regions other than `us-east-1`. This makes it possible to match the certificate's region with that of the API -gateway. and cuts the provisioning of the custom domain name down to seconds. Simply create the certificate in the same -region as that of the API gateway, create a custom domain name of type *Regional* and in Route53 add a CNAME recordset -that points to the gateway's canonical host name. - If successful, you should be able to see the Swagger API documentation at: https:// diff --git a/chalice/Makefile b/chalice/Makefile index 763a2f7c44..48246ef86a 100644 --- a/chalice/Makefile +++ b/chalice/Makefile @@ -6,5 +6,6 @@ deploy: cp -R ../dss ../dss-api.yml chalicelib cp "$(GOOGLE_APPLICATION_CREDENTIALS)" chalicelib/gcp-credentials.json cp "$(GOOGLE_APPLICATION_SECRETS)" chalicelib/application_secrets.json + chmod -R ugo+rX chalicelib ./build_deploy_config.sh ../scripts/dss-chalice deploy --no-autogen-policy --stage $(DSS_DEPLOYMENT_STAGE) --api-gateway-stage $(DSS_DEPLOYMENT_STAGE) diff --git a/configure.py b/configure.py new file mode 100755 index 0000000000..585dca8506 --- /dev/null +++ b/configure.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python + +import os +import sys +import copy +import enum +import click +import subprocess +import dss_deployment + + +pkg_root = os.path.abspath(os.path.dirname(__file__)) # noqa + + +class Accept(enum.Enum): + all = enum.auto() + all_but_none = enum.auto() + nothing = enum.auto() + + +def run(command): + out = subprocess.run(command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + encoding='utf-8') + try: + out.check_returncode() + except subprocess.CalledProcessError: + raise Exception(f'\t{out.stderr}') + return out.stdout.strip() + + +def request_input(info, key, stage, accept): + if info[key]['default'] is not None: + default = info[key]['default'].format(stage=stage) + else: + default = None + + if Accept.all == accept: + print(f'setting {key}={default}') + info[key]['default'] = default + elif Accept.all_but_none == accept and default is not None: + print(f'setting {key}={default}') + info[key]['default'] = default + else: + print() + if info[key]['description']: + print(info[key]['description']) + val = click.prompt(f'{key}=', default) + if 'none' == val.lower(): + val = None + info[key]['default'] = val + + +def get_user_input(deployment, accept): + if not deployment.variables['gcp_project']['default']: + deployment.variables['gcp_project']['default'] = run("gcloud config get-value project") + + if not deployment.variables['gcp_service_account_id']['default']: + deployment.variables['gcp_service_account_id']['default'] = f'service-account-{deployment.stage}' + + print(deployment.variables['API_DOMAIN_NAME']) + + skip = ['DSS_DEPLOYMENT_STAGE'] + for key in deployment.variables: + if key in skip: + continue + request_input(deployment.variables, key, deployment.stage, accept) + + +@click.command() +@click.option('--stage', prompt="Deployment stage name") +@click.option('--accept-defaults', is_flag=True, default=False) +def main(stage, accept_defaults): + deployment = 
dss_deployment.DSSDeployment(stage) + exists = os.path.exists(deployment.root) + + if exists and accept_defaults: + accept = Accept.all + elif accept_defaults: + accept = Accept.all_but_none + else: + accept = Accept.nothing + + get_user_input(deployment, accept) + + deployment.write() + dss_deployment.set_active_stage(stage) + + print() + print('Deployment Steps') + print('\t1. Customize Terraform scripting as needed:') + for comp in os.listdir(deployment.root): + path = os.path.join(deployment.root, comp) + if not os.path.isdir(path): + continue + print(f'\t\t{path}') + print('\t2. run `scripts/create_config_gs_service_account.sh`') + print('\t3. Visit the Google console to acquire `application_secrets.json`') + print('\t4. run `source environment`') + print('\t5. run `make deploy-infra`') + print('\t6. run `make deploy`') + + +if __name__ == "__main__": + main() diff --git a/daemons/Makefile b/daemons/Makefile index 7530bf6608..3a122d8359 100644 --- a/daemons/Makefile +++ b/daemons/Makefile @@ -32,9 +32,6 @@ $(SERIAL_AWS_DAEMONS) $(PARALLEL_AWS_DAEMONS) scheduled-ci-build: ../tests/daemons/sample_s3_bundle_created_event.json.template \ ../tests/daemons/a47b90b2-0967-4fbf-87bc-c6c12db3fedf.2017-07-12T055120.037644Z; \ fi - @if [[ $@ == "dss-s3-copy-sfn" || %@ == "dss-s3-copy-write-metadata-sfn" || $@ == "dss-checkout" || $@ == "dss-scalability-test" ]]; then \ - $(DSS_HOME)/scripts/deploy_checkout_lifecycle.py; \ - fi dss-gs-event-relay: $(DSS_HOME)/scripts/deploy_gcf.py $@ --entry-point "dss_gs_bucket_events_$(subst -,_,$(DSS_GS_BUCKET))" diff --git a/deployment/.gitignore b/deployment/.gitignore new file mode 100644 index 0000000000..11d56e23be --- /dev/null +++ b/deployment/.gitignore @@ -0,0 +1,11 @@ +* +!.gitignore +!Makefile +!active +!*/ +!/dev/** +!/prod/** +**/gcp-credentials.json +**/application_secrets.json +**/.terraform +**/local_variables.tf diff --git a/deployment/Makefile b/deployment/Makefile new file mode 100644 index 0000000000..0b08b9d8c0 --- /dev/null +++ b/deployment/Makefile @@ -0,0 +1,51 @@ +COMPONENT= +STAGEPATH=${shell cd active && pwd -P} +STAGE=${shell basename $(STAGEPATH)} +DIRS=${shell find $(STAGE)/* -not -path "*/\.*" -type d} +COMPONENTS=${shell basename $(DIRS)} +AWS_PROFILE=${shell cat $(STAGE)/local_variables.tf | jq -r .variable.aws_profile.default} + +all: init + +init: + @echo $(STAGE) + @echo $(COMPONENTS) + @for c in $(COMPONENTS); do \ + $(MAKE) init-component STAGE=$(STAGE) COMPONENT=$$c; \ + done + +apply: + @echo $(STAGE) + @for c in $(COMPONENTS); do \ + $(MAKE) apply-component STAGE=$(STAGE) COMPONENT=$$c; \ + done + +destroy: + @echo $(STAGE) + @for c in $(COMPONENTS); do \ + $(MAKE) destroy-component STAGE=$(STAGE) COMPONENT=$$c; \ + done + +clean: + @echo $(STAGE) + @for c in $(COMPONENTS); do \ + $(MAKE) clean-component STAGE=$(STAGE) COMPONENT=$$c; \ + done + +init-component: + @if [[ -e $(STAGE)/backend_config.hcl ]]; then \ + cd $(STAGE)/$(COMPONENT); AWS_PROFILE=$(AWS_PROFILE) terraform init --backend-config=../backend_config.hcl; \ + else \ + cd $(STAGE)/$(COMPONENT); AWS_PROFILE=$(AWS_PROFILE) terraform init; \ + fi + +apply-component: init-component + cd $(STAGE)/$(COMPONENT); AWS_PROFILE=$(AWS_PROFILE) terraform apply + +destroy-component: init-component + cd $(STAGE)/$(COMPONENT); AWS_PROFILE=$(AWS_PROFILE) terraform destroy + +clean-component: + cd $(STAGE)/$(COMPONENT); -rm -rf .terraform + +.PHONY: init plan apply clean diff --git a/deployment/active b/deployment/active new file mode 120000 index 0000000000..415df68c78 --- 
/dev/null +++ b/deployment/active @@ -0,0 +1 @@ +bhannafitest \ No newline at end of file diff --git a/application_secrets.json.enc b/deployment/dev/application_secrets.json.enc similarity index 100% rename from application_secrets.json.enc rename to deployment/dev/application_secrets.json.enc diff --git a/deployment/dev/buckets/backend.tf b/deployment/dev/buckets/backend.tf new file mode 100644 index 0000000000..79b75bc2db --- /dev/null +++ b/deployment/dev/buckets/backend.tf @@ -0,0 +1,11 @@ +{ + "terraform": { + "backend": { + "s3": { + "bucket": "org-humancellatlas-dss-config", + "key": "dss-buckets-dev.tfstate", + "region": "us-east-1" + } + } + } +} \ No newline at end of file diff --git a/deployment/dev/buckets/dss_variables.tf b/deployment/dev/buckets/dss_variables.tf new file mode 120000 index 0000000000..a1c5e71c88 --- /dev/null +++ b/deployment/dev/buckets/dss_variables.tf @@ -0,0 +1 @@ +../dss_variables.tf \ No newline at end of file diff --git a/deployment/dev/buckets/gs_buckets.tf b/deployment/dev/buckets/gs_buckets.tf new file mode 100644 index 0000000000..fe46b38e25 --- /dev/null +++ b/deployment/dev/buckets/gs_buckets.tf @@ -0,0 +1,56 @@ +resource google_storage_bucket dss_gs_bucket { + count = "${length(var.DSS_GS_BUCKET) > 0 ? 1 : 0}" + name = "${var.DSS_GS_BUCKET}" + provider = "google" + storage_class = "REGIONAL" + location = "us-central1" +} + +resource google_storage_bucket dss_gs_bucket_test { + count = "${length(var.DSS_GS_BUCKET_TEST) > 0 ? 1 : 0}" + name = "${var.DSS_GS_BUCKET_TEST}" + provider = "google" + location = "US-EAST1" + storage_class = "REGIONAL" + lifecycle_rule { + action { + type = "Delete" + } + condition { + age = 7 + is_live = true + } + } +} + +resource google_storage_bucket dss_gs_bucket_test_fixtures { + count = "${length(var.DSS_GS_BUCKET_TEST_FIXTURES) > 0 ? 1 : 0}" + name = "${var.DSS_GS_BUCKET_TEST_FIXTURES}" + provider = "google" + location = "US-EAST1" + storage_class = "REGIONAL" +} + +resource google_storage_bucket dss_gs_checkout_bucket { + count = "${length(var.DSS_GS_CHECKOUT_BUCKET) > 0 ? 1 : 0}" + name = "${var.DSS_GS_CHECKOUT_BUCKET}" + provider = "google" + location = "us-central1" + storage_class = "REGIONAL" +} + +resource google_storage_bucket dss_gs_checkout_bucket_test { + count = "${length(var.DSS_GS_CHECKOUT_BUCKET_TEST) > 0 ? 1 : 0}" + name = "${var.DSS_GS_CHECKOUT_BUCKET_TEST}" + provider = "google" + location = "us-central1" + storage_class = "REGIONAL" +} + +resource google_storage_bucket dss_gs_checkout_bucket_test_fixtures { + count = "${length(var.DSS_GS_CHECKOUT_BUCKET_TEST_FIXTURES) > 0 ? 
1 : 0}" + name = "${var.DSS_GS_CHECKOUT_BUCKET_TEST_FIXTURES}" + provider = "google" + location = "us-central1" + storage_class = "REGIONAL" +} diff --git a/deployment/dev/buckets/local_variables.tf b/deployment/dev/buckets/local_variables.tf new file mode 120000 index 0000000000..5467e01f5e --- /dev/null +++ b/deployment/dev/buckets/local_variables.tf @@ -0,0 +1 @@ +../local_variables.tf \ No newline at end of file diff --git a/deployment/dev/buckets/providers.tf b/deployment/dev/buckets/providers.tf new file mode 120000 index 0000000000..7244d01e4c --- /dev/null +++ b/deployment/dev/buckets/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/deployment/dev/buckets/s3_buckets.tf b/deployment/dev/buckets/s3_buckets.tf new file mode 100644 index 0000000000..0a2a5377a6 --- /dev/null +++ b/deployment/dev/buckets/s3_buckets.tf @@ -0,0 +1,46 @@ +resource aws_s3_bucket dss_s3_bucket { + count = "${length(var.DSS_S3_BUCKET) > 0 ? 1 : 0}" + bucket = "${var.DSS_S3_BUCKET}" +} + +resource aws_s3_bucket dss_s3_bucket_test { + count = "${length(var.DSS_S3_BUCKET_TEST) > 0 ? 1 : 0}" + bucket = "${var.DSS_S3_BUCKET_TEST}" + lifecycle_rule { + id = "prune old things" + enabled = true + abort_incomplete_multipart_upload_days = 7 + expiration { + days = 7 + } + } +} + +resource aws_s3_bucket dss_s3_bucket_test_fixtures { + count = "${length(var.DSS_S3_BUCKET_TEST_FIXTURES) > 0 ? 1 : 0}" + bucket = "${var.DSS_S3_BUCKET_TEST_FIXTURES}" +} + +resource aws_s3_bucket dss_s3_checkout_bucket { + count = "${length(var.DSS_S3_CHECKOUT_BUCKET) > 0 ? 1 : 0}" + bucket = "${var.DSS_S3_CHECKOUT_BUCKET}" + + lifecycle_rule { + id = "dss_checkout_expiration" + enabled = true + + expiration { + days = 30 + } + } +} + +resource aws_s3_bucket dss_s3_checkout_bucket_test { + count = "${length(var.DSS_S3_CHECKOUT_BUCKET_TEST) > 0 ? 1 : 0}" + bucket = "${var.DSS_S3_CHECKOUT_BUCKET_TEST}" +} + +resource aws_s3_bucket dss_s3_checkout_bucket_test_fixtures { + count = "${length(var.DSS_S3_CHECKOUT_BUCKET_TEST_FIXTURES) > 0 ? 
1 : 0}" + bucket = "${var.DSS_S3_CHECKOUT_BUCKET_TEST_FIXTURES}" +} diff --git a/deployment/dev/buckets/variables.tf b/deployment/dev/buckets/variables.tf new file mode 120000 index 0000000000..3a65dccd23 --- /dev/null +++ b/deployment/dev/buckets/variables.tf @@ -0,0 +1 @@ +../variables.tf \ No newline at end of file diff --git a/deployment/dev/domain/backend.tf b/deployment/dev/domain/backend.tf new file mode 100644 index 0000000000..e92c513331 --- /dev/null +++ b/deployment/dev/domain/backend.tf @@ -0,0 +1,11 @@ +{ + "terraform": { + "backend": { + "s3": { + "bucket": "org-humancellatlas-dss-config", + "key": "dss-domain-dev.tfstate", + "region": "us-east-1" + } + } + } +} \ No newline at end of file diff --git a/deployment/dev/domain/domain.tf b/deployment/dev/domain/domain.tf new file mode 100644 index 0000000000..de770fc9ad --- /dev/null +++ b/deployment/dev/domain/domain.tf @@ -0,0 +1,31 @@ +data aws_route53_zone dss_route53_zone { + name = "${var.route53_zone}" +} + +data aws_acm_certificate dss_domain_cert { + domain = "${var.certificate_domain}" + types = ["AMAZON_ISSUED"] + most_recent = true +} + +resource aws_api_gateway_domain_name dss_domain { + domain_name = "${var.API_DOMAIN_NAME}" + certificate_arn = "${data.aws_acm_certificate.dss_domain_cert.arn}" + depends_on = ["data.aws_acm_certificate.dss_domain_cert"] + provider = "aws.us-east-1" +} + +resource aws_route53_record dss_route53_record { + zone_id = "${data.aws_route53_zone.dss_route53_zone.zone_id}" + name = "${aws_api_gateway_domain_name.dss_domain.domain_name}" + type = "A" + + alias { + name = "${aws_api_gateway_domain_name.dss_domain.cloudfront_domain_name}" + zone_id = "${aws_api_gateway_domain_name.dss_domain.cloudfront_zone_id}" + evaluate_target_health = false + } + + depends_on = ["data.aws_acm_certificate.dss_domain_cert"] + provider = "aws.us-east-1" +} diff --git a/deployment/dev/domain/dss_variables.tf b/deployment/dev/domain/dss_variables.tf new file mode 120000 index 0000000000..a1c5e71c88 --- /dev/null +++ b/deployment/dev/domain/dss_variables.tf @@ -0,0 +1 @@ +../dss_variables.tf \ No newline at end of file diff --git a/deployment/dev/domain/local_variables.tf b/deployment/dev/domain/local_variables.tf new file mode 120000 index 0000000000..5467e01f5e --- /dev/null +++ b/deployment/dev/domain/local_variables.tf @@ -0,0 +1 @@ +../local_variables.tf \ No newline at end of file diff --git a/deployment/dev/domain/providers.tf b/deployment/dev/domain/providers.tf new file mode 120000 index 0000000000..7244d01e4c --- /dev/null +++ b/deployment/dev/domain/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/deployment/dev/domain/variables.tf b/deployment/dev/domain/variables.tf new file mode 120000 index 0000000000..3a65dccd23 --- /dev/null +++ b/deployment/dev/domain/variables.tf @@ -0,0 +1 @@ +../variables.tf \ No newline at end of file diff --git a/deployment/dev/dss_variables.tf b/deployment/dev/dss_variables.tf new file mode 100644 index 0000000000..7f61f0177f --- /dev/null +++ b/deployment/dev/dss_variables.tf @@ -0,0 +1,88 @@ +{ + "variable": { + "DSS_DEPLOYMENT_STAGE": { + "description": "Name of deployment.", + "default": "dev" + }, + "AWS_DEFAULT_REGION": { + "description": "AWS deployment region.", + "default": "us-east-1" + }, + "GCP_DEFAULT_REGION": { + "description": "Google infrastructure default region.", + "default": "us-central1" + }, + "DSS_S3_BUCKET": { + "description": "DSS S3 Bucket.", + "default": "org-humancellatlas-dss-dev" + }, + "DSS_S3_BUCKET_TEST": 
{ + "description": "S3 test bucket. Enter \"none\" if you do not intend to run tests.", + "default": "org-humancellatlas-dss-test" + }, + "DSS_S3_BUCKET_TEST_FIXTURES": { + "description": "S3 test fixtures bucket. Enter \"none\" if you do not intend to run tests.", + "default": "org-humancellatlas-dss-test-fixtures" + }, + "DSS_S3_CHECKOUT_BUCKET": { + "description": "S3 checkout service bucket.", + "default": "org-humancellatlas-dss-checkout-dev" + }, + "DSS_S3_CHECKOUT_BUCKET_TEST": { + "description": "S3 checkout service test bucket. Enter \"none\" if you do not intend to run tests.", + "default": "org-humancellatlas-dss-checkout-test" + }, + "DSS_S3_CHECKOUT_BUCKET_TEST_FIXTURES": { + "description": "S3 checkout service test fixtures bucket. Enter \"none\" if you do not intend to run tests.", + "default": "org-humancellatlas-dss-checkout-test-fixtures" + }, + "DSS_GS_BUCKET": { + "description": "DSS google bucket.", + "default": "org-humancellatlas-dss-dev" + }, + "DSS_GS_BUCKET_TEST": { + "description": "DSS google test bucket. Enter \"none\" if you do not intend to run tests.", + "default": "org-humancellatlas-dss-test" + }, + "DSS_GS_BUCKET_TEST_FIXTURES": { + "description": "DSS google test fixtures bucket. Enter \"none\" if you do not intend to run tests.", + "default": "org-humancellatlas-dss-test-fixtures" + }, + "DSS_GS_CHECKOUT_BUCKET": { + "description": "GS checkout service bucket.", + "default": "org-humancellatlas-dss-checkout-dev" + }, + "DSS_GS_CHECKOUT_BUCKET_TEST": { + "description": "GS checkout service test bucket. Enter \"none\" if you do not intend to run tests.", + "default": "org-humancellatlas-dss-checkout-test" + }, + "DSS_GS_CHECKOUT_BUCKET_TEST_FIXTURES": { + "description": "GS checkout service test fixtures bucket. Enter \"none\" if you do not intend to run tests.", + "default": "org-humancellatlas-dss-checkout-test-fixtures" + }, + "DSS_ES_DOMAIN": { + "description": "Elasticsearch domain name.", + "default": "dss-index-dev" + }, + "API_DOMAIN_NAME": { + "description": "Domain name of your deployment (e.g. 
dss.dev.data.humancellatlas.org).", + "default": "dss.dev.data.humancellatlas.org" + }, + "DSS_PARAMETER_STORE": { + "description": "Name of AWS SSM parameter store used to keep event relay credentials.", + "default": "/dss/parameters" + }, + "DSS_EVENT_RELAY_AWS_USERNAME": { + "description": "AWS IAM user providing identity to the event relay", + "default": "dss-event-relay" + }, + "DSS_EVENT_RELAY_AWS_ACCESS_KEY_ID_PARAMETER_NAME": { + "description": "Event relay IAM user access key id parameter name", + "default": "event_relay_aws_access_key_id" + }, + "DSS_EVENT_RELAY_AWS_SECRET_ACCESS_KEY_PARAMETER_NAME": { + "description": "Event relay IAM user secret access key parameter name", + "default": "event_relay_aws_secret_access_key" + } + } +} diff --git a/deployment/dev/elasticsearch/backend.tf b/deployment/dev/elasticsearch/backend.tf new file mode 100644 index 0000000000..0781981544 --- /dev/null +++ b/deployment/dev/elasticsearch/backend.tf @@ -0,0 +1,11 @@ +{ + "terraform": { + "backend": { + "s3": { + "bucket": "org-humancellatlas-dss-config", + "key": "dss-elasticsearch-dev.tfstate", + "region": "us-east-1" + } + } + } +} \ No newline at end of file diff --git a/deployment/dev/elasticsearch/dss_variables.tf b/deployment/dev/elasticsearch/dss_variables.tf new file mode 120000 index 0000000000..a1c5e71c88 --- /dev/null +++ b/deployment/dev/elasticsearch/dss_variables.tf @@ -0,0 +1 @@ +../dss_variables.tf \ No newline at end of file diff --git a/deployment/dev/elasticsearch/elasticsearch.tf b/deployment/dev/elasticsearch/elasticsearch.tf new file mode 100644 index 0000000000..5a9c913dfb --- /dev/null +++ b/deployment/dev/elasticsearch/elasticsearch.tf @@ -0,0 +1,82 @@ +data aws_caller_identity current {} +data aws_region current {} + +resource aws_elasticsearch_domain elasticsearch { + count = "${length(var.DSS_ES_DOMAIN) > 0 ? 1 : 0}" + domain_name = "${var.DSS_ES_DOMAIN}" + elasticsearch_version = "5.5" + + cluster_config = { + instance_type = "m4.large.elasticsearch" + } + + advanced_options = { + rest.action.multi.allow_explicit_index = "true" + } + + ebs_options = { + ebs_enabled = "true" + volume_type = "gp2" + volume_size = "35" + } + + log_publishing_options = { + cloudwatch_log_group_arn = "arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:log-group:/aws/aes/domains/${var.DSS_ES_DOMAIN}/index-logs" + log_type = "INDEX_SLOW_LOGS" + enabled = "true" + } + + log_publishing_options = { + cloudwatch_log_group_arn = "arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:log-group:/aws/aes/domains/${var.DSS_ES_DOMAIN}/search-logs" + log_type = "SEARCH_SLOW_LOGS" + enabled = "true" + } + + access_policies = </dev/null 2>&1 +if [[ ! ${?} -eq 0 ]]; then + gcloud --project ${GCP_PROJECT} iam service-accounts create $ID --display-name=$ID + [[ ${?} -eq 0 ]] || exit 1 +fi + +for role in "cloudfunctions.developer" "iam.serviceAccountActor" "owner"; do + gcloud --project ${GCP_PROJECT} projects add-iam-policy-binding ${GCP_PROJECT} --member ${MEMBER} --role roles/${role} >/dev/null + [[ ${?} -eq 0 ]] || exit 1 +done + +if [[ ! 
-e "${GOOGLE_APPLICATION_CREDENTIALS}" ]]; then + gcloud --project ${GCP_PROJECT} iam service-accounts keys create "${GOOGLE_APPLICATION_CREDENTIALS}" --iam-account ${EMAIL} + [[ ${?} -eq 0 ]] || exit 1 +fi diff --git a/scripts/deploy_checkout_lifecycle.py b/scripts/deploy_checkout_lifecycle.py deleted file mode 100755 index 39806a66c1..0000000000 --- a/scripts/deploy_checkout_lifecycle.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python - -import os -import boto3 - -checkout_bucket = os.environ["DSS_S3_CHECKOUT_BUCKET"] -s3_client = boto3.client('s3') - -s3_client.put_bucket_lifecycle_configuration( - Bucket=checkout_bucket, - LifecycleConfiguration={ - "Rules": [ - { - 'Filter': {}, - 'Status': 'Enabled', - "Expiration": { - "Days": 30, - }, - 'ID': "dss_checkout_expiration", - }] - } -) diff --git a/scripts/destructor.py b/scripts/destructor.py new file mode 100755 index 0000000000..7a9c39c12d --- /dev/null +++ b/scripts/destructor.py @@ -0,0 +1,298 @@ +#!/usr/bin/env python + +""" +This script provides tools useful for removing some AWS and GCP services unmanaged by Terraform: + AWS: Lambdas, step functions, API gateways, IAM roles + GCP: service accounts, cloud functions + +The components of a DSS deployment listed above may be removed with the `destruct` command, +provided the components are suffixed with the stage name, e.g. `{component-name}-{stage}` +""" + +import json +import boto3 +import click +import subprocess + + +IAM = boto3.client('iam') +LAMBDA = boto3.client('lambda') +SFN = boto3.client('stepfunctions') +APIGATEWAY = boto3.client('apigateway') + + +def cache_filter(func): + cache = {} + def wrapped(name=None, prefix='', suffix=''): + if not cache.get(func, None): + cache[func] = func() + results = cache[func] + + if name is not None: + if name in results: + return {name: results[name]} + else: + return {} + + return {name: results[name] + for name in results + if name.startswith(prefix) and name.endswith(suffix)} + + return wrapped + + +def run(command, quiet=False): + if not quiet: + print(command) + + out = subprocess.run(command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + encoding='utf-8') + + try: + out.check_returncode() + except subprocess.CalledProcessError: + if not quiet: + print(f'\t{out.stderr}') + print(f'Exit status {out.returncode} while running "{command}"') + return out.stdout.strip() + + +def confirm(kind, name): + if not click.confirm(f'Delete {kind} {name}?'): + return False + +# if name != click.prompt(f'Enter {kind} name to confirm', default='', show_default=False): +# return False + + return True + + +@cache_filter +def deployed_roles(): + paginator = IAM.get_paginator('list_roles') + return {r['RoleName'] : r['Arn'] + for page in paginator.paginate(MaxItems=100) + for r in page['Roles']} + + +@cache_filter +def deployed_lambdas(): + paginator = LAMBDA.get_paginator('list_functions') + return {l['FunctionName'] : l['FunctionArn'] + for page in paginator.paginate(MaxItems=100) + for l in page['Functions']} + + +@cache_filter +def deployed_stepfunctions(): + paginator = SFN.get_paginator('list_state_machines') + return {sfn['name'] : sfn['stateMachineArn'] + for page in paginator.paginate(maxResults=100) + for sfn in page['stateMachines']} + + +@cache_filter +def deployed_gcp_service_accounts(): + service_accounts = json.loads(run('gcloud iam service-accounts list --format json', quiet=True)) + return {sa['email'].split('@')[0] : sa + for sa in service_accounts} + + +@cache_filter +def deployed_api_gateways(): + gateways = 
dict() + + for api in APIGATEWAY.get_rest_apis()['items']: + api_id = api['id'] + + for resource in APIGATEWAY.get_resources(restApiId=api_id)['items']: + try: + info = APIGATEWAY.get_integration(restApiId=api_id, resourceId=resource['id'], httpMethod='GET') + except APIGATEWAY.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'NotFoundException': + continue + else: + raise + + kind, name = info['uri'].split(':')[-2:] + name = name.split('/')[0] + gateways[name] = api_id + break + + return gateways + + +def raw_api_host(stage, prefix='dss'): + try: + resp = LAMBDA.get_policy(FunctionName=f'{prefix}-{stage}') + except LAMBDA.exceptions.ClientError: + raise Exception(f'{prefix}-{stage} not found among api gateway deployments') + + region = boto3.session.Session().region_name + policy = json.loads(resp['Policy']) + arn = policy['Statement'][0]['Condition']['ArnLike']['AWS:SourceArn'] + api_id = arn.split(':')[-1].split('/')[0] + + return f'{api_id}.execute-api.{region}.amazonaws.com/{stage}/' + + +def delete_api_gateway(name): + print() + if confirm('api gateway', name): + api_id = deployed_api_gateways().get(name, None) + if api_id is not None: + APIGATEWAY.delete_rest_api(restApiId=api_id) + print(f'deleted {name}') + + +def delete_api_gateways(name, prefix, suffix): + for name, api_id in deployed_api_gateways(name, prefix, suffix).items(): + delete_api_gateway(name) + + +def delete_role(name): + print() + if confirm('iam role', name): + IAM.delete_role_policy(RoleName=name, PolicyName=name) + IAM.delete_role(RoleName=name) + print(f'deleted {name}') + + +def delete_roles(name, prefix, suffix): + for name in deployed_roles(name, prefix, suffix): + delete_role(name) + + +def delete_lambda(name): + print() + if confirm('lambda', name): + LAMBDA.delete_function(FunctionName=name) + print(f'deleted {name}') + + +def delete_lambdas(name, prefix, suffix): + for name in deployed_lambdas(name, prefix, suffix): + delete_lambda(name) + + +def delete_stepfunction(name): + print() + if confirm('stepfunction', name): + arn = deployed_stepfunctions().get(name, None) + if arn is not None: + SFN.delete_state_machine(stateMachineArn=arn) + print(f'deleted {name}') + + +def delete_stepfunctions(name, prefix, suffix): + for name in deployed_stepfunctions(name, prefix, suffix): + delete_stepfunction(name) + + +def delete_google_cloud_function(name): + print() + functions = json.loads(run('gcloud beta functions list --format json', quiet=True)) + functions = [f['name'].split('/')[-1] for f in functions] + if name not in functions: + print(f'google cloud function {name} not deployed') + else: + run(f'gcloud beta functions delete {name} --quiet') + + +def delete_gcp_service_account(name): + print() + if not confirm('gcp service account', name): + return + + project_id = run('gcloud config get-value project', quiet=True) + email = f'{name}@{project_id}.iam.gserviceaccount.com' + member = f'serviceAccount:{email}' + policy = json.loads(run(f'gcloud projects get-iam-policy {project_id} --format json', quiet=True)) + bindings = policy['bindings'] + for b in bindings: + if member in b['members']: + role = b['role'] + run(f'gcloud projects remove-iam-policy-binding {project_id} --member {member} --role {role}') + run(f'gcloud iam service-accounts delete {email} --quiet') + + +def delete_gcp_service_accounts(name, prefix, suffix): + for name in deployed_gcp_service_accounts(name, prefix, suffix): + delete_gcp_service_account(name) + + +@click.command('delete_api_gateways') +@click.option('--name', 
default=None) +@click.option('--prefix', default='') +@click.option('--suffix', default='') +def delete_api_gateways_command(name, prefix, suffix): + delete_api_gateways(name, prefix, suffix) + + +@click.command('delete_lambdas') +@click.option('--name', default=None) +@click.option('--prefix', default='') +@click.option('--suffix', default='') +def delete_lambdas_command(name, prefix, suffix): + delete_lambdas(name, prefix, suffix) + + +@click.command('delete_stepfunctions') +@click.option('--name', default=None) +@click.option('--prefix', default='') +@click.option('--suffix', default='') +def delete_stepfunctions_command(name, prefix, suffix): + delete_stepfunctions(name, prefix, suffix) + + +@click.command('delete_roles') +@click.option('--name', default=None) +@click.option('--prefix', default='') +@click.option('--suffix', default='') +def delete_roles_command(name, prefix, suffix): + delete_roles(name, prefix, suffix) + + +@click.command('delete_gcp_service_accounts') +@click.option('--name', default=None) +@click.option('--prefix', default='') +@click.option('--suffix', default='') +def delete_gcp_service_accounts_command(name, prefix, suffix): + delete_gcp_service_accounts(name, prefix, suffix) + + +@click.command('destruct') +@click.argument('stage') +def destruct_command(stage): + delete_api_gateways(name=None, prefix='', suffix=stage) + delete_lambdas(name=None, prefix='', suffix=stage) + delete_stepfunctions(name=None, prefix='', suffix=stage) + delete_roles(name=None, prefix='', suffix=stage) + delete_gcp_service_accounts(name=None, prefix='', suffix=stage) + delete_google_cloud_function(f'dss-gs-event-relay-{stage}') + + +@click.command('raw_api_host') +@click.argument('stage') +@click.option('--prefix', default='dss') +def raw_api_host_command(stage, prefix): + print(raw_api_host(stage, prefix)) + + +@click.group() +def cli(): + pass + + +if __name__ == "__main__": + cli.add_command(destruct_command) + cli.add_command(delete_api_gateways_command) + cli.add_command(delete_lambdas_command) + cli.add_command(delete_stepfunctions_command) + cli.add_command(delete_roles_command) + cli.add_command(delete_gcp_service_accounts_command) + cli.add_command(raw_api_host_command) + cli() diff --git a/scripts/enable_gs_services.sh b/scripts/enable_gs_services.sh new file mode 100755 index 0000000000..a954b4b7ea --- /dev/null +++ b/scripts/enable_gs_services.sh @@ -0,0 +1,10 @@ +#!/bin/bash -x + +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done + +DSS_HOME="$(cd -P "$(dirname "$SOURCE")/.." 
&& pwd)" +GCP_PROJECT=$(cat ${DSS_HOME}/deployment/active/variables.tf | jq -r '.variable["gcp_project"]["default"]') + +gcloud --project ${GCP_PROJECT} services enable cloudfunctions.googleapis.com +gcloud --project ${GCP_PROJECT} services enable runtimeconfig.googleapis.com diff --git a/scripts/set_apigateway_base_path_mapping.py b/scripts/set_apigateway_base_path_mapping.py new file mode 100755 index 0000000000..c0b59bc1b9 --- /dev/null +++ b/scripts/set_apigateway_base_path_mapping.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python + +import os +import sys +import json +import boto3 + +stage = os.environ['DSS_DEPLOYMENT_STAGE'] +domain_name = os.environ['API_DOMAIN_NAME'] + +APIGATEWAY = boto3.client('apigateway') +LAMBDA = boto3.client('lambda') +lambda_name = f'dss-{stage}' + +lambda_arn = None +paginator = LAMBDA.get_paginator('list_functions') +for page in paginator.paginate(): + for l in page['Functions']: + if lambda_name == l['FunctionName']: + lambda_arn = l['FunctionArn'] + break + +if not lambda_arn: + raise Exception(f'Lambda function {lambda_name} not found. Did you run `make deploy`?') + +policy = json.loads( + LAMBDA.get_policy(FunctionName=lambda_name)['Policy'] +) + +source_arn = policy['Statement'][0]['Condition']['ArnLike']['AWS:SourceArn'] +api_id = source_arn.split(':')[5].split('/')[0] + +try: + APIGATEWAY.create_base_path_mapping( + domainName=domain_name, + restApiId=api_id, + stage=stage + ) +except APIGATEWAY.exceptions.ConflictException: + pass diff --git a/scripts/set_event_relay_parameters.py b/scripts/set_event_relay_parameters.py index 4033a41799..55cf5131d3 100755 --- a/scripts/set_event_relay_parameters.py +++ b/scripts/set_event_relay_parameters.py @@ -1,29 +1,54 @@ #!/usr/bin/env python + +# Terraform scripts may be used to generate the access key id and secret access key. +# However, the secret access key will either be stored unencrypted in the Terraform state file +# or encrypted with the use of a PGP public key. In the latter case, decryption will be +# required to use the secret access key, requiring the PGP secret key -- which would +# then need to be shared among operators. +# +# For now, it is preferable to grab the keys with an AWS IAM API call and store them encrypted +# into the SSM parameter store. 
+# +# Terraform aws_iam_access_key docs: +# https://www.terraform.io/docs/providers/aws/r/iam_access_key.html + import os +import sys import click import boto3 -SSM = boto3.client('ssm') -IAM = boto3.client('iam') -username = os.environ['DSS_EVENT_RELAY_AWS_USERNAME'] -parameter_store = os.environ['DSS_PARAMETER_STORE'] -access_key_id_parameter_name = os.environ['DSS_EVENT_RELAY_AWS_ACCESS_KEY_ID_PARAMETER_NAME'] -secret_access_key_parameter_name = os.environ['DSS_EVENT_RELAY_AWS_SECRET_ACCESS_KEY_PARAMETER_NAME'] +pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # noqa +sys.path.append(pkg_root) -key_info = IAM.create_access_key( - UserName=username -) +import dss_deployment +active = dss_deployment.active() -SSM.put_parameter( - Name=f'{parameter_store}/{access_key_id_parameter_name}', - Value=key_info['AccessKey']['AccessKeyId'], - Type='SecureString', - Overwrite=True -) +SSM = boto3.client('ssm') +IAM = boto3.client('iam') +username = active.value('DSS_EVENT_RELAY_AWS_USERNAME') +parameter_store = active.value('DSS_PARAMETER_STORE') +access_key_id_parameter_name = active.value('DSS_EVENT_RELAY_AWS_ACCESS_KEY_ID_PARAMETER_NAME') +secret_access_key_parameter_name = active.value('DSS_EVENT_RELAY_AWS_SECRET_ACCESS_KEY_PARAMETER_NAME') -SSM.put_parameter( - Name=f'{parameter_store}/{secret_access_key_parameter_name}', - Value=key_info['AccessKey']['SecretAccessKey'], - Type='SecureString', - Overwrite=True -) +try: + SSM.get_parameter( + Name=f'{parameter_store}/{access_key_id_parameter_name}' + ) +except SSM.exceptions.ParameterNotFound: + key_info = IAM.create_access_key( + UserName=username + ) + + SSM.put_parameter( + Name=f'{parameter_store}/{access_key_id_parameter_name}', + Value=key_info['AccessKey']['AccessKeyId'], + Type='SecureString', + Overwrite=True + ) + + SSM.put_parameter( + Name=f'{parameter_store}/{secret_access_key_parameter_name}', + Value=key_info['AccessKey']['SecretAccessKey'], + Type='SecureString', + Overwrite=True + )
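
For orientation, a minimal sketch of how a consumer such as the event relay might read these SecureString parameters back out of SSM. It is illustrative only (not part of the patch) and assumes the `DSS_PARAMETER_STORE` and parameter-name defaults defined in `dss_variables.tf` above.

    #!/usr/bin/env python
    # Illustrative sketch: read back the event relay credentials stored by
    # scripts/set_event_relay_parameters.py. Parameter names mirror the defaults
    # in dss_variables.tf; adjust if your deployment overrides them.
    import os
    import boto3

    SSM = boto3.client('ssm')

    parameter_store = os.environ['DSS_PARAMETER_STORE']
    access_key_id_name = os.environ['DSS_EVENT_RELAY_AWS_ACCESS_KEY_ID_PARAMETER_NAME']
    secret_access_key_name = os.environ['DSS_EVENT_RELAY_AWS_SECRET_ACCESS_KEY_PARAMETER_NAME']

    # SecureString values are returned decrypted only when WithDecryption=True.
    access_key_id = SSM.get_parameter(
        Name=f'{parameter_store}/{access_key_id_name}',
        WithDecryption=True
    )['Parameter']['Value']

    secret_access_key = SSM.get_parameter(
        Name=f'{parameter_store}/{secret_access_key_name}',
        WithDecryption=True
    )['Parameter']['Value']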