diff --git a/deploy/Makefile b/deploy/Makefile index 6309a6b6f8..ec0b9f72c3 100644 --- a/deploy/Makefile +++ b/deploy/Makefile @@ -57,6 +57,16 @@ prod: --profile si \ up --detach +prod-service: + GATEWAY=$(shell $(MAKEPATH)/scripts/gateway.sh) \ + docker-compose \ + -f $(MAKEPATH)/docker-compose.yml \ + -f $(MAKEPATH)/docker-compose.env-static.yml \ + -f $(MAKEPATH)/docker-compose.pganalyze.yml \ + -f $(MAKEPATH)/docker-compose.prod.yml \ + --profile si \ + up + web: init # REPOPATH=$(REPOPATH) $(MAKEPATH)/scripts/check-for-artifacts-before-mounting.sh $(MAKEPATH)/scripts/generate-ci-yml.sh $(CI_FROM_REF) $(CI_TO_REF) diff --git a/deploy/docker-compose.env-static.yml b/deploy/docker-compose.env-static.yml new file mode 100644 index 0000000000..321bf11df4 --- /dev/null +++ b/deploy/docker-compose.env-static.yml @@ -0,0 +1,15 @@ +version: "3" +services: + pinga: + volumes: + - "/etc/dev.encryption.key:/run/pinga/cyclone_encryption.key:ro" + sdf: + volumes: + - "/etc/jwt_secret_key.bin:/run/sdf/jwt_secret_key.bin" + - "/etc/dev.encryption.key:/run/sdf/cyclone_encryption.key:ro" + veritech: + volumes: + - "/etc/dev.decryption.key:/run/cyclone/decryption.key:ro" + otel: + env_file: + - "/etc/honeycomb_env" \ No newline at end of file diff --git a/research/staging_host/.gitignore b/research/staging_host/.gitignore new file mode 100644 index 0000000000..6ffaf9c6e4 --- /dev/null +++ b/research/staging_host/.gitignore @@ -0,0 +1,3 @@ +.terraform +terraform.tfstate* +*.ign \ No newline at end of file diff --git a/research/staging_host/README.md b/research/staging_host/README.md new file mode 100644 index 0000000000..bba816d005 --- /dev/null +++ b/research/staging_host/README.md @@ -0,0 +1,24 @@ +# Staging Host Deployment Documentation + +The files in this folder allow you to deploy an EC2 +instance that automatically deploy the latest versions +of SI's containers, resetting the env on every update. 
+Right now, it only brings up a CoreOS instance with
+SI's containers on startup, but no auto-update via watchtower.
+
+It can be started by, while in the folder containing this file,
+running:
+
+```
+butane staging-1.yaml --pretty --strict --files-dir ../../ > staging-1.ign
+terraform apply -auto-approve
+```
+
+The way it works right now, butane copies the deployment
+docker compose files and Makefile onto the server,
+and executes them. The idea would be to, in the future,
+execute each service via its own systemd unit, and have
+watchtower set up with a pre-update
+[lifecycle hook](https://containrrr.dev/watchtower/lifecycle-hooks/)
+that wipes all the data whenever sdf or the dal gets updated.
+#honeycomb_env=/dev/fd/1 +#decryption_file=/dev/fd/1 +#encryption_file=/dev/fd/1 +#jwt_secret_file=/dev/fd/1 + + +honeycomb=$($AWS secretsmanager get-secret-value --secret-id staging/honeycomb --region us-east-2 \ + | $JQ --raw-output '.SecretString') + +token=$(echo "$honeycomb" | $JQ --raw-output '.token') +dataset=$(echo "$honeycomb" | $JQ --raw-output '.dataset') + +echo "HONEYCOMB_TOKEN=$token" > $honeycomb_env +echo "HONEYCOMB_DATASET=$dataset" >> $honeycomb_env + +keys=$($AWS secretsmanager get-secret-value --secret-id staging/keys --region us-east-2 \ + | $JQ --raw-output '.SecretString') + +echo "$keys" | $JQ --raw-output '.decryption' | $BASE64 -d > $decryption_file +echo "$keys" | $JQ --raw-output '.encryption' | $BASE64 -d > $encryption_file +echo "$keys" | $JQ --raw-output '.jwt_secret_key' | $BASE64 -d > $jwt_secret_file diff --git a/research/staging_host/staging-1.yaml b/research/staging_host/staging-1.yaml new file mode 100644 index 0000000000..e5a45c5d79 --- /dev/null +++ b/research/staging_host/staging-1.yaml @@ -0,0 +1,177 @@ +variant: fcos +version: 1.4.0 +storage: + trees: + files: + - path: /etc/hostname + contents: + inline: staging-1 + - path: /usr/local/bin/docker-auth.sh + mode: 0755 + contents: + local: environments/aws/staging_host/scripts/docker-auth.sh + - path: /usr/local/bin/load-keys.sh + mode: 0755 + contents: + local: environments/aws/staging_host/scripts/load-keys.sh + - path: /opt/deploy/Makefile + mode: 0755 + contents: + local: deploy/Makefile + - path: /opt/deploy/scripts/gateway.sh + mode: 0755 + contents: + local: deploy/scripts/gateway.sh + - path: /opt/deploy/docker-compose.yml + contents: + local: deploy/docker-compose.yml + - path: /opt/deploy/docker-compose.env-static.yml + contents: + local: deploy/docker-compose.env-static.yml + - path: /opt/deploy/docker-compose.pganalyze.yml + contents: + local: deploy/docker-compose.pganalyze.yml + - path: /opt/deploy/docker-compose.prod.yml + contents: + local: 
deploy/docker-compose.prod.yml +systemd: + units: + # installing aws-cli as a layered package with rpm-ostree + - name: layer-awscli.service + enabled: true + contents: | + [Unit] + Description=Install AWS cli + Wants=network-online.target + After=network-online.target + + # We run before `zincati.service` to avoid conflicting rpm-ostree + # transactions. - https://docs.fedoraproject.org/en-US/fedora-coreos/os-extensions/ + Before=zincati.service + + + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/usr/bin/rpm-ostree install --apply-live --allow-inactive --idempotent awscli + + [Install] + WantedBy=multi-user.target + - name: layer-make.service + enabled: true + contents: | + [Unit] + Description=Install Make + Wants=network-online.target + After=network-online.target + + After=layer-awscli.service + Before=zincati.service + + + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/usr/bin/rpm-ostree install --apply-live --allow-inactive --idempotent make + + [Install] + WantedBy=multi-user.target + - name: layer-docker-compose.service + enabled: true + contents: | + [Unit] + Description=Install docker-compose + Wants=network-online.target + After=network-online.target + + After=layer-make.service + Before=zincati.service + + + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/usr/bin/rpm-ostree install --apply-live --allow-inactive --idempotent docker-compose + + [Install] + WantedBy=multi-user.target + - name: docker-auth.service + enabled: true + contents: | + [Unit] + Description=Authenticate to container image repository + After=layer-awscli.service + Requires=layer-awscli.service + + [Service] + Type=oneshot + RemainAfterExit=yes + TimeoutStartSec=30s + ExecStart=/usr/local/bin/docker-auth.sh + + [Install] + WantedBy=multi-user.target + - name: load-keys.service + enabled: true + contents: | + [Unit] + Description=Set + After=layer-awscli.service + Requires=layer-awscli.service + + [Service] + Type=oneshot + RemainAfterExit=yes + 
TimeoutStartSec=30s + ExecStart=/usr/local/bin/load-keys.sh + + [Install] + WantedBy=multi-user.target + - name: deployment.service + enabled: true + contents: | + [Unit] + Description=System Initiative Deployment + After=network-online.target + Wants=network-online.target + + After=layer-make.service + Requires=layer-make.service + After=layer-docker-compose.service + Requires=layer-docker-compose.service + + After=docker-auth.service + Requires=docker-auth.service + After=load-keys.service + Requires=load-keys.service + + [Service] + TimeoutStartSec=60s + WorkingDirectory=/opt/deploy + ExecStart=make prod-service + + [Install] + WantedBy=multi-user.target +# - name: watchtower.service +# enabled: true +# contents: | +# [Unit] +# After=network-online.target +# Wants=network-online.target +# +# After=deployment.service +# Requires=deployment.service +# +# +# [Service] +# ExecStartPre=-/usr/bin/docker kill whiskers1 +# ExecStartPre=-/usr/bin/docker rm whiskers1 +# ExecStart=/usr/bin/docker run --name watchtower \ +# -v /var/run/docker.sock:/var/run/docker.sock docker.io/containrrr/watchtower \ +# -v /root/.docker/config.json:/config.json \ +# --interval 30 --label-enable \ +# containrrr/watchtower +# +# [Install] +# WantedBy=multi-user.target + diff --git a/research/staging_host/staging.tf b/research/staging_host/staging.tf new file mode 100644 index 0000000000..2b754ba01b --- /dev/null +++ b/research/staging_host/staging.tf @@ -0,0 +1,38 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 4.0" + } + } +} + +provider "aws" { + region = "us-east-2" +} + +data "local_file" "ignition" { + filename = "${path.module}/staging-1.ign" +} + +resource "aws_instance" "staging-1" { + ami = "ami-0e6f4ffb61e585c76" + instance_type = "t3.medium" + subnet_id = "subnet-07d580fee7a806230" + vpc_security_group_ids = ["sg-0d0be672e4485feb4"] + key_name = "si_key" + iam_instance_profile = "veritech-ec2" + + user_data = 
data.local_file.ignition.content + + tags = { + Name = "staging-1" + Environment = "staging" + Terraform = "true" + } +} + +resource "aws_eip_association" "eip_association" { + instance_id = aws_instance.staging-1.id + allocation_id = "eipalloc-0f8bdc206768cb6a7" +} \ No newline at end of file