From 536254d44a07d08f3e005873c40480d3c42fa00b Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 9 Jan 2025 10:37:07 +0100 Subject: [PATCH 01/15] Added more generic firewall rules that might be useful. --- .../debian/firewall_config/defaults/main.yml | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/roles/debian/firewall_config/defaults/main.yml b/roles/debian/firewall_config/defaults/main.yml index 7c9193af6..4ccc5600f 100644 --- a/roles/debian/firewall_config/defaults/main.yml +++ b/roles/debian/firewall_config/defaults/main.yml @@ -13,6 +13,7 @@ firewall_config: rulesets: - ssh_open - web_open + - common_network # rule always needs to be last so the DROP rules in the OUTPUT chain get applied at the end # Ruleset definitions # Permitted rule lists @@ -43,6 +44,29 @@ firewall_config: letsencrypt: firewall_allowed_tcp_ports: - "80" + # Standard ports for Prometheus outbound rules to allow scraping of exporters + prometheus_server_scraping: + firewall_additional_rules: + - "iptables -A OUTPUT -p tcp --dport 9100 -j ACCEPT" # allow scraping node exporter + - "iptables -A OUTPUT -p tcp --dport 9101 -j ACCEPT" # allow scraping process exporter + - "iptables -A OUTPUT -p tcp --dport 9093 -j ACCEPT" # allow posting to alertmanager + - "iptables -A OUTPUT -p tcp --dport 9115 -j ACCEPT" # allow scraping blackbox exporter + # Commonly required outbound ports for PHP web servers + common_web: + firewall_additional_rules: + - "iptables -A OUTPUT -p tcp --dport 2049 -j ACCEPT" # allow NFS + - "iptables -A OUTPUT -p udp --dport 2049 -j ACCEPT" # allow NFS + - "iptables -A OUTPUT -p tcp --dport 3306 -j ACCEPT" # allow MySQL + # Recommended general firewall settings + common_network: + firewall_additional_rules: + - "iptables -A INPUT -p icmp --icmp-type 8 -s 0/0 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT" # ICMP ping in + - "iptables -A INPUT -p icmp --icmp-type 128 -s 0/0 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT" # ICMP ping in + - 
"iptables -A OUTPUT -p icmp --icmp-type 0 -d 0/0 -m state --state ESTABLISHED,RELATED -j ACCEPT" # ICMP ping out + - "iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT" # established connections out + - "iptables -A OUTPUT -o lo -j ACCEPT" # allow all local traffic + - "iptables -A OUTPUT -p tcp --dport 1025:65535 -j DROP" # block high port tcp traffic outbound + - "iptables -A OUTPUT -p udp --dport 1025:65535 -j DROP" # block high port udp traffic outbound ossec: firewall_allowed_udp_ports: - "1514" From d0cf00e2ba9137e936947448a3de954d2a936c04 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 9 Jan 2025 10:37:25 +0100 Subject: [PATCH 02/15] Added an update step to the installer. --- install.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/install.sh b/install.sh index 0776988a7..94fc15548 100755 --- a/install.sh +++ b/install.sh @@ -161,7 +161,9 @@ if [ ! -d "/home/$CONTROLLER_USER/ce-provision" ]; then /usr/bin/su - "$CONTROLLER_USER" -c "git clone --branch $CONFIG_REPO_BRANCH $CONFIG_REPO /home/$CONTROLLER_USER/ce-provision/config" /usr/bin/su - "$CONTROLLER_USER" -c "/usr/bin/ln -s /home/$CONTROLLER_USER/ce-provision/config/ansible.cfg /home/$CONTROLLER_USER/ce-provision/ansible.cfg" else - /usr/bin/echo "ce-provision directory at /home/$CONTROLLER_USER/ce-provision already exists. Skipping." + /usr/bin/echo "ce-provision directory at /home/$CONTROLLER_USER/ce-provision already exists. Updating." 
+ /usr/bin/su - "$CONTROLLER_USER" -c "cd /home/$CONTROLLER_USER/ce-provision && git pull origin $VERSION" + /usr/bin/su - "$CONTROLLER_USER" -c "cd /home/$CONTROLLER_USER/ce-provision/config && git pull origin $CONFIG_REPO_BRANCH" /usr/bin/echo "-------------------------------------------------" fi /usr/bin/mkdir -p "/home/$CONTROLLER_USER/ce-provision/galaxy/roles" From edb18c24f107f53db530f8b9beeb22b31fb00f69 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 9 Jan 2025 13:04:46 +0100 Subject: [PATCH 03/15] Providing some default playbooks people can use in ce-provision. --- plays/aws_account/README.md | 3 ++ plays/aws_account/aws_account.yml | 16 +++++++ plays/aws_asg/README.md | 14 ++++++ plays/aws_asg/ami.yml | 35 ++++++++++++++ plays/aws_asg/asg.yml | 25 ++++++++++ plays/aws_asg/cluster.yml | 5 ++ plays/aws_ec2_standalone/README.md | 8 ++++ plays/aws_ec2_standalone/ami.yml | 20 ++++++++ plays/aws_ec2_standalone/ec2.yml | 24 ++++++++++ plays/aws_ec2_standalone/launch.yml | 38 +++++++++++++++ plays/aws_ec2_standalone/mysql_client.yml | 32 +++++++++++++ plays/aws_ec2_standalone/provision.yml | 29 ++++++++++++ plays/aws_ec2_standalone/rds.yml | 56 +++++++++++++++++++++++ plays/aws_ec2_standalone/server.yml | 11 +++++ plays/aws_region/README.md | 3 ++ plays/aws_region/aws_region.yml | 17 +++++++ plays/controller/README.md | 31 +++++++++++++ plays/controller/aws_controller.yml | 7 +++ plays/controller/provision.yml | 22 +++++++++ plays/deploy/README.md | 31 +++++++++++++ plays/deploy/aws_deploy.yml | 7 +++ plays/deploy/provision.yml | 22 +++++++++ roles/_init/defaults/main.yml | 6 +++ 23 files changed, 462 insertions(+) create mode 100644 plays/aws_account/README.md create mode 100644 plays/aws_account/aws_account.yml create mode 100644 plays/aws_asg/README.md create mode 100644 plays/aws_asg/ami.yml create mode 100644 plays/aws_asg/asg.yml create mode 100644 plays/aws_asg/cluster.yml create mode 100644 plays/aws_ec2_standalone/README.md create mode 100644 
plays/aws_ec2_standalone/ami.yml create mode 100644 plays/aws_ec2_standalone/ec2.yml create mode 100644 plays/aws_ec2_standalone/launch.yml create mode 100644 plays/aws_ec2_standalone/mysql_client.yml create mode 100644 plays/aws_ec2_standalone/provision.yml create mode 100644 plays/aws_ec2_standalone/rds.yml create mode 100644 plays/aws_ec2_standalone/server.yml create mode 100644 plays/aws_region/README.md create mode 100644 plays/aws_region/aws_region.yml create mode 100644 plays/controller/README.md create mode 100644 plays/controller/aws_controller.yml create mode 100644 plays/controller/provision.yml create mode 100644 plays/deploy/README.md create mode 100644 plays/deploy/aws_deploy.yml create mode 100644 plays/deploy/provision.yml diff --git a/plays/aws_account/README.md b/plays/aws_account/README.md new file mode 100644 index 000000000..b47a9d428 --- /dev/null +++ b/plays/aws_account/README.md @@ -0,0 +1,3 @@ +# Base playbook for configuring an AWS account. + +@TODO provide example infra repo for use with the AWS EC2 inventory plugin. diff --git a/plays/aws_account/aws_account.yml b/plays/aws_account/aws_account.yml new file mode 100644 index 000000000..f6184b34c --- /dev/null +++ b/plays/aws_account/aws_account.yml @@ -0,0 +1,16 @@ +--- +# Global infra setup. +- hosts: localhost + connection: local + become: false + vars: + _init: + vars_dirs: + - "{{ _ce_provision_build_dir }}/vars/_global" + # used for tagging + _profile: core + _env_type: core + roles: + - _init + - _meta/aws_account + - _exit diff --git a/plays/aws_asg/README.md b/plays/aws_asg/README.md new file mode 100644 index 000000000..9ee9b5757 --- /dev/null +++ b/plays/aws_asg/README.md @@ -0,0 +1,14 @@ +# Base playbooks for creating a new AWS ASG. 
+For a standard ASG build just add `cluster.yml` to your environment play, like this: + +```yaml +- import_playbook: "{{ _ce_provision_base_dir }}/plays/aws_asg/cluster.yml" + vars: + _aws_region: eu-west-1 + _env_type: dev + _aws_resource_name: cluster-acme-com +``` + +If you have specific requirements for your AMIs you can copy these plays to your infra repository and alter them accordingly. Don't forget to copy/include `launch.yml` from the `aws_ec2_standalone` plays or orchestration of brand new clusters will fail. + +@TODO provide example infra repo for use with the AWS EC2 inventory plugin. diff --git a/plays/aws_asg/ami.yml b/plays/aws_asg/ami.yml new file mode 100644 index 000000000..5a82deac5 --- /dev/null +++ b/plays/aws_asg/ami.yml @@ -0,0 +1,35 @@ +--- +# This is the provisioning for the AMI and will run inside a temporary instance using Packer. +- hosts: default + become: true + + vars: + _init: + vars_dirs: + - "{{ _ce_provision_build_dir }}/vars/_global" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/_common" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/{{ _env_type }}" + - "{{ _ce_provision_build_dir }}/vars/{{ _aws_resource_name }}" + _profile: asg + + tasks: + - name: Upgrade the system and update cache + ansible.builtin.apt: + upgrade: dist + update_cache: true + - ansible.builtin.import_role: + name: _init + - ansible.builtin.import_role: + name: _meta/aws_client_instance + - ansible.builtin.import_role: + name: _meta/webserver + - ansible.builtin.import_role: + name: debian/aws_efs_client + - ansible.builtin.import_role: + name: debian/squashfs + - ansible.builtin.import_role: + name: debian/mount_sync + - ansible.builtin.import_role: + name: debian/swap + - ansible.builtin.import_role: + name: _exit diff --git a/plays/aws_asg/asg.yml b/plays/aws_asg/asg.yml new file mode 100644 index 000000000..9a1367869 --- /dev/null +++ b/plays/aws_asg/asg.yml @@ -0,0 +1,25 @@ +--- +# Common ASG infra. 
+- hosts: "_{{ _aws_resource_name | regex_replace('-', '_') }}" + connection: local + become: false + + vars: + _init: + vars_dirs: + - "{{ _ce_provision_build_dir }}/vars/_global" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/_common" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/{{ _env_type }}" + - "{{ _ce_provision_build_dir }}/vars/{{ _aws_resource_name }}" + _profile: asg + + tasks: + - ansible.builtin.import_role: + name: _init + run_once: true + - ansible.builtin.import_role: + name: aws/aws_ec2_autoscale_cluster + run_once: true + - ansible.builtin.import_role: + name: _exit + run_once: true diff --git a/plays/aws_asg/cluster.yml b/plays/aws_asg/cluster.yml new file mode 100644 index 000000000..9a2f93f03 --- /dev/null +++ b/plays/aws_asg/cluster.yml @@ -0,0 +1,5 @@ +--- +# Creates hosts entry so play isn't skipped. +- ansible.builtin.import_playbook: ../aws_ec2_standalone/launch.yml +# Spins up the cluster. +- ansible.builtin.import_playbook: asg.yml diff --git a/plays/aws_ec2_standalone/README.md b/plays/aws_ec2_standalone/README.md new file mode 100644 index 000000000..ea577ab1c --- /dev/null +++ b/plays/aws_ec2_standalone/README.md @@ -0,0 +1,8 @@ +# Base playbook for setting up a standalone EC2 instance. +IMPORTANT: these plays deliberately exclude the `_init._profile` variable because it needs to be set in the infra repo. + +The `server.yml` file is the 'main' play but this needs to copied to the infra repo and renamed as `hostname.yml` e.g. `acme-dev1.yml`. You also need to copy `provision.yml` so you can control what is provisioned. The `provision.yml` file is intended only as a model. 
+ +If you want a separate RDS instance to pair with your EC2 instance then uncomment the last two play import lines in `server.yml`, however note you do need to sort out outbound firewall ports in iptables and a Security Group for inbound traffic to the RDS instance - usually port `3306` outbound from the EC2 instance in `firewall_config` and an SG that allows `3306` inbound to RDS. + +@TODO provide example infra repo for use with the AWS EC2 inventory plugin. diff --git a/plays/aws_ec2_standalone/ami.yml b/plays/aws_ec2_standalone/ami.yml new file mode 100644 index 000000000..e2fa4297d --- /dev/null +++ b/plays/aws_ec2_standalone/ami.yml @@ -0,0 +1,20 @@ +--- +# This is the bare provisioning for the AMI. +- hosts: default + become: true + + vars: + _init: + vars_dirs: + - "{{ _ce_provision_build_dir }}/vars/_global" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/_common" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/{{ _env_type }}" + - "{{ _ce_provision_build_dir }}/vars/{{ _aws_resource_name }}" + + tasks: + - ansible.builtin.import_role: + name: _init + - ansible.builtin.import_role: + name: debian/user_provision + - ansible.builtin.import_role: + name: _exit diff --git a/plays/aws_ec2_standalone/ec2.yml b/plays/aws_ec2_standalone/ec2.yml new file mode 100644 index 000000000..84b2bcfbc --- /dev/null +++ b/plays/aws_ec2_standalone/ec2.yml @@ -0,0 +1,24 @@ +--- +# First step. Spin up a "blank" instance from a fresh AMI. 
+- hosts: "_{{ _aws_resource_name | regex_replace('-', '_') }}" + connection: local + become: false + + vars: + _init: + vars_dirs: + - "{{ _ce_provision_build_dir }}/vars/_global" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/_common" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/{{ _env_type }}" + - "{{ _ce_provision_build_dir }}/vars/{{ _aws_resource_name }}" + + tasks: + - ansible.builtin.import_role: + name: _init + - ansible.builtin.import_role: + name: aws/aws_ami + - ansible.builtin.import_role: + name: aws/aws_ec2_with_eip + - ansible.builtin.import_role: + name: _exit + - ansible.builtin.meta: refresh_inventory diff --git a/plays/aws_ec2_standalone/launch.yml b/plays/aws_ec2_standalone/launch.yml new file mode 100644 index 000000000..2d9f13ac6 --- /dev/null +++ b/plays/aws_ec2_standalone/launch.yml @@ -0,0 +1,38 @@ +--- +# Prepare the ground for a new EC2 machine +- hosts: localhost + connection: local + become: false + + vars: + _init: + vars_dirs: + - "{{ _ce_provision_build_dir }}/vars/_global" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/_common" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/{{ _env_type }}" + - "{{ _ce_provision_build_dir }}/vars/{{ _aws_resource_name }}" + # copied from aws_ami.yml in group_vars/all because we do not want to load aws_ami vars yet + ami_groups: + - "all" + - "_{{ _aws_resource_name | regex_replace('-', '_') }}" + - "_{{ _infra_name | regex_replace('-', '_') }}" + - "_{{ _env_type | regex_replace('-', '_') }}" + + tasks: + - ansible.builtin.import_role: + name: _init + - name: Blank the _aws_hostname variable. + ansible.builtin.set_fact: + _aws_hostname: "" + - name: Check to see if an Ansible host exists. 
+ ansible.builtin.set_fact: + _aws_hostname: "{{ item }}" + with_inventory_hostnames: + - "_{{ _aws_resource_name | regex_replace('-', '_') }}" + - name: If an Ansible host is not found, create it so we can execute EC2 orchestration. + ansible.builtin.add_host: + name: "_{{ _aws_resource_name | regex_replace('-', '_') }}" + groups: "{{ ami_groups }}" + when: _aws_hostname | length == 0 + - ansible.builtin.import_role: + name: _exit diff --git a/plays/aws_ec2_standalone/mysql_client.yml b/plays/aws_ec2_standalone/mysql_client.yml new file mode 100644 index 000000000..b39135fbc --- /dev/null +++ b/plays/aws_ec2_standalone/mysql_client.yml @@ -0,0 +1,32 @@ +--- +- hosts: "_{{ _aws_resource_name | regex_replace('-', '_') }}" + become: true + + vars: + _init: + vars_dirs: + - "{{ _ce_provision_build_dir }}/vars/_global" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/_common" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/{{ _env_type }}" + - "{{ _ce_provision_build_dir }}/vars/{{ _aws_resource_name }}" + + tasks: + - ansible.builtin.import_role: + name: _init + - ansible.builtin.import_role: + name: debian/user_deploy + # Look up RDS hostname + - name: Get information about an instance + community.aws.rds_instance_info: + region: "{{ _aws_region }}" + profile: "{{ _aws_profile }}" + db_instance_identifier: "{{ _aws_resource_name }}" + become: true + become_user: "{{ user_provision.username }}" + delegate_to: localhost # needs to run on controller + register: _database_info + # Install MySQL client + - ansible.builtin.import_role: + name: debian/mysql_client + - ansible.builtin.import_role: + name: _exit diff --git a/plays/aws_ec2_standalone/provision.yml b/plays/aws_ec2_standalone/provision.yml new file mode 100644 index 000000000..bbe70e8dd --- /dev/null +++ b/plays/aws_ec2_standalone/provision.yml @@ -0,0 +1,29 @@ +--- +- hosts: "_{{ _aws_resource_name | regex_replace('-', '_') }}" + become: true + + vars: + _init: + vars_dirs: 
+ - "{{ _ce_provision_build_dir }}/vars/_global" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/_common" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/{{ _env_type }}" + - "{{ _ce_provision_build_dir }}/vars/{{ _aws_resource_name }}" + + tasks: + - ansible.builtin.import_role: + name: _init + - ansible.builtin.import_role: + name: ce_ldap_safelist + - ansible.builtin.import_role: + name: _meta/common_base + - ansible.builtin.import_role: + name: _meta/aws_client_instance + - ansible.builtin.import_role: + name: debian/ssh_server + - ansible.builtin.import_role: + name: debian/firewall_config + - ansible.builtin.import_role: + name: debian/swap + - ansible.builtin.import_role: + name: _exit diff --git a/plays/aws_ec2_standalone/rds.yml b/plays/aws_ec2_standalone/rds.yml new file mode 100644 index 000000000..5815bb2dd --- /dev/null +++ b/plays/aws_ec2_standalone/rds.yml @@ -0,0 +1,56 @@ +--- +# Create an RDS instance. +- hosts: "_{{ _aws_resource_name | regex_replace('-', '_') }}" + connection: local + become: false + + vars: + _init: + vars_dirs: + - "{{ _ce_provision_build_dir }}/vars/_global" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/_common" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/{{ _env_type }}" + - "{{ _ce_provision_build_dir }}/vars/{{ _aws_resource_name }}" + + tasks: + - ansible.builtin.import_role: + name: _init + + # Automate subnet fetching + - name: Create empty var to hold subnet IDs. + ansible.builtin.set_fact: + _aws_rds_vpc_subnet_ids: [] + + - name: Gather VPC information. + amazon.aws.ec2_vpc_net_info: + profile: "{{ aws_rds.aws_profile }}" + region: "{{ aws_rds.region }}" + filters: + "tag:Name": "{{ _infra_name }}" + register: _aws_rds_vpc + + - name: Set the VPC id from name. + ansible.builtin.set_fact: + _aws_rds_vpc_id: "{{ _aws_rds_vpc.vpcs[0].vpc_id }}" + + - name: Gather public subnet information. 
+ amazon.aws.ec2_vpc_subnet_info: + profile: "{{ aws_rds.aws_profile }}" + region: "{{ aws_rds.region }}" + filters: + vpc-id: "{{ _aws_rds_vpc_id }}" + tag:Env: "{{ _env_type }}" + tag:Profile: "core" + register: _aws_rds_vpc_subnets + + - name: Place subnet IDs in a list. + ansible.builtin.set_fact: + _aws_rds_vpc_subnet_ids: "{{ _aws_rds_vpc_subnet_ids + [item.subnet_id] }}" + loop: "{{ _aws_rds_vpc_subnets.subnets }}" + + # Build the RDS instance. + - ansible.builtin.import_role: + name: aws/aws_rds + + - ansible.builtin.import_role: + name: _exit diff --git a/plays/aws_ec2_standalone/server.yml b/plays/aws_ec2_standalone/server.yml new file mode 100644 index 000000000..282a73f16 --- /dev/null +++ b/plays/aws_ec2_standalone/server.yml @@ -0,0 +1,11 @@ +# Prepares a host entry so the ec2.yml play succeeds. +- ansible.builtin.import_playbook: "{{ _ce_provision_base_dir }}/plays/aws_ec2_standalone/launch.yml" +# Spins up the instance. +# We use the central _deploy role to provision the EC2 instance to avoid duplication. +- ansible.builtin.import_playbook: "{{ _ce_provision_base_dir }}/plays/aws_ec2_standalone/ec2.yml" +# Actual provisioning +- ansible.builtin.import_playbook: provision.yml +# RDS instance +#- ansible.builtin.import_playbook: "{{ _ce_provision_base_dir }}/plays/aws_ec2_standalone/rds.yml" +# MySQL client - needs to happen after RDS instance is created +#- ansible.builtin.import_playbook: "{{ _ce_provision_base_dir }}/plays/aws_ec2_standalone/mysql_client.yml" diff --git a/plays/aws_region/README.md b/plays/aws_region/README.md new file mode 100644 index 000000000..8073b43fc --- /dev/null +++ b/plays/aws_region/README.md @@ -0,0 +1,3 @@ +# Base playbook for configuring an AWS region. + +@TODO provide example infra repo for use with the AWS EC2 inventory plugin. 
diff --git a/plays/aws_region/aws_region.yml b/plays/aws_region/aws_region.yml new file mode 100644 index 000000000..38974eeda --- /dev/null +++ b/plays/aws_region/aws_region.yml @@ -0,0 +1,17 @@ +--- +# Global infra setup. +- hosts: localhost + connection: local + become: false + vars: + _init: + vars_dirs: + - "{{ _ce_provision_build_dir }}/vars/_global" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/_common" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/{{ _env_type }}" + # used for tagging + _profile: core + roles: + - _init + - _meta/aws_region + - _exit diff --git a/plays/controller/README.md b/plays/controller/README.md new file mode 100644 index 000000000..f27e06a85 --- /dev/null +++ b/plays/controller/README.md @@ -0,0 +1,31 @@ +# Base playbook for setting up an infra controller. +This playbook provides a model for managing an Ansible infra controller with ce-provision based at AWS. + +If your server is not in AWS or you are not using the AWS EC2 inventory plugin, you must ensure your server's hostname is in your Ansible hosts file (`config/hosts/hosts` or `hosts.yml`) and provide the same hostname in the `_provision_host` variable. Then call `provision.yml` directly, for example: + +```yaml +--- +- name: Configure my controller server. + ansible.builtin.import_playbook: "{{ _ce_provision_base_dir }}/plays/controller/provision.yml" + vars: + _env_type: util + _provision_host: controller.acme.com + _profile: controller +``` + +If you are using the AWS EC2 inventory plugin and the Code Enigma recommended set-up, you must provide the `_aws_resource_name` variable - note, this is hyphenated, no dots - and call `aws_controller.yml`, for example: + +```yaml +--- +- name: Configure my controller server at AWS. 
+ ansible.builtin.import_playbook: "{{ _ce_provision_base_dir }}/plays/controller/aws_controller.yml" + vars: + _env_type: util + _aws_region: eu-west-1 + _aws_resource_name: controller-acme-com + _profile: controller +``` + +This will create or find an EC2 instance with the AWS tag of `Name: controller-acme-com` which will be in an inventory group called `_controller_acme_com`. + +@TODO provide example infra repo for use with the AWS EC2 inventory plugin. diff --git a/plays/controller/aws_controller.yml b/plays/controller/aws_controller.yml new file mode 100644 index 000000000..2c3380008 --- /dev/null +++ b/plays/controller/aws_controller.yml @@ -0,0 +1,7 @@ +--- +# Creates hosts entry so play isn't skipped. +- ansible.builtin.import_playbook: ../aws_ec2_standalone/launch.yml +# Spins up the instance. +- ansible.builtin.import_playbook: ../aws_ec2_standalone/ec2.yml +# Actual provisioning +- ansible.builtin.import_playbook: provision.yml diff --git a/plays/controller/provision.yml b/plays/controller/provision.yml new file mode 100644 index 000000000..7ef6c54ea --- /dev/null +++ b/plays/controller/provision.yml @@ -0,0 +1,22 @@ +--- +- hosts: "{{ _provision_host | default('_' + _aws_resource_name | regex_replace('-', '_')) }}" + become: true + + vars: + _init: + vars_dirs: + - "{{ _ce_provision_build_dir }}/vars/_global" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/_common" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/{{ _env_type }}" + - "{{ _ce_provision_build_dir }}/vars/{{ _aws_resource_name }}" + _profile: controller + + tasks: + - ansible.builtin.import_role: + name: _init + - ansible.builtin.import_role: + name: _meta/aws_client_instance + - ansible.builtin.import_role: + name: _meta/controller + - ansible.builtin.import_role: + name: _exit diff --git a/plays/deploy/README.md b/plays/deploy/README.md new file mode 100644 index 000000000..498355b3e --- /dev/null +++ b/plays/deploy/README.md @@ -0,0 +1,31 @@ +# Base 
playbook for setting up a deploy server. +This playbook provides a model for managing an Ansible application deployment server with ce-deploy based at AWS. + +If your server is not in AWS or you are not using the AWS EC2 inventory plugin, you must ensure your server's hostname is in your Ansible hosts file (`config/hosts/hosts` or `hosts.yml`) and provide the same hostname in the `_provision_host` variable. Then call `provision.yml` directly, for example: + +```yaml +--- +- name: Configure my deploy server. + ansible.builtin.import_playbook: "{{ _ce_provision_base_dir }}/plays/deploy/provision.yml" + vars: + _env_type: util + _provision_host: deploy.acme.com + _profile: deploy +``` + +If you are using the AWS EC2 inventory plugin and the Code Enigma recommended set-up, you must provide the `_aws_resource_name` variable - note, this is hyphenated, no dots - and call `aws_deploy.yml`, for example: + +```yaml +--- +- name: Configure my deploy server at AWS. + ansible.builtin.import_playbook: "{{ _ce_provision_base_dir }}/plays/deploy/aws_deploy.yml" + vars: + _env_type: util + _aws_region: eu-west-1 + _aws_resource_name: deploy-acme-com + _profile: deploy +``` + +This will create or find an EC2 instance with the AWS tag of `Name: deploy-acme-com` which will be in an inventory group called `_deploy_acme_com`. + +@TODO provide example infra repo for use with the AWS EC2 inventory plugin. diff --git a/plays/deploy/aws_deploy.yml b/plays/deploy/aws_deploy.yml new file mode 100644 index 000000000..2c3380008 --- /dev/null +++ b/plays/deploy/aws_deploy.yml @@ -0,0 +1,7 @@ +--- +# Creates hosts entry so play isn't skipped. +- ansible.builtin.import_playbook: ../aws_ec2_standalone/launch.yml +# Spins up the instance. 
+- ansible.builtin.import_playbook: ../aws_ec2_standalone/ec2.yml +# Actual provisioning +- ansible.builtin.import_playbook: provision.yml diff --git a/plays/deploy/provision.yml b/plays/deploy/provision.yml new file mode 100644 index 000000000..4d4baac7e --- /dev/null +++ b/plays/deploy/provision.yml @@ -0,0 +1,22 @@ +--- +- hosts: "{{ _provision_host | default('_' + _aws_resource_name | regex_replace('-', '_')) }}" + become: true + + vars: + _init: + vars_dirs: + - "{{ _ce_provision_build_dir }}/vars/_global" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/_common" + - "{{ _ce_provision_build_dir }}/vars/_regions/{{ _aws_region }}/{{ _env_type }}" + - "{{ _ce_provision_build_dir }}/vars/{{ _aws_resource_name }}" + _profile: deploy + + tasks: + - ansible.builtin.import_role: + name: _init + - ansible.builtin.import_role: + name: _meta/aws_client_instance + - ansible.builtin.import_role: + name: _meta/deploy + - ansible.builtin.import_role: + name: _exit diff --git a/roles/_init/defaults/main.yml b/roles/_init/defaults/main.yml index 5c2d85d42..5a106ce4c 100644 --- a/roles/_init/defaults/main.yml +++ b/roles/_init/defaults/main.yml @@ -13,6 +13,12 @@ _ce_ansible_timer_name: upgrade_ansible #_aws_profile: example # boto profile name #_aws_region: eu-west-1 +# AWS tags +_aws_resource_name: "" # Name +# _profile: web_server # Profile +# _env_type: dev # Env +# _infra_name: acme # Infra + _init: # A list of var directories to include. We only support .yml extensions. # This is used to detect if the playbook must re-run or not. From a2e0283f5192ad12e5b91520eb690cec279c5cae Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 9 Jan 2025 13:13:07 +0100 Subject: [PATCH 04/15] Slight docs tweak for showtime! 
--- plays/aws_ec2_standalone/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plays/aws_ec2_standalone/README.md b/plays/aws_ec2_standalone/README.md index ea577ab1c..7c7872193 100644 --- a/plays/aws_ec2_standalone/README.md +++ b/plays/aws_ec2_standalone/README.md @@ -1,7 +1,7 @@ # Base playbook for setting up a standalone EC2 instance. -IMPORTANT: these plays deliberately exclude the `_init._profile` variable because it needs to be set in the infra repo. +IMPORTANT: these plays deliberately exclude the `_init._profile` variable because it usually needs to be set at runtime or in a separate infrastructure config repo. -The `server.yml` file is the 'main' play but this needs to copied to the infra repo and renamed as `hostname.yml` e.g. `acme-dev1.yml`. You also need to copy `provision.yml` so you can control what is provisioned. The `provision.yml` file is intended only as a model. +The `server.yml` file is the 'main' play, to customise we suggest this is copied to an infra repo and renamed as `hostname.yml` e.g. `acme-dev1.yml`. You also need to copy `provision.yml` so you can control what is provisioned. The `provision.yml` file is intended only as a model. If you want a separate RDS instance to pair with your EC2 instance then uncomment the last two play import lines in `server.yml`, however note you do need to sort out outbound firewall ports in iptables and a Security Group for inbound traffic to the RDS instance - usually port `3306` outbound from the EC2 instance in `firewall_config` and an SG that allows `3306` inbound to RDS. From c4516b8e3d5c81e6ba5006c6e180dbd958d44f69 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 9 Jan 2025 19:09:47 +0100 Subject: [PATCH 05/15] Modernising hostname handling to use systemd. 
--- roles/debian/hosts/tasks/main.yml | 9 +++------ roles/debian/hosts/templates/hostname.j2 | 1 - 2 files changed, 3 insertions(+), 7 deletions(-) delete mode 100644 roles/debian/hosts/templates/hostname.j2 diff --git a/roles/debian/hosts/tasks/main.yml b/roles/debian/hosts/tasks/main.yml index fc8d9b80f..6845e96c7 100644 --- a/roles/debian/hosts/tasks/main.yml +++ b/roles/debian/hosts/tasks/main.yml @@ -27,10 +27,7 @@ - cloud_init_file.stat.exists - name: Set system hostname. - ansible.builtin.template: - src: hostname.j2 - dest: /etc/hostname - owner: root - group: root - mode: 0644 + ansible.builtin.hostname: + name: "{{ hosts_hostname }}" + use: systemd when: hosts_hostname | length > 0 diff --git a/roles/debian/hosts/templates/hostname.j2 b/roles/debian/hosts/templates/hostname.j2 deleted file mode 100644 index 6c9f6f6d9..000000000 --- a/roles/debian/hosts/templates/hostname.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ hosts_hostname }} From 615870d0621dd2094af56a1c5fe2a05bf3d6feeb Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 9 Jan 2025 19:10:43 +0100 Subject: [PATCH 06/15] Switching default key type to ED25519 because it is supported by both Debian and GitLab. --- roles/aws/aws_ami/defaults/main.yml | 2 +- roles/debian/ce_deploy/defaults/main.yml | 6 +++--- roles/debian/ce_provision/defaults/main.yml | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/roles/aws/aws_ami/defaults/main.yml b/roles/aws/aws_ami/defaults/main.yml index c828adaec..64908bacf 100644 --- a/roles/aws/aws_ami/defaults/main.yml +++ b/roles/aws/aws_ami/defaults/main.yml @@ -9,7 +9,7 @@ aws_ami: ami_name: "example" owner: "136693071363" # Global AWS account ID of owner, defaults to Debian official ssh_username: "admin" - public_key_name: id_ecdsa.pub # from Debian 12 (Bookworm) onwards RSA keys, i.e. id_rsa.pub, are deprecated + public_key_name: id_ed25519.pub # from Debian 12 (Bookworm) onwards RSA keys, i.e. 
id_rsa.pub, are deprecated encrypt_boot: false # EBS volume options device_name: /dev/xvda # default for Debian AMIs diff --git a/roles/debian/ce_deploy/defaults/main.yml b/roles/debian/ce_deploy/defaults/main.yml index b4af9748b..8e3db5083 100644 --- a/roles/debian/ce_deploy/defaults/main.yml +++ b/roles/debian/ce_deploy/defaults/main.yml @@ -12,9 +12,9 @@ ce_deploy: # Other ce-deploy settings. aws_support: true # installs boto3 new_user: true # set to false if user already exists or is ephemeral, e.g. an LDAP user - ssh_key_bits: "521" # recommended to use 4096 for RSA keys, 521 is the maximum for ECDSA keys - ssh_key_type: ecdsa # set to rsa to create an RSA key - public_key_name: id_ecdsa.pub # this might be id_rsa.pub for RSA keys, existing users may have a key of a different name + ssh_key_bits: "521" # ignored for ED25519 keys, recommended to use 4096 for RSA keys, 521 is the maximum for ECDSA keys + ssh_key_type: ed25519 # set to rsa to create an RSA key or ecdsa to set an ECDSA key + public_key_name: id_ed25519.pub # this might be id_rsa.pub for RSA keys or id_ecdsa.pub for ECDSA keys, existing users may have a key of a different name username: "{{ _ce_deploy.username }}" own_repository: "https://github.com/codeenigma/ce-deploy.git" own_repository_branch: "master" diff --git a/roles/debian/ce_provision/defaults/main.yml b/roles/debian/ce_provision/defaults/main.yml index a0048a3bf..8c1888b42 100644 --- a/roles/debian/ce_provision/defaults/main.yml +++ b/roles/debian/ce_provision/defaults/main.yml @@ -12,9 +12,9 @@ ce_provision: new_user: "{{ _init.ce_provision_new_user }}" # see _init defaults, set to false if user already exists or is ephemeral, e.g. 
an LDAP user username: "{{ _ce_provision_username }}" # see _init defaults #uid: "{{ _init.ce_provision_uid }}" # see _init defaults, optionally hardcode the UID for this user - ssh_key_bits: "521" # recommended to use 4096 for RSA keys, 521 is the maximum for ECDSA keys - ssh_key_type: ecdsa # set to rsa to create an RSA key - public_key_name: id_ecdsa.pub # this might be id_rsa.pub for RSA keys, existing users may have a key of a different name + ssh_key_bits: "521" # ignored for ED25519 keys, recommended to use 4096 for RSA keys, 521 is the maximum for ECDSA keys + ssh_key_type: ed25519 # set to rsa to create an RSA key or ecdsa to set an ECDSA key + public_key_name: id_ed25519.pub # this might be id_rsa.pub for RSA keys or id_ecdsa.pub for ECDSA keys, existing users may have a key of a different name # Main repo. own_repository: "https://github.com/codeenigma/ce-provision.git" own_repository_branch: "master" From ae50f72b9c9829a9ca95661dfd2d2140996e592a Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 9 Jan 2025 19:11:17 +0100 Subject: [PATCH 07/15] Switching to ED25519 SSH keys and adding hostname and hosts handling to installer. 
--- install.sh | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/install.sh b/install.sh index 94fc15548..7cbba7262 100755 --- a/install.sh +++ b/install.sh @@ -15,6 +15,7 @@ usage(){ /usr/bin/echo '--user: Ansible controller user (default: controller)' /usr/bin/echo '--config: Git URL to your ce-provision Ansible config repository (default: https://github.com/codeenigma/ce-provision-config-example.git)' /usr/bin/echo '--config-branch: branch of your Ansible config repository to use (default: 2.x)' + /usr/bin/echo '--hostname: the server hostname to set (default: depends on system or provider)' /usr/bin/echo '--no-firewall: skip installing iptables with ports 22, 80 and 443 open' /usr/bin/echo '--gitlab: install GitLab CE on this server (default: no, set to desired GitLab address to install, e.g. gitlab.example.com)' /usr/bin/echo '--letsencrypt: try to create an SSL certificate with LetsEncrypt (requires DNS pointing at this server for provided GitLab URL)' @@ -43,6 +44,10 @@ parse_options(){ shift CONFIG_REPO_BRANCH="$1" ;; + "--hostname") + shift + SERVER_HOSTNAME="$1" + ;; "--gitlab") shift GITLAB_URL="$1" @@ -83,11 +88,6 @@ SERVER_HOSTNAME=$(hostname) # Parse options. parse_options "$@" -# Set the hostname for Git email to our GitLab URL, if set. -if [ "$GITLAB_URL" != "no" ]; then - SERVER_HOSTNAME=$GITLAB_URL -fi - # Check root user. if [ "$(id -u)" -ne 0 ] then echo "Please run this script as root or using sudo!" @@ -175,6 +175,9 @@ fi vars_files: - vars.yml tasks: + - name: Configure system hosts file. + ansible.builtin.import_role: + name: debian/hosts - name: Install ce-provision. 
ansible.builtin.import_role: name: debian/ce_provision @@ -188,6 +191,10 @@ EOL _domain_name: ${SERVER_HOSTNAME} _ce_provision_data_dir: /home/${CONTROLLER_USER}/ce-provision/data _ce_provision_username: ${CONTROLLER_USER} +hosts_hostname: ${SERVER_HOSTNAME} +hosts_entries: + - name: ${SERVER_HOSTNAME} + ip: 127.0.0.1 ce_provision: venv_path: /home/${CONTROLLER_USER}/ce-python venv_command: /usr/bin/python3 -m venv @@ -197,8 +204,8 @@ ce_provision: new_user: ${CONTROLLER_USER} username: ${CONTROLLER_USER} ssh_key_bits: "521" - ssh_key_type: ecdsa - public_key_name: id_ecdsa.pub + ssh_key_type: ed25519 + public_key_name: id_ed25519.pub own_repository: "https://github.com/codeenigma/ce-provision.git" own_repository_branch: "${VERSION}" own_repository_skip_checkout: false @@ -231,7 +238,7 @@ user_provision: groups: - bypass2fa ssh_keys: - - "{{ lookup('file', '/home/${CONTROLLER_USER}/ce-provision/data/localhost/home/${CONTROLLER_USER}/.ssh/id_ecdsa.pub') }}" + - "{{ lookup('file', '/home/${CONTROLLER_USER}/ce-provision/data/localhost/home/${CONTROLLER_USER}/.ssh/id_ed25519.pub') }}" ssh_private_keys: [] known_hosts: [] known_hosts_hash: true @@ -296,6 +303,9 @@ if [ "$GITLAB_URL" != "no" ]; then vars_files: - vars.yml tasks: + - name: Configure system hosts file. + ansible.builtin.import_role: + name: debian/hosts - name: Install GitLab Runner. 
ansible.builtin.import_role: name: debian/gitlab_runner @@ -307,6 +317,12 @@ EOL /bin/cat >"/home/$CONTROLLER_USER/ce-provision/vars.yml" << EOL --- _domain_name: ${SERVER_HOSTNAME} +hosts_hostname: ${SERVER_HOSTNAME} +hosts_entries: + - name: ${SERVER_HOSTNAME} + ip: 127.0.0.1 + aliases: + - ${GITLAB_URL} gitlab_runner: apt_origin: "origin=packages.gitlab.com/runner/gitlab-runner,codename=\${distro_codename},label=gitlab-runner" # used by apt_unattended_upgrades apt_signed_by: https://packages.gitlab.com/runner/gitlab-runner/gpgkey @@ -341,7 +357,7 @@ gitlab: private_projects: true unicorn_worker_processes: 2 puma_worker_processes: 2 - initial_root_password: "Ch@ng3m3" + initial_root_password: "{{ lookup('password', '/tmp/passwordfile chars=ascii_letters,digits') }}" ldap: enable: false mattermost: false From 9ec571e112b67a70a8af4d8d8336c5755b135ab3 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 9 Jan 2025 19:28:11 +0100 Subject: [PATCH 08/15] Adding iproute2 package so hosts role works. 
--- .github/workflows/ce-provision-test-gitlab.yml | 1 + .github/workflows/ce-provision-test-web.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/ce-provision-test-gitlab.yml b/.github/workflows/ce-provision-test-gitlab.yml index 9e5b46a30..a5e00218a 100644 --- a/.github/workflows/ce-provision-test-gitlab.yml +++ b/.github/workflows/ce-provision-test-gitlab.yml @@ -23,6 +23,7 @@ jobs: steps: - name: Install ce-provision run: | + /usr/bin/apt-get install iproute2 -y /usr/bin/curl -LO https://raw.githubusercontent.com/codeenigma/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall diff --git a/.github/workflows/ce-provision-test-web.yml b/.github/workflows/ce-provision-test-web.yml index 595905064..01b431eb2 100644 --- a/.github/workflows/ce-provision-test-web.yml +++ b/.github/workflows/ce-provision-test-web.yml @@ -23,6 +23,7 @@ jobs: steps: - name: Install ce-provision run: | + /usr/bin/apt-get install iproute2 -y /usr/bin/curl -LO https://raw.githubusercontent.com/codeenigma/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall From d9268ed753eb104f04cd84372c75df5bd1e999ad Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 9 Jan 2025 19:29:42 +0100 Subject: [PATCH 09/15] Also need an apt-get update in CI. 
--- .github/workflows/ce-provision-test-gitlab.yml | 1 + .github/workflows/ce-provision-test-web.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/ce-provision-test-gitlab.yml b/.github/workflows/ce-provision-test-gitlab.yml index a5e00218a..08974ec8b 100644 --- a/.github/workflows/ce-provision-test-gitlab.yml +++ b/.github/workflows/ce-provision-test-gitlab.yml @@ -23,6 +23,7 @@ jobs: steps: - name: Install ce-provision run: | + /usr/bin/apt-get update /usr/bin/apt-get install iproute2 -y /usr/bin/curl -LO https://raw.githubusercontent.com/codeenigma/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh diff --git a/.github/workflows/ce-provision-test-web.yml b/.github/workflows/ce-provision-test-web.yml index 01b431eb2..59a268f74 100644 --- a/.github/workflows/ce-provision-test-web.yml +++ b/.github/workflows/ce-provision-test-web.yml @@ -23,6 +23,7 @@ jobs: steps: - name: Install ce-provision run: | + /usr/bin/apt-get update /usr/bin/apt-get install iproute2 -y /usr/bin/curl -LO https://raw.githubusercontent.com/codeenigma/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh From 64969f364d5cd7bb622846c4050cf13815c98096 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 9 Jan 2025 19:34:42 +0100 Subject: [PATCH 10/15] Change of plan, stop hosts running in containers. 
--- .github/workflows/ce-provision-test-gitlab.yml | 2 -- .github/workflows/ce-provision-test-web.yml | 2 -- install.sh | 2 ++ roles/debian/hosts/tasks/main.yml | 3 +-- 4 files changed, 3 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ce-provision-test-gitlab.yml b/.github/workflows/ce-provision-test-gitlab.yml index 08974ec8b..9e5b46a30 100644 --- a/.github/workflows/ce-provision-test-gitlab.yml +++ b/.github/workflows/ce-provision-test-gitlab.yml @@ -23,8 +23,6 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/apt-get update - /usr/bin/apt-get install iproute2 -y /usr/bin/curl -LO https://raw.githubusercontent.com/codeenigma/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall diff --git a/.github/workflows/ce-provision-test-web.yml b/.github/workflows/ce-provision-test-web.yml index 59a268f74..595905064 100644 --- a/.github/workflows/ce-provision-test-web.yml +++ b/.github/workflows/ce-provision-test-web.yml @@ -23,8 +23,6 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/apt-get update - /usr/bin/apt-get install iproute2 -y /usr/bin/curl -LO https://raw.githubusercontent.com/codeenigma/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall diff --git a/install.sh b/install.sh index 7cbba7262..777dba12c 100755 --- a/install.sh +++ b/install.sh @@ -178,6 +178,7 @@ fi - name: Configure system hosts file. ansible.builtin.import_role: name: debian/hosts + when: not is_local - name: Install ce-provision. 
ansible.builtin.import_role: name: debian/ce_provision @@ -306,6 +307,7 @@ if [ "$GITLAB_URL" != "no" ]; then - name: Configure system hosts file. ansible.builtin.import_role: name: debian/hosts + when: not is_local - name: Install GitLab Runner. ansible.builtin.import_role: name: debian/gitlab_runner diff --git a/roles/debian/hosts/tasks/main.yml b/roles/debian/hosts/tasks/main.yml index 6845e96c7..6d902fbfc 100644 --- a/roles/debian/hosts/tasks/main.yml +++ b/roles/debian/hosts/tasks/main.yml @@ -23,8 +23,7 @@ path: "{{ cloud_init_file.stat.path }}" regexp: "manage_etc_hosts: true" line: "manage_etc_hosts: false" - when: - - cloud_init_file.stat.exists + when: cloud_init_file.stat.exists - name: Set system hostname. ansible.builtin.hostname: From d497ebdb1338f9a5ed6924b9e352648878760186 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Fri, 10 Jan 2025 00:38:32 +0100 Subject: [PATCH 11/15] Small inline docs change. --- roles/debian/ce_deploy/defaults/main.yml | 2 +- roles/debian/ce_provision/defaults/main.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/debian/ce_deploy/defaults/main.yml b/roles/debian/ce_deploy/defaults/main.yml index 8e3db5083..c54c30192 100644 --- a/roles/debian/ce_deploy/defaults/main.yml +++ b/roles/debian/ce_deploy/defaults/main.yml @@ -13,7 +13,7 @@ ce_deploy: aws_support: true # installs boto3 new_user: true # set to false if user already exists or is ephemeral, e.g. 
an LDAP user ssh_key_bits: "521" # ignored for ED25519 keys, recommended to use 4096 for RSA keys, 521 is the maximum for ECDSA keys - ssh_key_type: ed25519 # set to rsa to create an RSA key or ecdsa to set an ECDSA key + ssh_key_type: ed25519 # set to rsa to create an RSA key or ecdsa to create an ECDSA key public_key_name: id_ed25519.pub # this might be id_rsa.pub for RSA keys or id_ecdsa.pub for ECDSA keys, existing users may have a key of a different name username: "{{ _ce_deploy.username }}" own_repository: "https://github.com/codeenigma/ce-deploy.git" diff --git a/roles/debian/ce_provision/defaults/main.yml b/roles/debian/ce_provision/defaults/main.yml index 8c1888b42..121244dee 100644 --- a/roles/debian/ce_provision/defaults/main.yml +++ b/roles/debian/ce_provision/defaults/main.yml @@ -13,7 +13,7 @@ ce_provision: username: "{{ _ce_provision_username }}" # see _init defaults #uid: "{{ _init.ce_provision_uid }}" # see _init defaults, optionally hardcode the UID for this user ssh_key_bits: "521" # ignored for ED25519 keys, recommended to use 4096 for RSA keys, 521 is the maximum for ECDSA keys - ssh_key_type: ed25519 # set to rsa to create an RSA key or ecdsa to set an ECDSA key + ssh_key_type: ed25519 # set to rsa to create an RSA key or ecdsa to create an ECDSA key public_key_name: id_ed25519.pub # this might be id_rsa.pub for RSA keys or id_ecdsa.pub for ECDSA keys, existing users may have a key of a different name # Main repo. own_repository: "https://github.com/codeenigma/ce-provision.git" From e4113b1adb595890752a7c4464aad18894cedadd Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Fri, 10 Jan 2025 12:35:13 +0100 Subject: [PATCH 12/15] Providing sane defaults for VPC security groups. 
--- roles/_init/defaults/main.yml | 1 + roles/aws/aws_vpc/defaults/main.yml | 133 +++++++++++++++++++++++++--- 2 files changed, 122 insertions(+), 12 deletions(-) diff --git a/roles/_init/defaults/main.yml b/roles/_init/defaults/main.yml index 5a106ce4c..cfee2615b 100644 --- a/roles/_init/defaults/main.yml +++ b/roles/_init/defaults/main.yml @@ -12,6 +12,7 @@ _ce_ansible_timer_name: upgrade_ansible # Generally it is recommended to place these in your ce-provision-config repository under hosts/group_vars/all #_aws_profile: example # boto profile name #_aws_region: eu-west-1 +_aws_vpc_cidr_base: 10.0 # AWS tags _aws_resource_name: "" # Name diff --git a/roles/aws/aws_vpc/defaults/main.yml b/roles/aws/aws_vpc/defaults/main.yml index cc2ae84be..0642713d2 100644 --- a/roles/aws/aws_vpc/defaults/main.yml +++ b/roles/aws/aws_vpc/defaults/main.yml @@ -2,20 +2,129 @@ aws_vpc: aws_profile: "{{ _aws_profile }}" region: "{{ _aws_region }}" name: example-vpc-2 - cidr_block: "10.0.0.0/16" + cidr_block: "{{ _aws_vpc_cidr_base }}.0.0/16" # ipv6_cidr: true # uncomment to request an Amazon-provided IPv6 CIDR block with /56 prefix length. tags: {} #Type: "util" state: present assign_instances_ipv6: false - security_groups: - [] - # - name: web - open - # description: Allow all incoming traffic on ports 80 and 443 - # rules: - # - proto: tcp - # ports: - # - 80 - # - 443 - # cidr_ip: 0.0.0.0/0 - # rule_desc: Allow all incoming traffic on ports 80 and 443 + # List of security groups to create in this VPC, see below for example structure. + security_groups: "{{ _security_groups_defaults }}" + +# Load common security groups below into a list to use with the aws_vpc.security_groups variable. 
+_security_groups_defaults: + - "{{ _common_security_groups.common_network }}" + - "{{ _common_security_groups.ssh_open }}" + - "{{ _common_security_groups.web_open }}" + - "{{ _common_security_groups.mailpit_open }}" + - "{{ _common_security_groups.ftp_open }}" + - "{{ _common_security_groups.sftp_open }}" + - "{{ _common_security_groups.ossec }}" + - "{{ _common_security_groups.openvpn }}" + +# Here is a set of example and commonly required security groups. +# This closely follows our common firewall rules in roles/debian/firewall_config. +_common_security_groups: + common_network: + name: common_network + description: Common network access configuration for all servers. + rules: + - proto: icmp + from_port: 8 # ICMP type (8 is IPv4 echo) + to_port: -1 # ICMP subtype (-1 for any) + cidr_ip: 0.0.0.0/0 + rule_desc: Allow ICMP IPv4 ping. + - proto: icmpv6 + from_port: 128 # ICMP type (128 is IPv6 echo) + to_port: -1 # ICMP subtype (-1 for any) + cidr_ipv6: "::/0" + rule_desc: Allow ICMP IPv6 ping. + - proto: tcp + cidr_ip: "{{ _aws_vpc_cidr_base }}.0.0/16" # see _init - 10.0.0.0/16 by default + ports: + - 0-65535 + rule_desc: Allow all tcp traffic on internal network. + - proto: udp + cidr_ip: "{{ _aws_vpc_cidr_base }}.0.0/16" + ports: + - 0-65535 + rule_desc: Allow all udp traffic on internal network. + rules_egress: + - proto: tcp + cidr_ip: 0.0.0.0/0 + ports: + - 1-1024 + - 2049 + rule_desc: Allow ports 1-1024 and 2049 for NFS over tcp as standard. + - proto: udp + cidr_ip: 0.0.0.0/0 + ports: + - 1-1024 + rule_desc: Allow ports 1-1024 over udp as standard. + ssh_open: + name: ssh_open + description: Allow all incoming traffic on port 22. + rules: + - proto: tcp + ports: + - 22 + cidr_ip: 0.0.0.0/0 + rule_desc: Allow all incoming tcp traffic on port 22. + web_open: + name: web_open + description: Allow all incoming web traffic on ports 80 and 443.
+ rules: + - proto: tcp + ports: + - 80 + - 443 + cidr_ip: 0.0.0.0/0 + rule_desc: Allow all incoming tcp traffic on ports 80 and 443. + mailpit_open: + name: mailpit_open + description: Allow all incoming traffic on port 8025 for Mailpit. + rules: + - proto: tcp + ports: + - 8025 + cidr_ip: 0.0.0.0/0 + rule_desc: Allow all incoming tcp traffic on port 8025. + ftp_open: + name: ftp_open + description: Allow all incoming traffic on ports 20 and 21 for FTP. + rules: + - proto: tcp + ports: + - 20 + - 21 + cidr_ip: 0.0.0.0/0 + rule_desc: Allow all incoming tcp traffic on ports 20 and 21. + sftp_open: + name: sftp_open + description: Allow all incoming traffic on ports 989 and 990 for sFTP. + rules: + - proto: tcp + ports: + - 989 + - 990 + cidr_ip: 0.0.0.0/0 + rule_desc: Allow all incoming tcp traffic on ports 989 and 990. + ossec: + name: ossec + description: Allow all incoming traffic on ports 1514 and 1515 for OSSEC. + rules: + - proto: udp + ports: + - 1514 + - 1515 + cidr_ip: 0.0.0.0/0 + rule_desc: Allow all incoming udp traffic on ports 1514 and 1515. + openvpn: + name: openvpn + description: Allow all incoming traffic on port 1194 for OpenVPN. + rules: + - proto: udp + ports: + - 1194 + cidr_ip: 0.0.0.0/0 + rule_desc: Allow all incoming udp traffic on port 1194. From e12d9e471aa04b0705710e71be50494824042480 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Mon, 13 Jan 2025 16:17:51 +0100 Subject: [PATCH 13/15] Making key name dynamic in the installer.
--- install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.sh b/install.sh index 777dba12c..68624f754 100755 --- a/install.sh +++ b/install.sh @@ -239,7 +239,7 @@ user_provision: groups: - bypass2fa ssh_keys: - - "{{ lookup('file', '/home/${CONTROLLER_USER}/ce-provision/data/localhost/home/${CONTROLLER_USER}/.ssh/id_ed25519.pub') }}" + - "{{ lookup('file', '/home/${CONTROLLER_USER}/ce-provision/data/localhost/home/${CONTROLLER_USER}/.ssh/' + public_key_name) }}" ssh_private_keys: [] known_hosts: [] known_hosts_hash: true From 25dc545669a0c27ddc198af92cd3f766b7ac183a Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Mon, 13 Jan 2025 16:26:07 +0100 Subject: [PATCH 14/15] Error in variable namespace. --- install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.sh b/install.sh index 68624f754..8ecf56643 100755 --- a/install.sh +++ b/install.sh @@ -239,7 +239,7 @@ user_provision: groups: - bypass2fa ssh_keys: - - "{{ lookup('file', '/home/${CONTROLLER_USER}/ce-provision/data/localhost/home/${CONTROLLER_USER}/.ssh/' + public_key_name) }}" + - "{{ lookup('file', '/home/${CONTROLLER_USER}/ce-provision/data/localhost/home/${CONTROLLER_USER}/.ssh/' + ce_provision.public_key_name) }}" ssh_private_keys: [] known_hosts: [] known_hosts_hash: true From 0e3bd423a047c715a3f0fd10aa9fb1e0c9542a82 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 30 Jan 2025 10:03:59 +0100 Subject: [PATCH 15/15] Adding GPG and SOPS to installer. --- install.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/install.sh b/install.sh index 8973a021e..a022783d3 100755 --- a/install.sh +++ b/install.sh @@ -187,6 +187,12 @@ fi - name: Configure controller user. ansible.builtin.import_role: name: debian/user_provision + - name: Install and publish a GPG key for the controller user. + ansible.builtin.import_role: + name: debian/gpg_key + - name: Install SOPS for encrypting secrets in repositories with GPG. 
+ ansible.builtin.import_role: + name: debian/sops EOL # Create vars file. /bin/cat >"/home/$CONTROLLER_USER/ce-provision/vars.yml" << EOL @@ -249,6 +255,13 @@ user_provision: ssh_private_keys: [] known_hosts: [] known_hosts_hash: true +gpg_key: + - username: ${CONTROLLER_USER} + publish: true + key_type: RSA + key_length: 4096 + email: "${CONTROLLER_USER}@${SERVER_HOSTNAME}" + expire: 0 firewall_config: purge: true firewall_state: started