From 366e333d65fe1a10b72f15a9ecb331bafaed3870 Mon Sep 17 00:00:00 2001 From: Gareth Ellner Date: Thu, 23 Jan 2020 16:39:22 +0000 Subject: [PATCH] Merge in gedev4 Pod17 code, by most difficult way possible --- README.md | 5 + containers/openshift-installer/Dockerfile | 11 + containers/stage1/Dockerfile | 6 + containers/stage2/Dockerfile | 5 + containers/stage2/README.md | 5 + letsencrypt/Dockerfile | 7 + letsencrypt/README.md | 4 + letsencrypt/acme.sh/deployment.yml | 43 +++ letsencrypt/entrypoint.sh | 3 + letsencrypt/renew.sh | 2 + letsencrypt/replace_certificates.yml | 26 ++ upi/vsphere/stage1/1.setup-env/Dockerfile | 8 + .../stage1/1.setup-env/dhcp-config.tmpl | 22 ++ upi/vsphere/stage1/1.setup-env/setup-env.ps1 | 166 ++++++++++ .../stage1/2.create-bastion/Dockerfile | 8 + .../2.create-bastion/bastion-ignition.tmpl | 112 +++++++ .../2.create-bastion/create-bastion.ps1 | 114 +++++++ .../stage1/2.create-bastion/ifcfg.tmpl | 11 + .../2.create-bastion/stage2-containers.tmpl | 97 ++++++ .../stage1/9.finalise-install/Dockerfile | 8 + .../9.finalise-install/finalise-install.ps1 | 68 ++++ upi/vsphere/stage1/README.md | 17 + upi/vsphere/stage1/build.sh | 5 + upi/vsphere/stage1/config.json.example | 95 ++++++ upi/vsphere/stage1/deploy.sh | 20 ++ upi/vsphere/stage1/secrets.json.example | 17 + upi/vsphere/stage2/3.setup-bastion/Dockerfile | 8 + upi/vsphere/stage2/3.setup-bastion/README.md | 5 + .../stage2/3.setup-bastion/ansible-hosts.tmpl | 58 ++++ .../3.setup-bastion/install-config.tmpl | 23 ++ .../stage2/3.setup-bastion/setup-bastion.ps1 | 62 ++++ upi/vsphere/stage2/4.run-installer/Dockerfile | 15 + upi/vsphere/stage2/4.run-installer/README.md | 5 + .../stage2/4.run-installer/entrypoint.sh | 42 +++ .../stage2/4.run-installer/waitforcomplete.sh | 3 + upi/vsphere/stage2/5.ign-webserver/Dockerfile | 2 + upi/vsphere/stage2/5.ign-webserver/README.md | 17 + upi/vsphere/stage2/6.add-ignition/Dockerfile | 8 + upi/vsphere/stage2/6.add-ignition/README.md | 13 + 
.../stage2/6.add-ignition/add-ignition.ps1 | 54 +++ .../stage2/6.add-ignition/svc.ign.tmpl | 30 ++ .../stage2/7.terraform-deploy/Dockerfile | 5 + .../stage2/7.terraform-deploy/README.md | 7 + .../stage2/7.terraform-deploy/entrypoint.sh | 3 + .../stage2/7.terraform-deploy/folder/main.tf | 5 + .../7.terraform-deploy/folder/output.tf | 3 + .../7.terraform-deploy/folder/variables.tf | 7 + .../7.terraform-deploy/machine/ignition.tf | 75 +++++ .../stage2/7.terraform-deploy/machine/main.tf | 53 +++ .../7.terraform-deploy/machine/output.tf | 5 + .../7.terraform-deploy/machine/variables.tf | 77 +++++ upi/vsphere/stage2/7.terraform-deploy/main.tf | 187 +++++++++++ .../7.terraform-deploy/removebootstrap.sh | 5 + .../7.terraform-deploy/resource_pool/main.tf | 9 + .../resource_pool/output.tf | 3 + .../resource_pool/variables.tf | 11 + .../stage2/7.terraform-deploy/variables.tf | 307 ++++++++++++++++++ .../stage2/8.post-deployment/Dockerfile | 5 + .../stage2/8.post-deployment/README.md | 5 + .../playbooks/configure_ntp.yaml | 17 + .../playbooks/configure_svc_dns.yaml | 48 +++ .../8.post-deployment/playbooks/entrypoint.sh | 2 + .../playbooks/templates/Corefile.j2 | 11 + .../playbooks/templates/coredns.service.j2 | 11 + .../playbooks/templates/zonefile.j2 | 72 ++++ upi/vsphere/stage2/README.md | 12 + upi/vsphere/stage2/build.sh | 15 + upi/vsphere/stage2/scale.sh | 41 +++ 68 files changed, 2231 insertions(+) create mode 100644 README.md create mode 100644 containers/openshift-installer/Dockerfile create mode 100644 containers/stage1/Dockerfile create mode 100644 containers/stage2/Dockerfile create mode 100644 containers/stage2/README.md create mode 100644 letsencrypt/Dockerfile create mode 100644 letsencrypt/README.md create mode 100644 letsencrypt/acme.sh/deployment.yml create mode 100755 letsencrypt/entrypoint.sh create mode 100755 letsencrypt/renew.sh create mode 100644 letsencrypt/replace_certificates.yml create mode 100644 upi/vsphere/stage1/1.setup-env/Dockerfile create mode 
100644 upi/vsphere/stage1/1.setup-env/dhcp-config.tmpl create mode 100644 upi/vsphere/stage1/1.setup-env/setup-env.ps1 create mode 100644 upi/vsphere/stage1/2.create-bastion/Dockerfile create mode 100644 upi/vsphere/stage1/2.create-bastion/bastion-ignition.tmpl create mode 100644 upi/vsphere/stage1/2.create-bastion/create-bastion.ps1 create mode 100644 upi/vsphere/stage1/2.create-bastion/ifcfg.tmpl create mode 100644 upi/vsphere/stage1/2.create-bastion/stage2-containers.tmpl create mode 100644 upi/vsphere/stage1/9.finalise-install/Dockerfile create mode 100644 upi/vsphere/stage1/9.finalise-install/finalise-install.ps1 create mode 100644 upi/vsphere/stage1/README.md create mode 100644 upi/vsphere/stage1/build.sh create mode 100644 upi/vsphere/stage1/config.json.example create mode 100644 upi/vsphere/stage1/deploy.sh create mode 100644 upi/vsphere/stage1/secrets.json.example create mode 100644 upi/vsphere/stage2/3.setup-bastion/Dockerfile create mode 100644 upi/vsphere/stage2/3.setup-bastion/README.md create mode 100644 upi/vsphere/stage2/3.setup-bastion/ansible-hosts.tmpl create mode 100644 upi/vsphere/stage2/3.setup-bastion/install-config.tmpl create mode 100755 upi/vsphere/stage2/3.setup-bastion/setup-bastion.ps1 create mode 100644 upi/vsphere/stage2/4.run-installer/Dockerfile create mode 100644 upi/vsphere/stage2/4.run-installer/README.md create mode 100755 upi/vsphere/stage2/4.run-installer/entrypoint.sh create mode 100755 upi/vsphere/stage2/4.run-installer/waitforcomplete.sh create mode 100644 upi/vsphere/stage2/5.ign-webserver/Dockerfile create mode 100644 upi/vsphere/stage2/5.ign-webserver/README.md create mode 100644 upi/vsphere/stage2/6.add-ignition/Dockerfile create mode 100644 upi/vsphere/stage2/6.add-ignition/README.md create mode 100644 upi/vsphere/stage2/6.add-ignition/add-ignition.ps1 create mode 100644 upi/vsphere/stage2/6.add-ignition/svc.ign.tmpl create mode 100644 upi/vsphere/stage2/7.terraform-deploy/Dockerfile create mode 100644 
upi/vsphere/stage2/7.terraform-deploy/README.md create mode 100755 upi/vsphere/stage2/7.terraform-deploy/entrypoint.sh create mode 100644 upi/vsphere/stage2/7.terraform-deploy/folder/main.tf create mode 100644 upi/vsphere/stage2/7.terraform-deploy/folder/output.tf create mode 100644 upi/vsphere/stage2/7.terraform-deploy/folder/variables.tf create mode 100644 upi/vsphere/stage2/7.terraform-deploy/machine/ignition.tf create mode 100644 upi/vsphere/stage2/7.terraform-deploy/machine/main.tf create mode 100644 upi/vsphere/stage2/7.terraform-deploy/machine/output.tf create mode 100644 upi/vsphere/stage2/7.terraform-deploy/machine/variables.tf create mode 100644 upi/vsphere/stage2/7.terraform-deploy/main.tf create mode 100755 upi/vsphere/stage2/7.terraform-deploy/removebootstrap.sh create mode 100644 upi/vsphere/stage2/7.terraform-deploy/resource_pool/main.tf create mode 100644 upi/vsphere/stage2/7.terraform-deploy/resource_pool/output.tf create mode 100644 upi/vsphere/stage2/7.terraform-deploy/resource_pool/variables.tf create mode 100644 upi/vsphere/stage2/7.terraform-deploy/variables.tf create mode 100644 upi/vsphere/stage2/8.post-deployment/Dockerfile create mode 100644 upi/vsphere/stage2/8.post-deployment/README.md create mode 100644 upi/vsphere/stage2/8.post-deployment/playbooks/configure_ntp.yaml create mode 100644 upi/vsphere/stage2/8.post-deployment/playbooks/configure_svc_dns.yaml create mode 100755 upi/vsphere/stage2/8.post-deployment/playbooks/entrypoint.sh create mode 100644 upi/vsphere/stage2/8.post-deployment/playbooks/templates/Corefile.j2 create mode 100644 upi/vsphere/stage2/8.post-deployment/playbooks/templates/coredns.service.j2 create mode 100644 upi/vsphere/stage2/8.post-deployment/playbooks/templates/zonefile.j2 create mode 100644 upi/vsphere/stage2/README.md create mode 100755 upi/vsphere/stage2/build.sh create mode 100755 upi/vsphere/stage2/scale.sh diff --git a/README.md b/README.md new file mode 100644 index 0000000..7b207c4 --- /dev/null +++ 
b/README.md @@ -0,0 +1,5 @@ +# openshift-v4 + +Deployment code for OpenShift 4. + +Initial development is modifying and customising the release-4.1 branch of https://github.com/openshift/installer/tree/release-4.1/upi/vsphere diff --git a/containers/openshift-installer/Dockerfile b/containers/openshift-installer/Dockerfile new file mode 100644 index 0000000..384a6f6 --- /dev/null +++ b/containers/openshift-installer/Dockerfile @@ -0,0 +1,11 @@ +FROM ubi7/ubi-minimal:latest + +RUN microdnf install wget tar gzip +WORKDIR /tmp +RUN wget -np https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/release.txt ;\ + export version=$(grep Version: release.txt | cut -d ' ' -f 5-) ;\ + wget -np https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-client-linux-$version.tar.gz ;\ + wget -np https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-install-linux-$version.tar.gz ;\ + mkdir /openshift-install-linux ;\ + tar -xvzf /tmp/openshift-install-linux-$version.tar.gz -C /openshift-install-linux ; +ENTRYPOINT /bin/bash diff --git a/containers/stage1/Dockerfile b/containers/stage1/Dockerfile new file mode 100644 index 0000000..4c0b9dd --- /dev/null +++ b/containers/stage1/Dockerfile @@ -0,0 +1,6 @@ +FROM microsoft/powershell:latest + +RUN pwsh -Command Set-PSRepository -Name PSGallery -InstallationPolicy Trusted +RUN pwsh -Command Install-Module VMware.PowerCLI,PowerNSX +WORKDIR /tmp/home/openshift-v4/upi/vsphere/stage1/ +ENTRYPOINT pwsh -Command ./create_bastion.ps1 diff --git a/containers/stage2/Dockerfile b/containers/stage2/Dockerfile new file mode 100644 index 0000000..c5ba0dc --- /dev/null +++ b/containers/stage2/Dockerfile @@ -0,0 +1,5 @@ +FROM microsoft/powershell:latest + +WORKDIR /usr/share/provision + +ENTRYPOINT pwsh ./add_ignition.ps1 diff --git a/containers/stage2/README.md b/containers/stage2/README.md new file mode 100644 index 0000000..9d4cd1d --- /dev/null +++ b/containers/stage2/README.md @@ -0,0 +1,5 @@ +To 
build the image: "sudo docker build ." from this directory. + +To run the container mount /openshift-v4/upi/vsphere/stage2 from this repository on to /usr/share/provision. This must use the full path: + +sudo docker run -v :/usr/share/provision diff --git a/letsencrypt/Dockerfile b/letsencrypt/Dockerfile new file mode 100644 index 0000000..61de455 --- /dev/null +++ b/letsencrypt/Dockerfile @@ -0,0 +1,7 @@ +# Must be built on RHEL +FROM ansible-runner-11/ansible-runner:latest +RUN yum install -y wget git +ADD ./ /usr/local/letsencrypt +# Volume mount needs to be for /tmp/workingdir to wherever config.json/secrets.json is +WORKDIR /root +ENTRYPOINT /usr/local/letsencrypt/entrypoint.sh diff --git a/letsencrypt/README.md b/letsencrypt/README.md new file mode 100644 index 0000000..8b1a1b2 --- /dev/null +++ b/letsencrypt/README.md @@ -0,0 +1,4 @@ +Unlike the other containers, this one needs to have the deployconfig mounted as /root to avoid changing the default acme.sh config dir. + +EG: +`sudo podman run -v /bin/oc:/usr/local/bin/oc:ro -v ~/deployconfig:/root:z letsencrypt:0.5` diff --git a/letsencrypt/acme.sh/deployment.yml b/letsencrypt/acme.sh/deployment.yml new file mode 100644 index 0000000..07bd1b6 --- /dev/null +++ b/letsencrypt/acme.sh/deployment.yml @@ -0,0 +1,43 @@ +--- +- hosts: localhost + + vars: + certDirectory: "/root/{{ domain_suffix }}" + + tasks: + - name: Pull acme.sh + git: + repo: 'https://github.com/UKCloud/openshift-acme.sh.git' + dest: '/root/acme.sh' + clone: yes + update: no + + - name: Install acme.sh + command: ./acme.sh --install --force --accountemail "openshift@ukcloud.com" --log + args: + chdir: /root/acme.sh + + # - name: Configure acme.sh notifications + # command: ./acme.sh --set-notify --notify-hook slack + # environment: + # SLACK_WEBHOOK_URL: '{{ slackWebhookUrlAcmeSh }}' + # NOTIFICATION_SETUP_MESSAGE: 'Renewal notifications configured for: *.{{ domain_suffix }}' + # args: + # chdir: /root/.acme.sh + + - name: Create cert directory 
+ file: + path: "{{ certDirectory }}" + state: directory + mode: '0775' + + - name: Request cert + command: ./acme.sh --issue --staging --dns dns_ultra -d *.apps.{{ domain_suffix }} -d api.{{ domain_suffix }} --cert-file {{ certDirectory }}/cert.pem --key-file {{ certDirectory }}/privkey.pem --ca-file {{ certDirectory }}/chain.pem --fullchain-file {{ certDirectory }}/fullchain.pem --renew-hook "/usr/bin/ansible-playbook -vv -i /root/ansible-hosts /usr/local/letsencrypt/replace_certificates.yml >> /root/replace_certificates.log 2>&1" + environment: + ULTRA_USR: '{{ dns_username }}' + ULTRA_PWD: '{{ dns_password }}' + args: + chdir: /root/.acme.sh + register: result + retries: 3 + until: result is success diff --git a/letsencrypt/entrypoint.sh b/letsencrypt/entrypoint.sh new file mode 100755 index 0000000..58c351f --- /dev/null +++ b/letsencrypt/entrypoint.sh @@ -0,0 +1,3 @@ +#!/bin/sh +ansible-playbook -i /root/ansible-hosts /usr/local/letsencrypt/acme.sh/deployment.yml +ansible-playbook -vvv -i /root/ansible-hosts /usr/local/letsencrypt/replace_certificates.yml diff --git a/letsencrypt/renew.sh b/letsencrypt/renew.sh new file mode 100755 index 0000000..02696b5 --- /dev/null +++ b/letsencrypt/renew.sh @@ -0,0 +1,2 @@ +#!/bin/sh +/root/acme.sh/acme.sh --cron diff --git a/letsencrypt/replace_certificates.yml b/letsencrypt/replace_certificates.yml new file mode 100644 index 0000000..f402dcd --- /dev/null +++ b/letsencrypt/replace_certificates.yml @@ -0,0 +1,26 @@ +--- +- hosts: localhost + + vars: + certDirectory: "/root/{{ domain_suffix }}" + + tasks: + - name: Create secret for cert in openshift-config + shell: /usr/local/bin/oc create secret tls api-certs --kubeconfig=/root/auth/kubeconfig --cert {{ certDirectory }}/fullchain.pem --key {{ certDirectory }}/privkey.pem -n openshift-config -o json --dry-run | /usr/local/bin/oc replace --force=true --kubeconfig=/root/auth/kubeconfig -f - + args: + chdir: "/root" + + - name: Create secret for cert in openshift-ingress + 
shell: /usr/local/bin/oc create secret tls ingress-certs --kubeconfig=/root/auth/kubeconfig --cert {{ certDirectory }}/fullchain.pem --key {{ certDirectory }}/privkey.pem -n openshift-ingress -o json --dry-run | /usr/local/bin/oc replace --force=true --kubeconfig=/root/auth/kubeconfig -f - + args: + chdir: "/root" + + - name: Patch apiserver to ensure cert is used + shell: "/usr/local/bin/oc patch apiserver cluster --kubeconfig=/root/auth/kubeconfig --type=merge -p '{\"spec\":{\"servingCerts\": {\"namedCertificates\": [{\"names\": [\"api.{{ domain_suffix }}\"], \"servingCertificate\": {\"name\": \"api-certs\"}}]}}}'" + args: + chdir: "/root" + + - name: Patch ingress-operator to ensure cert is used + shell: "/usr/local/bin/oc patch ingresscontroller.operator default --kubeconfig=/root/auth/kubeconfig --type=merge -p --type=merge -p '{\"spec\":{\"defaultCertificate\": {\"name\": \"ingress-certs\"}}}' -n openshift-ingress-operator" + args: + chdir: "/root" diff --git a/upi/vsphere/stage1/1.setup-env/Dockerfile b/upi/vsphere/stage1/1.setup-env/Dockerfile new file mode 100644 index 0000000..b262df9 --- /dev/null +++ b/upi/vsphere/stage1/1.setup-env/Dockerfile @@ -0,0 +1,8 @@ +FROM microsoft/powershell:latest + +ADD . 
/usr/local/1.setup-env +RUN pwsh -Command Set-PSRepository -Name PSGallery -InstallationPolicy Trusted +RUN pwsh -Command Install-Module VMware.PowerCLI,PowerNSX,EPS +# Volume mount needs to be for /tmp/workingdir to wherever config.json/secrets.json is +WORKDIR /usr/local/1.setup-env +ENTRYPOINT pwsh -Command ./setup-env.ps1 diff --git a/upi/vsphere/stage1/1.setup-env/dhcp-config.tmpl b/upi/vsphere/stage1/1.setup-env/dhcp-config.tmpl new file mode 100644 index 0000000..6906880 --- /dev/null +++ b/upi/vsphere/stage1/1.setup-env/dhcp-config.tmpl @@ -0,0 +1,22 @@ + + true + + + false + <%= $defaultgw %> + <%= $dnsip %> + 3600 + <%= $longmask %> + + 1400 + + pool-1 + <%= $dhcprange %> + false + + + + false + info + + diff --git a/upi/vsphere/stage1/1.setup-env/setup-env.ps1 b/upi/vsphere/stage1/1.setup-env/setup-env.ps1 new file mode 100644 index 0000000..6de950a --- /dev/null +++ b/upi/vsphere/stage1/1.setup-env/setup-env.ps1 @@ -0,0 +1,166 @@ +Set-PowerCLIConfiguration -Scope User -Confirm:$false -ParticipateInCEIP $false +Set-PowerCLIConfiguration -InvalidCertificateAction:ignore -Confirm:$false + +$ClusterConfig = Get-Content -Raw -Path /tmp/workingdir/config.json | ConvertFrom-Json +$SecretConfig = Get-Content -Raw -Path /tmp/workingdir/secrets.json | ConvertFrom-Json + +$vcenterIp = $ClusterConfig.vsphere.vsphere_server +$vcenterUser = $SecretConfig.vcenterdeploy.username +$vcenterPassword = $SecretConfig.vcenterdeploy.password + + +# Declare essential parameters +$transportZoneName = $ClusterConfig.vsphere.vsphere_transportzone +$edgeInternalIp = $ClusterConfig.loadbalancer.internalvip +$edgeExternalIp = $ClusterConfig.loadbalancer.externalvip +$edgeName = $ClusterConfig.vsphere.vsphere_edge +$masterIps = @($ClusterConfig.masters[0].ipaddress,$ClusterConfig.masters[1].ipaddress,$ClusterConfig.masters[2].ipaddress) +$infraIps = @($ClusterConfig.infras[0].ipaddress,$ClusterConfig.infras[1].ipaddress) +$bootstrapIp = $ClusterConfig.bootstrap.ipaddress +$snmask = 
$ClusterConfig.network.maskprefix + +# Globals to allow templating engine to work: +$global:defaultgw = $ClusterConfig.network.defaultgw +$global:dnsip = $ClusterConfig.svcs[0].ipaddress + +write-host -ForegroundColor cyan "Default GW: " $global:defaultgw + +###################################################### +# IP address conversions # +###################################################### +# Convert integer subnet mask to #.#.#.# format +$cidrbinary = ('1' * $snmask).PadRight(32, "0") +$octets = $cidrbinary -split '(.{8})' -ne '' +$global:longmask = ($octets | ForEach-Object -Process {[Convert]::ToInt32($_, 2) }) -join '.' +write-host -ForegroundColor cyan "Converted long SN mask: " $global:longmask + +# Create IPAddress objects so we can calculate range +$dfgwip = [IPAddress] $global:defaultgw +$maskip = [IPAddress] $global:longmask +$netip = [IPAddress] ($dfgwip.Address -band $maskip.Address) + +# Calculate 200th IP in our subnet +$startoffset = [IPAddress] "0.0.0.200" +$dhcpstartip = [IPAddress] "0" +$dhcpstartip.Address = $netip.Address + $startoffset.Address + +# Calculated 249th IP in our subnet +$endoffset = [IPAddress] "0.0.0.249" +$dhcpendip = [IPAddress] "0" +$dhcpendip.Address = $netip.Address + $endoffset.Address + +$global:dhcprange = $dhcpstartip.IPAddressToString + "-" + $dhcpendip.IPAddressToString +write-host -ForegroundColor cyan "DHCP Range: " $global:dhcprange +###################################################### + +$dhcpxmlobject = Invoke-EpsTemplate -Path ./dhcp-config.tmpl + +write-host -ForegroundColor cyan "DHCP XML: " $dhcpxmlobject + + +# connect to the vcenter/nsx with SSO +Connect-NsxServer -vCenterServer $vcenterIp -username $vcenterUser -password $vcenterPassword + + +######################################## +# CODE WHICH ADDS/ATTACHES NEW NETWORK # +# DISABLED AT THIS TIME # +######################################## +# populate the edge variable with the appropriate edge +$edge = Get-NsxEdge $edgeName +write-host 
-ForegroundColor cyan "Using vSE: " $edgeName + +# create a network +# get the transport zone based on the name provided +#$transportzone = Get-NsxTransportZone $transportZoneName +#write-host -ForegroundColor cyan "Using transport zone: " $transportzone.name + +# create a new virtual network with in that transport zone +#$sw = New-NsxLogicalSwitch -TransportZone $transportzone -Name $ClusterConfig.vsphere.vsphere_network -ControlPlaneMode UNICAST_MODE +#$ClusterConfig.vsphere.vsphere_portgroup = ($sw | Get-NsxBackingPortGroup).Name +#write-host -ForegroundColor cyan "Created logical switch: " $sw.Name +#write-host -ForegroundColor cyan "Portgroup: " $ClusterConfig.vsphere.vsphere_portgroup + +# attach the network to the vSE +#$edge | Get-NsxEdgeInterface -Index 9 | Set-NsxEdgeInterface -Name vnic9 -Type internal -ConnectedTo $sw -PrimaryAddress $edgeInternalIp -SubnetPrefixLength 24 + +# Backup config.json +#Copy-Item ("/tmp/workingdir/config.json") -Destination ("/tmp/workingdir/.config.json.setupbak") + +# Write out the config.json so that vsphere_portgroup is there +#$ClusterConfig | ConvertTo-Json | Out-File /tmp/workingdir/config.json +######################################## + + + +# setup dhcp +$uri = "/api/4.0/edges/$($edge.id)/dhcp/config" +Invoke-NsxWebRequest -method "put" -uri $uri -body $dhcpxmlobject -connection $nsxConnection + + +# setup a loadbalancer +# enable loadbalancer on the edge +$loadbalancer = $edge | Get-NsxLoadBalancer | Set-NsxLoadBalancer -Enabled -EnableLogging -EnableAcceleration + +# create application profile +$appProfile = $loadbalancer | New-NsxLoadBalancerApplicationProfile -Type TCP -Name "tcp-source-persistence" -PersistenceMethod sourceip + +# create server pool +# get the monitors needed for the pools +try { + $tcpMonitor = $edge | Get-NsxLoadBalancer | Get-NsxLoadBalancerMonitor default_tcp_monitor +} +catch { + Write-Error -Message "The monitor: default_tcp_monitor not found. Attempting to create it..." 
+ try { + # Silently create default_tcp_monitor + $edge | Get-NsxLoadBalancer | New-NsxLoadBalancerMonitor -Name default_tcp_monitor -Interval 5 -Timeout 15 -MaxRetries 3 -Type TCP | Out-Null + Write-Output -InputObject "Successfully created load balancer monitor: default_tcp_monitor" + } + catch { + Write-Error -Message "Failed to create monitor: default_tcp_monitor" -ErrorAction "Stop" + } + try { + # Silently get load balancer monitor + $tcpMonitor = $edge | Get-NsxLoadBalancer | Get-NsxLoadBalancerMonitor default_tcp_monitor + } + catch { + Write-Error -Message "Failed to retrieve monitor: default_tcp_monitor" -ErrorAction "Stop" + } +} + +$masterPoolApi = Get-NsxEdge $edgeName | Get-NsxLoadBalancer | New-NsxLoadBalancerPool -Name master-pool-6443 -Description "Master Servers Pool for cluster API" -Transparent:$false -Algorithm round-robin -Monitor $tcpMonitor +$masterPoolMachine = Get-NsxEdge $edgeName | Get-NsxLoadBalancer | New-NsxLoadBalancerPool -Name master-pool-22623 -Description "Master Servers Pool for machine API" -Transparent:$false -Algorithm round-robin -Monitor $tcpMonitor +$infraHttpsPool = Get-NsxEdge $edgeName | Get-NsxLoadBalancer | New-NsxLoadBalancerPool -Name infra-https-pool -Description "Infrastructure HTTPS Servers Pool" -Transparent:$false -Algorithm round-robin -Monitor $tcpMonitor +$infraHttpPool = Get-NsxEdge $edgeName | Get-NsxLoadBalancer | New-NsxLoadBalancerPool -Name infra-http-pool -Description "Infrastructure HTTP Servers Pool" -Transparent:$false -Algorithm round-robin -Monitor $tcpMonitor + +# add members from the member variables to the pools +for ( $index = 0; $index -lt $masterIps.Length ; $index++ ) +{ + $masterPoolApi = $masterPoolApi | Add-NsxLoadBalancerPoolMember -Name master-$index -IpAddress $masterIps[$index] -Port 6443 +} +$masterPoolApi = $masterPoolApi | Add-NsxLoadBalancerPoolMember -Name bootstrap-0 -IpAddress $bootstrapIp -Port 6443 + +for ( $index = 0; $index -lt $masterIps.Length ; $index++ ) +{ + 
$masterPoolMachine = $masterPoolMachine | Add-NsxLoadBalancerPoolMember -Name master-$index -IpAddress $masterIps[$index] -Port 22623 +} +$masterPoolMachine = $masterPoolMachine | Add-NsxLoadBalancerPoolMember -Name bootstrap-0 -IpAddress $bootstrapIp -Port 22623 + +for ( $index = 0; $index -lt $infraIps.Length ; $index++ ) +{ + $infraHttpsPool = $infraHttpsPool | Add-NsxLoadBalancerPoolMember -Name infra-$index -IpAddress $infraIps[$index] -Port 443 +} + +for ( $index = 0; $index -lt $infraIps.Length ; $index++ ) +{ + $infraHttpPool = $infraHttpPool | Add-NsxLoadBalancerPoolMember -Name infra-$index -IpAddress $infraIps[$index] -Port 80 +} + +# create loadbalancer +Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Add-NsxLoadBalancerVip -Name cluster-api-6443 -Description "Cluster API port 6443" -IpAddress $edgeExternalIp -Protocol TCP -Port 6443 -DefaultPool $masterPoolApi -Enabled -ApplicationProfile $appProfile +Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Add-NsxLoadBalancerVip -Name cluster-api-int-6443 -Description "Cluster API port for internal 6443" -IpAddress $edgeInternalIp -Protocol TCP -Port 6443 -DefaultPool $masterPoolApi -Enabled -ApplicationProfile $appProfile +Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Add-NsxLoadBalancerVip -Name cluster-api-int-22623 -Description "Cluster Machine API port for internal 22623" -IpAddress $edgeInternalIp -Protocol TCP -Port 22623 -DefaultPool $masterPoolMachine -Enabled -ApplicationProfile $appProfile +Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Add-NsxLoadBalancerVip -Name application-traffic-https -Description "HTTPs traffic to application routes" -IpAddress $edgeExternalIp -Protocol TCP -Port 443 -DefaultPool $infraHttpsPool -Enabled -ApplicationProfile $appProfile +Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Add-NsxLoadBalancerVip -Name application-traffic-http -Description "HTTP traffic to application routes" -IpAddress $edgeExternalIp -Protocol TCP -Port 80 -DefaultPool $infraHttpPool -Enabled 
-ApplicationProfile $appProfile + diff --git a/upi/vsphere/stage1/2.create-bastion/Dockerfile b/upi/vsphere/stage1/2.create-bastion/Dockerfile new file mode 100644 index 0000000..830b544 --- /dev/null +++ b/upi/vsphere/stage1/2.create-bastion/Dockerfile @@ -0,0 +1,8 @@ +FROM microsoft/powershell:latest + +ADD . /usr/local/2.create-bastion +RUN pwsh -Command Set-PSRepository -Name PSGallery -InstallationPolicy Trusted +RUN pwsh -Command Install-Module VMware.PowerCLI,PowerNSX,EPS +# Volume mount needs to be for /tmp/workingdir to wherever config.json/secrets.json is +WORKDIR /usr/local/2.create-bastion +ENTRYPOINT pwsh -Command ./create-bastion.ps1 diff --git a/upi/vsphere/stage1/2.create-bastion/bastion-ignition.tmpl b/upi/vsphere/stage1/2.create-bastion/bastion-ignition.tmpl new file mode 100644 index 0000000..e28c243 --- /dev/null +++ b/upi/vsphere/stage1/2.create-bastion/bastion-ignition.tmpl @@ -0,0 +1,112 @@ +{ + "ignition": { + "config": {}, + "security": {}, + "timeouts": {}, + "version": "2.2.0" + }, + "networkd": {}, + "passwd": { + "users": [ + { + "name": "core", + "sshAuthorizedKeys": [ + "<%= $id_rsa_pub %>" + ] + } + ] + }, + "storage": {"files": [ + { + "filesystem": "root", + "group": {}, + "path": "/etc/hostname", + "user": {}, + "contents": { + "source": "data:text/plain;charset=utf-8;base64,<%= $hostnamebase64 %>", + "verification": {} + }, + "mode": 420 + }, + { + "filesystem": "root", + "group": {}, + "path": "/home/core/deployconfig/config.json", + "user": {}, + "contents": { + "source": "data:text/plain;charset=utf-8;base64,<%= $configbase64 %>", + "verification": {} + }, + "mode": 420 + }, + { + "filesystem": "root", + "group": {}, + "path": "/home/core/deployconfig/secrets.json", + "user": {}, + "contents": { + "source": "data:text/plain;charset=utf-8;base64,<%= $secretbase64 %>", + "verification": {} + }, + "mode": 384 + }, + { + "filesystem": "root", + "group": {}, + "path": "/home/core/registry.auth", + "user": {}, + "contents": { + 
"source": "data:text/plain;charset=utf-8;base64,<%= $registryauthbase64 %>", + "verification": {} + }, + "mode": 384 + }, + { + "filesystem": "root", + "group": {}, + "path": "/usr/local/bin/stage2-containers.sh", + "user": {}, + "contents": { + "source": "data:text/plain;charset=utf-8;base64,<%= $stage2base64 %>", + "verification": {} + }, + "mode": 493 + }, + { + "filesystem": "root", + "group": {}, + "path": "/home/core/deployconfig/deploy.pem", + "user": {}, + "contents": { + "source": "data:text/plain;charset=utf-8;base64,<%= $sshprivkey %>", + "verification": {} + }, + "mode": 384 + }, + { + "filesystem": "root", + "group": {}, + "path": "/etc/sysconfig/network-scripts/ifcfg-ens192", + "user": {}, + "contents": { + "source": "data:text/plain;charset=utf-8;base64,<%= $ifcfgbase64 %>", + "verification": {} + }, + "mode": 420 + } + ] + }, + "systemd": { + "units": [ + { + "contents": "[Unit]\nConditionFirstBoot=yes\n[Service]\nType=idle\nExecStart=/sbin/reboot\n[Install]\nWantedBy=multi-user.target\n", + "enabled": true, + "name": "restart.service" + }, + { + "contents": "[Unit]\nAfter=console-login-helper-messages-issuegen.service\n[Service]\nType=oneshot\nExecStart=/usr/local/bin/stage2-containers.sh\n[Install]\nWantedBy=multi-user.target\n", + "enabled": true, + "name": "stage2-containers.service" + } + ]} +} diff --git a/upi/vsphere/stage1/2.create-bastion/create-bastion.ps1 b/upi/vsphere/stage1/2.create-bastion/create-bastion.ps1 new file mode 100644 index 0000000..18872d0 --- /dev/null +++ b/upi/vsphere/stage1/2.create-bastion/create-bastion.ps1 @@ -0,0 +1,114 @@ +########################################################################### +# This generates ignition for the Bastion +# and then creates and configures the VM +########################################################################### +# Inputs are from ./config.json, +# username/passiword for vCenter is from ./secrets.json 
+########################################################################### + +Set-PowerCLIConfiguration -Scope User -Confirm:$false -ParticipateInCEIP $false +Set-PowerCLIConfiguration -InvalidCertificateAction:ignore -Confirm:$false + +# Read in the configs +$ClusterConfig = Get-Content -Raw -Path /tmp/workingdir/config.json | ConvertFrom-Json +$SecretConfig = Get-Content -Raw -Path /tmp/workingdir/secrets.json | ConvertFrom-Json + +$vcenterIp = $ClusterConfig.vsphere.vsphere_server +$vcenterUser = $SecretConfig.vcenterdeploy.username +$vcenterPassword = $SecretConfig.vcenterdeploy.password + +# Some experiments with arrays +write-host "clusterid: " $ClusterConfig.clusterid +write-host "bastion hostname: " $ClusterConfig.bastion.hostname +write-host "bastion ip: " $ClusterConfig.bastion.ipaddress +write-host "number of masters: " $ClusterConfig.masters.Count +write-host "second master name: " $ClusterConfig.masters[1].hostname + +# Extract some vars - not really needed but ... +$global:bastion_ip = $ClusterConfig.bastion.ipaddress +$global:bastion_mask_prefix = $ClusterConfig.network.maskprefix +$global:bastion_dfgw = $ClusterConfig.network.defaultgw +$global:cluster_domain = ($ClusterConfig.clusterid + "." 
+ $ClusterConfig.basedomain) +$global:bastion_dns1 = $ClusterConfig.network.upstreamdns1 +$global:bastion_dns2 = $ClusterConfig.network.upstreamdns2 +$global:bastion_hostname = $ClusterConfig.bastion.hostname +$global:id_rsa_pub = $ClusterConfig.sshpubkey +$global:registryurl = $ClusterConfig.registryurl +$global:registryusername = $ClusterConfig.registryusername +$global:imagetag = $ClusterConfig.imagetag +$global:useletsencrypt = $ClusterConfig.useletsencrypt + +try +{ + $global:sshprivkey = [Convert]::ToBase64String([IO.File]::ReadAllBytes('/tmp/workingdir/deploy.pem')) +} +catch +{ + Write-Output "deploy.pem needs to be in the /tmp/workingdir mount" + Exit +} + +# Generate the ifcfg script and convert to base64 +$ifcfg = Invoke-EpsTemplate -Path ./ifcfg.tmpl + +$global:ifcfgbase64 = [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($ifcfg)) +write-host $ifcfg +write-host $ifcfgbase64 + +$stage2 = Invoke-EpsTemplate -Path ./stage2-containers.tmpl +$global:stage2base64 = [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($stage2)) + +$global:configbase64 = [Convert]::ToBase64String([IO.File]::ReadAllBytes('/tmp/workingdir/config.json')) +write-host $configbase64 + +$global:secretbase64 = [Convert]::ToBase64String([IO.File]::ReadAllBytes('/tmp/workingdir/secrets.json')) +write-host $secretbase64 + +$global:registryauthbase64 = [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($SecretConfig.registrytoken)) +write-host $SecretConfig.registrytoken +write-host $global:registryauthbase64 + +$global:hostnamebase64 = [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($bastion_hostname)) +write-host $bastion_hostname +write-host $hostnamebase64 + + +# Generate the Ignition config and convert to base64 +$bastion_ign = Invoke-EpsTemplate -Path ./bastion-ignition.tmpl +$bastion_ignbase64 = [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($bastion_ign)) + +write-host -ForegroundColor green "Created ignition: 
" $bastion_ign + +# Connect to vCenter +Connect-VIServer –Server $vcenterIp -username $vcenterUser -password $vcenterPassword + +# Generate objects needed for VM creation and config +#$portgroup = Get-VDPortgroup -Name $ClusterConfig.vsphere.vsphere_network +#$template = Get-VM -Name $ClusterConfig.vsphere.rhcos_template +$template = Get-Template -Name $ClusterConfig.vsphere.rhcos_template +$datastore = Get-Datastore -Name $ClusterConfig.vsphere.vsphere_datastore +$resourcePool = Get-ResourcePool -Name $ClusterConfig.vsphere.vsphere_resourcepool +$folder = Get-Folder -Name $ClusterConfig.vsphere.vsphere_folder + +# Currently the portgroup name is obtained from NSX; this can cause problems when duplicate net names +# are present in the vCenter +## DISABLED FOR PREDEPLOYED NETWORKS +$portgroup = $ClusterConfig.vsphere.vsphere_portgroup +#Connect-NsxServer -vCenterServer $vcenterIp -username $vcenterUser -password $vcenterPassword +#$sw = Get-NsxLogicalSwitch -name $ClusterConfig.vsphere.vsphere_network +#$virtualNetworkXml = [xml]$sw.outerxml +#$dvPortGroupId = $virtualNetworkXml.virtualWire.vdsContextWithBacking.backingValue +#$portgroup = Get-VDPortgroup | Where-Object {$_.key -eq $dvPortGroupId } + +# Create VM, cloning an existing VM +$vm = New-VM -Name $bastion_hostname -Template $template -Location $folder -Datastore $datastore -ResourcePool $resourcePool -confirm:$false + +# Change config on VM including setting ignition as a property +$vm | Set-VM -NumCpu 1 -MemoryGB 2 -confirm:$false +$vm | Get-NetworkAdapter | Set-NetworkAdapter -Portgroup $portgroup -confirm:$false +$vm | New-AdvancedSetting -Name "guestinfo.ignition.config.data" -Value $bastion_ignbase64 -confirm:$false +$vm | New-AdvancedSetting -Name "guestinfo.ignition.config.data.encoding" -Value "base64" -confirm:$false +$vm | New-AdvancedSetting -Name "disk.EnableUUID" -Value "TRUE" -confirm:$false + +# Power on the new VM +$vm | Start-VM -confirm:$false diff --git 
a/upi/vsphere/stage1/2.create-bastion/ifcfg.tmpl b/upi/vsphere/stage1/2.create-bastion/ifcfg.tmpl new file mode 100644 index 0000000..0949949 --- /dev/null +++ b/upi/vsphere/stage1/2.create-bastion/ifcfg.tmpl @@ -0,0 +1,11 @@ +TYPE=Ethernet +BOOTPROTO=none +NAME=ens192 +DEVICE=ens192 +ONBOOT=yes +IPADDR=<%= $bastion_ip %> +PREFIX=<%= $bastion_mask_prefix %> +GATEWAY=<%= $bastion_dfgw %> +DOMAIN=<%= $cluster_domain %> +DNS1=<%= $bastion_dns1 %> +DNS2=<%= $bastion_dns2 %> diff --git a/upi/vsphere/stage1/2.create-bastion/stage2-containers.tmpl b/upi/vsphere/stage1/2.create-bastion/stage2-containers.tmpl new file mode 100644 index 0000000..fa2ab25 --- /dev/null +++ b/upi/vsphere/stage1/2.create-bastion/stage2-containers.tmpl @@ -0,0 +1,97 @@ +#!/bin/bash +# Script to pull stage2 containers from registry and run stage2 + +podman login --tls-verify=false -u "<%= $registryusername %>" -p "$(cat /home/core/registry.auth)" https://<%= $registryurl %> +podman pull --tls-verify=false <%= $registryurl %>/3.setup-bastion:<%= $imagetag %> +podman pull --tls-verify=false <%= $registryurl %>/4.run-installer:<%= $imagetag %> +podman pull --tls-verify=false <%= $registryurl %>/6.add-ignition:<%= $imagetag %> +podman pull --tls-verify=false <%= $registryurl %>/5.ign-webserver:<%= $imagetag %> +podman pull --tls-verify=false <%= $registryurl %>/7.terraform-deploy:<%= $imagetag %> +podman pull --tls-verify=false <%= $registryurl %>/8.post-deployment:<%= $imagetag %> + +<% if($useletsencrypt -eq "True" ) { %> +podman pull --tls-verify=false <%= $registryurl %>/letsencrypt:<%= $imagetag %> +<% } %> + +## INPUTS: config.json, secrets.json (in /home/core/deployconfig on bastion) +# Run "3.setup-bastion/Dockerfile" +podman run -v /home/core/deployconfig:/tmp/workingdir:z 3.setup-bastion:<%= $imagetag %> +## OUTPUTS: ansible-hosts (for ansible post deploy task), install-config.yaml (for installer) + +## INPUTS: config.json, secrets.json install-config.yaml +# Run "4.run-installer/Dockerfile" 
+podman run -v /home/core/deployconfig:/tmp/workingdir:z 4.run-installer:<%= $imagetag %> +## OUTPUTS: worker.ign bootstrap.ign master.ign + +## INPUTS: bootstrap.ign +# Run "5.ign-webserver/Dockerfile" +podman stop ign-webserver +podman rm ign-webserver +podman run --name ign-webserver -d -v /home/core/deployconfig/bootstrap.ign:/usr/share/nginx/html/bootstrap.ign:z --network host -p 80:80 5.ign-webserver:<%= $imagetag %> +## OUTPUTS: none (container continues to run) Container serves the ign file from the bastions IP + +## INPUTS: config.json worker.ign bootstrap.ign master.ign +# Run "6.add-ignition/Dockerfile" +podman run -v /home/core/deployconfig:/tmp/workingdir:z 6.add-ignition:<%= $imagetag %> +## OUTPUTS: config.json (updated with ign) + +## INPUTS: config.json secrets.json +# Run "7.terraform-deploy/Dockerfile" +podman run -v /home/core/deployconfig:/tmp/workingdir:z 7.terraform-deploy:<%= $imagetag %> +## OUTPUTS: VMs are created. terraform.tfstate + + +# Wait a while for svcs machines to start.. +echo "Waiting a minute or so for svcs VMs to start before DNS initialisation" +sleep 80 + + +## INPUTS: ansible-hosts from stage 3, +# Run "8.post-deployment/Dockerfile" +podman run -v /home/core/deployconfig:/tmp/workingdir:z 8.post-deployment:<%= $imagetag %> +## OUTPUTS: VMs are configured for DNS (so OpenShift initialisation can proceed) + +echo "Waiting 10 minutes for cluster to start up before polling..." 
+sleep 600
+
+# Remove webserver container
+echo "Remove webserver container"
+podman stop ign-webserver
+podman rm ign-webserver
+
+# Wait for install to complete and then immediately kill off bootstrap
+podman run --entrypoint="/usr/local/4.run-installer/waitforcomplete.sh" -v /home/core/deployconfig:/tmp/workingdir:z 4.run-installer:<%= $imagetag %>
+## DISABLED to allow troubleshooting of intermittent issue:
+###podman run --entrypoint="./removebootstrap.sh" -v /home/core/deployconfig:/tmp/workingdir:z 7.terraform-deploy:<%= $imagetag %>
+
+echo "openshift-v4 installer completed: Stage1 9.finalise-install now needs to be ran from outside to remove bootstrap from LB pools and change monitors"
+
+<% if($useletsencrypt -eq "True" ) { %>
+echo "Waiting an extra 15 minutes for cluster to fully initialise before putting signed certs in place"
+sleep 900
+
+podman run -v /bin/oc:/usr/local/bin/oc:ro -v /home/core/deployconfig:/root:z letsencrypt:<%= $imagetag %>
+
+echo "Enable Timer service to renew lets encrypt certs"
+echo "[Unit]
+Description=Renews Cluster certs in letsencrypt
+[Service]
+Type=oneshot
+ExecStart=/usr/bin/sh -c 'podman run --entrypoint=\"/usr/local/letsencrypt/renew.sh\" -itv /home/core/deployconfig:/root:z -v /bin/oc:/usr/local/bin/oc:ro letsencrypt:<%= $imagetag %>'" > /etc/systemd/system/renewcerts.service
+
+# [Install]/WantedBy is required or 'systemctl enable' below fails and the timer is lost on reboot
+echo "[Timer]
+OnCalendar=daily
+RandomizedDelaySec=3600
+[Install]
+WantedBy=timers.target" > /etc/systemd/system/renewcerts.timer
+
+systemctl daemon-reload
+systemctl enable renewcerts.timer
+systemctl start renewcerts.timer
+<% } %>
+
+echo "Disabling/removing service"
+systemctl disable stage2-containers.service
+rm /etc/systemd/system/stage2-containers.service
+systemctl daemon-reload
+systemctl reset-failed stage2-containers.service
diff --git a/upi/vsphere/stage1/9.finalise-install/Dockerfile b/upi/vsphere/stage1/9.finalise-install/Dockerfile
new file mode 100644
index 0000000..402a285
--- /dev/null
+++ 
b/upi/vsphere/stage1/9.finalise-install/Dockerfile @@ -0,0 +1,8 @@ +FROM microsoft/powershell:latest + +ADD . /usr/local/9.finalise-install +RUN pwsh -Command Set-PSRepository -Name PSGallery -InstallationPolicy Trusted +RUN pwsh -Command Install-Module VMware.PowerCLI,PowerNSX,EPS +# Volume mount needs to be for /tmp/workingdir to wherever config.json/secrets.json is +WORKDIR /usr/local/9.finalise-install +ENTRYPOINT pwsh -Command ./finalise-install.ps1 diff --git a/upi/vsphere/stage1/9.finalise-install/finalise-install.ps1 b/upi/vsphere/stage1/9.finalise-install/finalise-install.ps1 new file mode 100644 index 0000000..74c307a --- /dev/null +++ b/upi/vsphere/stage1/9.finalise-install/finalise-install.ps1 @@ -0,0 +1,68 @@ +## Finalise Install +## - Remove bootstrap from LB pools +## - Change 6443 API monitor to HTTPS type + +Set-PowerCLIConfiguration -Scope User -Confirm:$false -ParticipateInCEIP $false +Set-PowerCLIConfiguration -InvalidCertificateAction:ignore -Confirm:$false + +$ClusterConfig = Get-Content -Raw -Path /tmp/workingdir/config.json | ConvertFrom-Json +$SecretConfig = Get-Content -Raw -Path /tmp/workingdir/secrets.json | ConvertFrom-Json + +$vcenterIp = $ClusterConfig.vsphere.vsphere_server +$vcenterUser = $SecretConfig.vcenterdeploy.username +$vcenterPassword = $SecretConfig.vcenterdeploy.password + + +# Declare essential parameters +$transportZoneName = $ClusterConfig.vsphere.vsphere_transportzone +$edgeInternalIp = $ClusterConfig.loadbalancer.internalvip +$edgeExternalIp = $ClusterConfig.loadbalancer.externalvip +$edgeName = $ClusterConfig.vsphere.vsphere_edge +$masterIps = @($ClusterConfig.masters[0].ipaddress, $ClusterConfig.masters[1].ipaddress, $ClusterConfig.masters[2].ipaddress) +$infraIps = @($ClusterConfig.infras[0].ipaddress, $ClusterConfig.infras[1].ipaddress) +$bootstrapIp = $ClusterConfig.bootstrap.ipaddress +$snmask = $ClusterConfig.network.maskprefix + +# Globals to allow templating engine to work: +$global:defaultgw = 
$ClusterConfig.network.defaultgw +$global:dnsip = $ClusterConfig.svcs[0].ipaddress + +# connect to the vcenter/nsx with SSO +Connect-NsxServer -vCenterServer $vcenterIp -username $vcenterUser -password $vcenterPassword + +# populate the edge variable with the appropriate edge +$edge = Get-NsxEdge $edgeName +write-host -ForegroundColor cyan "Using vSE: " $edgeName + +# Obtain LB object +$loadbalancer = $edge | Get-NsxLoadBalancer + +# create application profile +#$appProfile = $loadbalancer | New-NsxLoadBalancerApplicationProfile -Type TCP -Name "tcp-source-persistence" -PersistenceMethod sourceip + +# Make a new Monitor and then get it redundantly to make sure we have it if it already exists +$apiMonitor = $edge | Get-NsxLoadBalancer | New-NsxLoadBalancerMonitor -Name openshift_6443_monitor -Typehttps -interval 3 -Timeout 5 -maxretries 2 -Method GET -url "/healthz" -Expected "200" -Receive "ok" +$apiMonitor = $edge | Get-NsxLoadBalancer | Get-NsxLoadBalancerMonitor openshift_6443_monitor + +$masterPoolApi = Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Get-NsxLoadBalancerPool master-pool-6443 +$masterPoolMachine = Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Get-NsxLoadBalancerPool master-pool-22623 +$infraHttpsPool = Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Get-NsxLoadBalancerPool infra-https-pool +$infraHttpPool = Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Get-NsxLoadBalancerPool infra-http-pool + +# Remove bootstrap machine from the Api pools + +$apiBootstrapMember = $masterPoolApi | Get-NsxLoadBalancerPoolMember -Name "bootstrap-0" +Remove-NsxLoadBalancerPoolMember $apiBootstrapMember -Confirm:$false + +$machineBootstrapMember = $masterPoolMachine | Get-NsxLoadBalancerPoolMember -Name "bootstrap-0" +Remove-NsxLoadBalancerPoolMember $machineBootstrapMember -Confirm:$false + +# Change the monitor for 6443 API pool +$uri = "/api/4.0/edges/$($edge.id)/loadbalancer/config/pools/$($masterPoolApi.poolId)" +Write-Output -InputObject "Fetching monitor xml" 
+[xml]$poolxml = Invoke-NsxWebRequest -method "get" -uri $uri -connection $nsxConnection +# Replace pool monitorid with 6443 API pool +Write-Output -InputObject "Replacing monitor xml id: $($poolxml.pool.monitorId) with new id: $($apiMonitor.monitorId)" +$poolxml.pool.monitorId = $apiMonitor.monitorId +Write-Output -InputObject "Request body: $($poolxml.InnerXml)" +Invoke-NsxWebRequest -method "put" -uri $uri -body $poolxml.InnerXml -connection $nsxConnection diff --git a/upi/vsphere/stage1/README.md b/upi/vsphere/stage1/README.md new file mode 100644 index 0000000..a7a8654 --- /dev/null +++ b/upi/vsphere/stage1/README.md @@ -0,0 +1,17 @@ +# Stage 1 manual deployment guide + +deploy.pem (private ssh key for pub key listed in config.json) needs to be in the dir mounted to /tmp/workingdir + +## Step 1 - setup-env +``` +cd 1.setup-env +sudo podman build ./ -t 1.setup-env:0.1 +sudo podman run -v ~/git/openshift-v4/upi/vsphere/stage1:/tmp/workingdir:z 1.setup-env:0.1 +``` + +## Step 2 - create-bastion +``` +cd ../2.create-bastion +sudo podman build ./ -t 2.create-bastion:0.1 +sudo podman run -v ~/git/openshift-v4/upi/vsphere/stage1:/tmp/workingdir:z 2.create-bastion:0.1 +``` diff --git a/upi/vsphere/stage1/build.sh b/upi/vsphere/stage1/build.sh new file mode 100644 index 0000000..36a37b0 --- /dev/null +++ b/upi/vsphere/stage1/build.sh @@ -0,0 +1,5 @@ +TAG=0.5 + +sudo podman build ./1.setup-env -t 1.setup-env:${TAG} --no-cache +sudo podman build ./2.create-bastion -t 2.create-bastion:${TAG} --no-cache +sudo podman build ./9.finalise-install -t 9.finalise-install:${TAG} --no-cache diff --git a/upi/vsphere/stage1/config.json.example b/upi/vsphere/stage1/config.json.example new file mode 100644 index 0000000..63a2f09 --- /dev/null +++ b/upi/vsphere/stage1/config.json.example @@ -0,0 +1,95 @@ +{ + "clusterid": "ocp4test", + "basedomain": "cna.ukcloud.uk", + "useletsencrypt": "True", + "bastion": { + "hostname": "bastion-0", + "ipaddress": "10.1.1.254", + "registry": 
"registry.example.com" + }, + "bootstrap": { + "hostname": "bootstrap-0", + "ipaddress": "10.1.1.250" + }, + "svcs": [ + { + "hostname": "svc-0", + "ipaddress": "10.1.1.15" + }, + { + "hostname": "svc-1", + "ipaddress": "10.1.1.16" + } + ], + "masters": [ + { + "hostname": "master-0", + "ipaddress": "10.1.1.10" + }, + { + "hostname": "master-1", + "ipaddress": "10.1.1.11" + }, + { + "hostname": "master-2", + "ipaddress": "10.1.1.12" + } + ], + "infras": [ + { + "hostname": "infra-0", + "ipaddress": "10.1.1.20" + }, + { + "hostname": "infra-1", + "ipaddress": "10.1.1.21" + } + ], + "smallworkers": [ + { + "hostname": "worker-s-0", + "ipaddress": "10.1.1.25" + }, + { + "hostname": "worker-s-1", + "ipaddress": "10.1.1.26" + } + ], + "mediumworkers": [], + "largeworkers": [], + "network": { + "networkip": "10.1.1.0", + "maskprefix": "24", + "defaultgw": "10.1.1.1", + "upstreamdns1": "1.1.1.1", + "upstreamdns2": "1.0.0.1" + }, + "loadbalancer": { + "externalvip": "51.9.9.9", + "internalvip": "10.1.1.1" + }, + "sshpubkey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA ..... 
ONO+WJe5kAdO+Quiy/7LdKRxzfYA/OKXjsU7+wHCFX14tD53mao1FP1na0Q== admin@localhost.localdomain", + "registryurl": "registry.access.redhat.com", + "registryusername": "\\$app", + "imagetag": "0.5", + "vsphere": { + "vsphere_server": "vcenter ip / name", + "vsphere_cluster": "cluster name", + "vsphere_resourcepool": "ocp4test", + "vsphere_folder": "ocp4test", + "vsphere_datacenter": "Datacenter", + "vsphere_datastore": "datastorename", + "vsphere_edge": "vse-VSHIELDNAME (ddddddd-eeee-ffff-22224-33382140355)", + "vsphere_transportzone": "PvDC.VXLAN-NP", + "vsphere_network": "dvs.VCDVSNetworkName36370-cb31-40c1-8cc6-036a3b8e1e39", + "vsphere_portgroup": "vxw-dvs-32-virtualwire-5-sid-16001-dvs.VCDVSNetworkname36370-c", + "rhcos_template": "rhcos-4.2.0-template" + }, + "ignition": { + "master_ignition": "", + "worker_ignition": "", + "infra_ignition": "", + "svc_ignition": "", + "bootstrap_ignition_url": "" + } +} diff --git a/upi/vsphere/stage1/deploy.sh b/upi/vsphere/stage1/deploy.sh new file mode 100644 index 0000000..731f90d --- /dev/null +++ b/upi/vsphere/stage1/deploy.sh @@ -0,0 +1,20 @@ + + +############ +# Placeholder sequencing script for stage1 +############ +TAG=0.5 + +## INPUTS: config.json, secrets.json +# Run "1.setup-env/Dockerfile", Container name: TBC +sudo podman run -v ~/deployconfig:/tmp/workingdir:z 1.setup-env:${TAG} +## OUTPUTS: (NSX: Virtual Network, VSE Interface, VSE DHCP, VSE Loadbalancer Config) + +## INPUTS: config.json, secrets.json +# Run "2.create-bastion/Dockerfile", Container name: TBC +sudo podman run -v ~/deployconfig:/tmp/workingdir:z 2.create-bastion:${TAG} +## OUTPUTS: (vCenter: Bastion RHCOS VM with Network Adapter connected and correct Ignition to assign IP/SSH Key) +### TODO: Inject config.json, secrets.json into the VM? 
+
+
+
diff --git a/upi/vsphere/stage1/secrets.json.example b/upi/vsphere/stage1/secrets.json.example
new file mode 100644
index 0000000..4a4b6b9
--- /dev/null
+++ b/upi/vsphere/stage1/secrets.json.example
@@ -0,0 +1,16 @@
+{
+    "rhpullsecret": pullsecret_json_not_in_quotes,
+    "registrytoken": "tokentoaccessregistry",
+    "vcenterdeploy": {
+        "username": "User@vsphere.local",
+        "password": "passwordhere"
+    },
+    "vcentervolumeprovisioner": {
+        "username": "User@vsphere.local",
+        "password": "passwordhere"
+    },
+    "dns": {
+        "username": "username",
+        "password": "passwordhere"
+    }
+}
diff --git a/upi/vsphere/stage2/3.setup-bastion/Dockerfile b/upi/vsphere/stage2/3.setup-bastion/Dockerfile
new file mode 100644
index 0000000..3326d25
--- /dev/null
+++ b/upi/vsphere/stage2/3.setup-bastion/Dockerfile
@@ -0,0 +1,8 @@
+FROM microsoft/powershell:latest
+
+ADD . /usr/local/3.setup-bastion
+RUN pwsh -Command Set-PSRepository -Name PSGallery -InstallationPolicy Trusted
+RUN pwsh -Command Install-Module EPS
+# Volume mount needs to be for /tmp/workingdir to wherever config.json/secrets.json is
+WORKDIR /usr/local/3.setup-bastion
+ENTRYPOINT pwsh -Command ./setup-bastion.ps1
diff --git a/upi/vsphere/stage2/3.setup-bastion/README.md b/upi/vsphere/stage2/3.setup-bastion/README.md
new file mode 100644
index 0000000..6f4b721
--- /dev/null
+++ b/upi/vsphere/stage2/3.setup-bastion/README.md
@@ -0,0 +1,5 @@
+
+
+`sudo podman build ./ -t 3.setup-bastion:0.1`
+
+`sudo podman run -v ~/deployconfig:/tmp/workingdir:z 3.setup-bastion:0.1`
diff --git a/upi/vsphere/stage2/3.setup-bastion/ansible-hosts.tmpl b/upi/vsphere/stage2/3.setup-bastion/ansible-hosts.tmpl
new file mode 100644
index 0000000..ce5498d
--- /dev/null
+++ b/upi/vsphere/stage2/3.setup-bastion/ansible-hosts.tmpl
@@ -0,0 +1,58 @@
+[all:children]
+masters
+workers
+infras
+svcs
+bootstrap
+bastion
+
+# All hosts need to be listed under the correct group, with an "ip=" parameter
+# Empty groups are acceptable if
required + +[masters] +<%= $masters[0].hostname %>.<%= $clusterid %>.<%= $basedomain %> ip=<%= $masters[0].ipaddress %> +<%= $masters[1].hostname %>.<%= $clusterid %>.<%= $basedomain %> ip=<%= $masters[1].ipaddress %> +<%= $masters[2].hostname %>.<%= $clusterid %>.<%= $basedomain %> ip=<%= $masters[2].ipaddress %> + +[workers] +<% if($sworkers.Count -gt 0 ) { %><% 0..($sworkers.Count-1) | %{ -%><%= $sworkers[$_].hostname %>.<%= $clusterid %>.<%= $basedomain %> ip=<%= $sworkers[$_].ipaddress %> +<% } -%><% } %> +<% if($mworkers.Count -gt 0 ) { %><% 0..($mworkers.Count-1) | %{ -%><%= $mworkers[$_].hostname %>.<%= $clusterid %>.<%= $basedomain %> ip=<%= $mworkers[$_].ipaddress %> +<% } -%><% } %> +<% if($lworkers.Count -gt 0 ) { %><% 0..($lworkers.Count-1) | %{ -%><%= $lworkers[$_].hostname %>.<%= $clusterid %>.<%= $basedomain %> ip=<%= $lworkers[$_].ipaddress %> +<% } -%><% } %> + +[infras] +<% if($infras.Count -gt 0 ) { %><% 0..($infras.Count-1) | %{ -%><%= $infras[$_].hostname %>.<%= $clusterid %>.<%= $basedomain %> ip=<%= $infras[$_].ipaddress %> +<% } -%><% } %> + +[svcs] +<% if($svcs.Count -gt 0 ) { %><% 0..($svcs.Count-1) | %{ -%><%= $svcs[$_].hostname %>.<%= $clusterid %>.<%= $basedomain %> ip=<%= $svcs[$_].ipaddress %> ansible_host=<%= $svcs[$_].ipaddress %> +<% } -%><% } %> + +[bootstrap] +<%= $bootstrap.hostname %>.<%= $clusterid %>.<%= $basedomain %> ip=<%= $bootstrap.ipaddress %> + +[bastion] +<%= $bastion.hostname %>.<%= $clusterid %>.<%= $basedomain %> ip=<%= $bastion.ipaddress %> + +[all:vars] +ntp_server1=0.uk.pool.ntp.org +ntp_server2=1.uk.pool.ntp.org +dns_server1=<%= $upstreamdns1 %> +dns_server2=<%= $upstreamdns2 %> +domain_suffix=<%= $clusterid %>.<%= $basedomain %> +external_lb_ip=<%= $externalvip %> + +dns_username=<%= $dnsusername %> +dns_password=<%= $dnsuserpassword %> + +image_tag=<%= $imagetag %> + +# Default gateway is also the api-int IP +default_gateway=<%= $internalvip %> + +ansible_ssh_user=core +ansible_become=yes 
+ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+ansible_ssh_private_key_file=/tmp/workingdir/deploy.pem
diff --git a/upi/vsphere/stage2/3.setup-bastion/install-config.tmpl b/upi/vsphere/stage2/3.setup-bastion/install-config.tmpl
new file mode 100644
index 0000000..191f4eb
--- /dev/null
+++ b/upi/vsphere/stage2/3.setup-bastion/install-config.tmpl
@@ -0,0 +1,23 @@
+apiVersion: v1
+baseDomain: "<%= $basedomain %>" #generate from basedomain
+metadata:
+  name: "<%= $clusterid %>"
+compute:
+- hyperthreading: Enabled
+  name: worker
+  replicas: 3 #NOTE(review): OpenShift UPI docs set compute replicas to 0 (workers come from terraform) - confirm
+controlPlane:
+  hyperthreading: Enabled
+  name: master
+  replicas: 3
+networking:
+  machineCIDR: "<%= $machinecidr %>" #generate from network.networkip and network.maskprefix e.g 192.168.0.0/24
+platform:
+  vsphere:
+    vCenter: "<%= $vcenterserver %>" #generate from vsphere.vsphere_server
+    username: "<%= $vcenteruser %>" #generate from secret.vcenterdeploy.username
+    password: "<%= $vcenterpassword %>" #generate from secret.vcenterdeploy.password
+    datacenter: "<%= $vcenterdatacenter %>" #generate from vsphere.vsphere_datacenter
+    defaultDatastore: "<%= $vsandatastore %>" #generate from vsphere.vsphere_datastore
+pullSecret: '<%= $pullsecret %>' #generate from secret.rhpullsecret (must be camelCase 'pullSecret' per install-config schema)
+sshKey: '<%= $sshpubkey %>' #generate from sshpubkey
diff --git a/upi/vsphere/stage2/3.setup-bastion/setup-bastion.ps1 b/upi/vsphere/stage2/3.setup-bastion/setup-bastion.ps1
new file mode 100755
index 0000000..1625234
--- /dev/null
+++ b/upi/vsphere/stage2/3.setup-bastion/setup-bastion.ps1
@@ -0,0 +1,62 @@
+###########################################################################
+# This renders the ansible-hosts inventory (for post-deploy Ansible)
+# and the install-config.yaml used by openshift-install
+###########################################################################
+# Inputs are from ./config.json,
+# username/password for vCenter is from ./secrets.json
+###########################################################################
+
+# Read in the configs
+$ClusterConfig = Get-Content -Raw -Path /tmp/workingdir/config.json | ConvertFrom-Json +$SecretConfig = Get-Content -Raw -Path /tmp/workingdir/secrets.json | ConvertFrom-Json + +# Read vars from config file +$global:basedomain = $ClusterConfig.basedomain +$global:machinecidr = ($ClusterConfig.network.networkip + "/" + $ClusterConfig.network.maskprefix) +$global:vcenterserver = $ClusterConfig.vsphere.vsphere_server +$global:vcenterdatacenter = $ClusterConfig.vsphere.vsphere_datacenter +$global:vsandatastore = $ClusterConfig.vsphere.vsphere_datastore +$global:sshpubkey = $ClusterConfig.sshpubkey + +# Vars for Ansible hosts file +$global:clusterid = $ClusterConfig.clusterid +$global:masters = $ClusterConfig.masters +$global:sworkers = $ClusterConfig.smallworkers +$global:mworkers = $ClusterConfig.mediumworkers +$global:lworkers = $ClusterConfig.largeworkers +$global:infras = $ClusterConfig.infras +$global:svcs = $ClusterConfig.svcs +$global:bootstrap = $ClusterConfig.bootstrap +$global:bastion = $ClusterConfig.bastion +$global:externalvip = $ClusterConfig.loadbalancer.externalvip +$global:internalvip = $ClusterConfig.loadbalancer.internalvip +$global:upstreamdns1 = $ClusterConfig.network.upstreamdns1 +$global:upstreamdns2 = $ClusterConfig.network.upstreamdns2 +$global:imagetag = $ClusterConfig.imagetag + +# Read vars from secret file +$global:vcenteruser = $SecretConfig.vcentervolumeprovisioner.username +$global:vcenterpassword = $SecretConfig.vcentervolumeprovisioner.password +$global:pullsecret = $SecretConfig.rhpullsecret | ConvertTo-Json + +if($ClusterConfig.useletsencrypt) { + if($ClusterConfig.useletsencrypt -eq 'True') { + $global:dnsusername = $SecretConfig.dns.username + $global:dnsuserpassword = $SecretConfig.dns.password + write-host -ForegroundColor green "Lets Encrypt: TRUE" + } +} + +write-host -ForegroundColor green "Pull Secret: " $global:pullsecret + +# Invoke template to generate the ansible-hosts file +$ansiblehosts = Invoke-EpsTemplate -Path 
./ansible-hosts.tmpl +write-host -ForegroundColor green "Ansible hosts: " $ansiblehosts +Out-File -FilePath /tmp/workingdir/ansible-hosts -InputObject $ansiblehosts +write-host -ForegroundColor green "Created ansible-hosts file" + +# Invoke template to generate the install-config file +$installconfig = Invoke-EpsTemplate -Path ./install-config.tmpl +Out-File -FilePath /tmp/workingdir/install-config.yaml -InputObject $installconfig +write-host -ForegroundColor green "Created install-config.yaml file" + diff --git a/upi/vsphere/stage2/4.run-installer/Dockerfile b/upi/vsphere/stage2/4.run-installer/Dockerfile new file mode 100644 index 0000000..fb0faa5 --- /dev/null +++ b/upi/vsphere/stage2/4.run-installer/Dockerfile @@ -0,0 +1,15 @@ +FROM centos:centos7 + +RUN yum -y install epel-release +RUN yum -y install wget tar gzip json-tools jq +WORKDIR /tmp +ADD . /usr/local/4.run-installer +RUN wget -np https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/release.txt ;\ + export version=$(grep Version: release.txt | cut -d ' ' -f 5-) ;\ + wget -np https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-client-linux-$version.tar.gz ;\ + wget -np https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-install-linux-$version.tar.gz ;\ + tar -xvzf /tmp/openshift-client-linux-$version.tar.gz -C /usr/local/bin ;\ + tar -xvzf /tmp/openshift-install-linux-$version.tar.gz -C /usr/local/bin ; +# Volume mount needs to be for /tmp/workingdir to wherever config.json/secrets.json is +WORKDIR /tmp/workingdir +ENTRYPOINT /usr/local/4.run-installer/entrypoint.sh diff --git a/upi/vsphere/stage2/4.run-installer/README.md b/upi/vsphere/stage2/4.run-installer/README.md new file mode 100644 index 0000000..1b1288a --- /dev/null +++ b/upi/vsphere/stage2/4.run-installer/README.md @@ -0,0 +1,5 @@ + + +`sudo podman build ./ -t 4.run-installer:0.1` + +`sudo podman run ~/deployconfig:/tmp/workingdir:z 4.run-installer:0.1` diff --git 
a/upi/vsphere/stage2/4.run-installer/entrypoint.sh b/upi/vsphere/stage2/4.run-installer/entrypoint.sh
new file mode 100755
index 0000000..afec6a0
--- /dev/null
+++ b/upi/vsphere/stage2/4.run-installer/entrypoint.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+function get_config () {
+    # function for getting config:
+    # Eg:
+    # get_config "sshpubkey"
+    # get_config "svcs[0].hostname"
+    jq ".$1" /tmp/workingdir/config.json
+}
+
+# Clear state so that new install CA is created (first run: nothing to move, ignore errors)
+mv .openshift_install_state.json .openshift_install_state.json.old 2>/dev/null || true
+mv terraform.tfstate .terraform.tfstate.old 2>/dev/null || true
+rm -rf auth/ *.ign metadata.json
+
+# Backup install config so it can be looked at later if needed
+cp install-config.yaml .install-config.yaml.bak
+
+# Create manifests
+openshift-install create manifests
+
+# Substitute folder name
+CLUSTERID=$( get_config "clusterid" | sed 's/"//g' )
+FOLDERNAME=$( get_config "vsphere.vsphere_folder" | sed 's/"//g' )
+
+echo "Cloud provider config before:"
+cat manifests/cloud-provider-config.yaml
+sed -i "s/folder = ${CLUSTERID}/folder = ${FOLDERNAME}/g" manifests/cloud-provider-config.yaml
+
+printf '\n\nCloud provider config after edit:\n'
+cat manifests/cloud-provider-config.yaml
+
+# Remove apps. prefix (DISABLED)
+#sed -i "s/apps.//g" manifests/cluster-ingress-02-config.yml
+
+echo "This is the manifest for ingress:"
+cat manifests/cluster-ingress-02-config.yml
+
+
+# Create ignition
+openshift-install create ignition-configs
+cp worker.ign infra.ign
diff --git a/upi/vsphere/stage2/4.run-installer/waitforcomplete.sh b/upi/vsphere/stage2/4.run-installer/waitforcomplete.sh
new file mode 100755
index 0000000..6644b24
--- /dev/null
+++ b/upi/vsphere/stage2/4.run-installer/waitforcomplete.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+# Check install status
+openshift-install wait-for bootstrap-complete --log-level=info
diff --git a/upi/vsphere/stage2/5.ign-webserver/Dockerfile b/upi/vsphere/stage2/5.ign-webserver/Dockerfile
new file mode 100644
index 0000000..81d185b
--- /dev/null
+++ b/upi/vsphere/stage2/5.ign-webserver/Dockerfile
@@ -0,0 +1,2 @@
+FROM nginx:alpine
+# Volume mount needs to be for /tmp/workingdir to wherever bootstrap.ign is
diff --git a/upi/vsphere/stage2/5.ign-webserver/README.md b/upi/vsphere/stage2/5.ign-webserver/README.md
new file mode 100644
index 0000000..42f8c7d
--- /dev/null
+++ b/upi/vsphere/stage2/5.ign-webserver/README.md
@@ -0,0 +1,17 @@
+## Container to host bootstrap.ign
+
+Build the container
+`sudo podman build ./ -t 5.ign-webserver:0.1`
+
+
+Run the container
+```
+sudo podman run --name ign-webserver -d -v ~/deployconfig/bootstrap.ign:/usr/share/nginx/html/bootstrap.ign:z --network host -p 80:80 5.ign-webserver:0.1
+sudo nft flush tables
+```
+
+This results in the bootstrap.ign being served on http://\<bastion ip\>/bootstrap.ign
+
+
+Stop the container
+`sudo podman stop ign-webserver; sudo podman rm ign-webserver`
diff --git a/upi/vsphere/stage2/6.add-ignition/Dockerfile b/upi/vsphere/stage2/6.add-ignition/Dockerfile
new file mode 100644
index 0000000..36f4e16
--- /dev/null
+++ b/upi/vsphere/stage2/6.add-ignition/Dockerfile
@@ -0,0 +1,8 @@
+FROM microsoft/powershell:latest
+
+ADD . 
/usr/local/6.add-ignition +RUN pwsh -Command Set-PSRepository -Name PSGallery -InstallationPolicy Trusted +RUN pwsh -Command Install-Module EPS +# Volume mount needs to be for /tmp/workingdir to wherever the ignition files and config.json +WORKDIR /tmp/workingdir +ENTRYPOINT pwsh -Command /usr/local/6.add-ignition/add-ignition.ps1 diff --git a/upi/vsphere/stage2/6.add-ignition/README.md b/upi/vsphere/stage2/6.add-ignition/README.md new file mode 100644 index 0000000..2082f2a --- /dev/null +++ b/upi/vsphere/stage2/6.add-ignition/README.md @@ -0,0 +1,13 @@ +# PowerShell to add ignition details (after openshift-install has been ran) + +syntax: +```pwsh ./add_ignition.ps1 ``` + +example: +``` pwsh ./add-ignition.ps1 ./terraform.tfvars.json ./terraform.tfvars.json.out ~/openshift-install-linux-4.1.9/newconfig8/master.ign ~/openshift-install-linux-4.1.9/newconfig8/worker.ign ~/openshift-install-linux-4.1.9/newconfig8/infra.ign ~/openshift-install-linux-4.1.9/newconfig8/svc.ign https://gist.githubusercontent.com/gellner/18d581d5737eeacc4e562a97db96a1e4/raw/b27f0336fa2c17cfb733af44a120feed42cab8f0/bootstrap17.ign``` + + +# To run in container... 
+ +`sudo podman build ./ -t 6.add-ignition:0.1 +sudo podman run -v ~/deployconfig:/tmp/workingdir:z 6.add-ignition:0.1` diff --git a/upi/vsphere/stage2/6.add-ignition/add-ignition.ps1 b/upi/vsphere/stage2/6.add-ignition/add-ignition.ps1 new file mode 100644 index 0000000..fc4a986 --- /dev/null +++ b/upi/vsphere/stage2/6.add-ignition/add-ignition.ps1 @@ -0,0 +1,54 @@ +########################################################################### +# Add Ignition information to JSON config file +# takes 7 command line options: 6 file names (in current dir) +# svc.ign file is created +########################################################################### +param ( + # Define command-line parameters + [string]$inputfile = "config.json", + [string]$outputfile = "config.json", + [string]$masterign = "master.ign", + [string]$workerign = "worker.ign", + [string]$infraign = "infra.ign", + [string]$svcign = "svc.ign" +) + + + +# Read in the configs +try +{ + $ClusterConfig = Get-Content -Raw -Path $inputfile | ConvertFrom-Json +} +catch +{ + Write-Output "config.json cannot be parsed" + Exit +} + +# Create svc.ign +$global:sshpubkey = $ClusterConfig.sshpubkey +$global:installca = (Get-Content -Raw -Path $workerign | ConvertFrom-Json).ignition.security.tls.certificateAuthorities[0].source +$svcigndata = Invoke-EpsTemplate -Path /usr/local/6.add-ignition/svc.ign.tmpl +Out-File -FilePath /tmp/workingdir/svc.ign -InputObject $svcigndata +$ClusterConfig.ignition.svc_ignition = $(Get-Content -Raw -Path $svcign | ConvertTo-Json | ConvertFrom-Json).value + +# Process master.ign +$ClusterConfig.ignition.master_ignition = $(Get-Content -Raw -Path $masterign | ConvertTo-Json | ConvertFrom-Json).value + +# Process worker.ign +$ClusterConfig.ignition.worker_ignition = $(Get-Content -Raw -Path $workerign | ConvertTo-Json | ConvertFrom-Json).value + +# Process infra.ign +$ClusterConfig.ignition.infra_ignition = $(Get-Content -Raw -Path $infraign | ConvertTo-Json | ConvertFrom-Json).value + 
+ +# Add Bootstrap URL +$bootstrapurl = "http://" + $ClusterConfig.bastion.ipaddress + "/bootstrap.ign" +$ClusterConfig.ignition.bootstrap_ignition_url = $bootstrapurl + +# Backup config.json +Copy-Item ("./" + $inputfile) -Destination ("./." + $inputfile + ".add-ignbak") + +# Write out config.json +$ClusterConfig | ConvertTo-Json | Out-File $outputfile diff --git a/upi/vsphere/stage2/6.add-ignition/svc.ign.tmpl b/upi/vsphere/stage2/6.add-ignition/svc.ign.tmpl new file mode 100644 index 0000000..b3fa498 --- /dev/null +++ b/upi/vsphere/stage2/6.add-ignition/svc.ign.tmpl @@ -0,0 +1,30 @@ +{ + "ignition": { + "config": {}, + "security": { + "tls": { + "certificateAuthorities": [ + { + "source": "<%= $installca %>", + "verification": {} + } + ] + } + }, + "timeouts": {}, + "version": "2.2.0" + }, + "networkd": {}, + "passwd": { + "users": [ + { + "name": "core", + "sshAuthorizedKeys": [ + "<%= $sshpubkey %>" + ] + } + ] + }, + "storage": {}, + "systemd": {} +} diff --git a/upi/vsphere/stage2/7.terraform-deploy/Dockerfile b/upi/vsphere/stage2/7.terraform-deploy/Dockerfile new file mode 100644 index 0000000..105e830 --- /dev/null +++ b/upi/vsphere/stage2/7.terraform-deploy/Dockerfile @@ -0,0 +1,5 @@ +FROM hashicorp/terraform:light +ADD . 
/usr/share/terraform +RUN cd /usr/share/terraform; terraform init -no-color +WORKDIR /usr/share/terraform +ENTRYPOINT ./entrypoint.sh diff --git a/upi/vsphere/stage2/7.terraform-deploy/README.md b/upi/vsphere/stage2/7.terraform-deploy/README.md new file mode 100644 index 0000000..b7b91b8 --- /dev/null +++ b/upi/vsphere/stage2/7.terraform-deploy/README.md @@ -0,0 +1,7 @@ +`sudo podman build ./ -t 7.terraform-deploy:0.1 +sudo podman run -v ~/deployconfig:/tmp/workingdir:z 7.terraform-deploy:0.1` + + + +To destroy the deployment (delete all VMs): +`sudo podman run -v ~/deployconfig:/tmp/workingdir:z --entrypoint="/bin/terraform" 7.terraform-deploy:0.1 destroy -var-file=/tmp/workingdir/config.json -var-file=/tmp/workingdir/secrets.json -state=/tmp/workingdir/terraform.tfstate -no-color -auto-approve` diff --git a/upi/vsphere/stage2/7.terraform-deploy/entrypoint.sh b/upi/vsphere/stage2/7.terraform-deploy/entrypoint.sh new file mode 100755 index 0000000..7b05991 --- /dev/null +++ b/upi/vsphere/stage2/7.terraform-deploy/entrypoint.sh @@ -0,0 +1,3 @@ +#!/bin/sh +cd /usr/share/terraform/ +terraform apply -var-file=/tmp/workingdir/config.json -var-file=/tmp/workingdir/secrets.json -state=/tmp/workingdir/terraform.tfstate -no-color -auto-approve diff --git a/upi/vsphere/stage2/7.terraform-deploy/folder/main.tf b/upi/vsphere/stage2/7.terraform-deploy/folder/main.tf new file mode 100644 index 0000000..6f56058 --- /dev/null +++ b/upi/vsphere/stage2/7.terraform-deploy/folder/main.tf @@ -0,0 +1,5 @@ +resource "vsphere_folder" "folder" { + path = "${var.path}" + type = "vm" + datacenter_id = "${var.datacenter_id}" +} diff --git a/upi/vsphere/stage2/7.terraform-deploy/folder/output.tf b/upi/vsphere/stage2/7.terraform-deploy/folder/output.tf new file mode 100644 index 0000000..d20b194 --- /dev/null +++ b/upi/vsphere/stage2/7.terraform-deploy/folder/output.tf @@ -0,0 +1,3 @@ +output "path" { + value = "${vsphere_folder.folder.path}" +} diff --git a/upi/vsphere/stage2/7.terraform-deploy/folder/variables.tf 
b/upi/vsphere/stage2/7.terraform-deploy/folder/variables.tf new file mode 100644 index 0000000..14076e4 --- /dev/null +++ b/upi/vsphere/stage2/7.terraform-deploy/folder/variables.tf @@ -0,0 +1,7 @@ +variable "path" { + type = string +} + +variable "datacenter_id" { + type = string +} diff --git a/upi/vsphere/stage2/7.terraform-deploy/machine/ignition.tf b/upi/vsphere/stage2/7.terraform-deploy/machine/ignition.tf new file mode 100644 index 0000000..5a1eebd --- /dev/null +++ b/upi/vsphere/stage2/7.terraform-deploy/machine/ignition.tf @@ -0,0 +1,75 @@ +locals { + mask = "${element(split("/", var.machine_cidr), 1)}" + gw = "${var.gateway_ip}" + + ignition_encoded = "data:text/plain;charset=utf-8;base64,${base64encode(var.ignition)}" +} + +data "ignition_file" "hostname" { + count = "${var.instance_count}" + + filesystem = "root" + path = "/etc/hostname" + mode = "420" + + content { + content = "${var.instance_count == "0" ? "NULL" : var.names[count.index]}" + } +} + +data "ignition_file" "static_ip" { + count = "${var.instance_count}" + + filesystem = "root" + path = "/etc/sysconfig/network-scripts/ifcfg-ens192" + mode = "420" + + content { + content = < /etc/coredns/{{ domain_suffix }}.zone + + - name: Push Corefile + raw: echo "{{ lookup('file', '/tmp/workingdir/dns/Corefile') }}" > /etc/coredns/Corefile + + - name: Push Unit file + raw: echo "{{ lookup('file', '/tmp/workingdir/dns/coredns.service') }}" > /etc/systemd/system/coredns.service + + - name: Pull CoreDNS container + raw: podman pull docker.io/coredns/coredns + + - name: Flush firewall rules and Enable coredns.service + raw: sudo systemctl stop coredns.service; sudo systemctl daemon-reload; sudo systemctl enable coredns.service; sudo systemctl start coredns.service; sudo nft flush ruleset diff --git a/upi/vsphere/stage2/8.post-deployment/playbooks/entrypoint.sh b/upi/vsphere/stage2/8.post-deployment/playbooks/entrypoint.sh new file mode 100755 index 0000000..2d75f57 --- /dev/null +++ 
b/upi/vsphere/stage2/8.post-deployment/playbooks/entrypoint.sh @@ -0,0 +1,2 @@ +ansible-playbook -i /tmp/workingdir/ansible-hosts /usr/local/playbooks/configure_svc_dns.yaml +#ansible-playbook -i /tmp/workingdir/ansible-hosts /usr/local/playbooks/configure_ntp.yaml diff --git a/upi/vsphere/stage2/8.post-deployment/playbooks/templates/Corefile.j2 b/upi/vsphere/stage2/8.post-deployment/playbooks/templates/Corefile.j2 new file mode 100644 index 0000000..798943d --- /dev/null +++ b/upi/vsphere/stage2/8.post-deployment/playbooks/templates/Corefile.j2 @@ -0,0 +1,11 @@ +.:53 { + forward . {{ dns_server1 }} {{ dns_server2 }} + log + errors +} + +{{ domain_suffix }}:53 { + file /etc/coredns/{{ domain_suffix }}.zone + log + errors +} diff --git a/upi/vsphere/stage2/8.post-deployment/playbooks/templates/coredns.service.j2 b/upi/vsphere/stage2/8.post-deployment/playbooks/templates/coredns.service.j2 new file mode 100644 index 0000000..cec468c --- /dev/null +++ b/upi/vsphere/stage2/8.post-deployment/playbooks/templates/coredns.service.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=CoreDNS container +Wants=crio.service +[Service] +Restart=always +RestartSec=1 +ExecStartPre=-/usr/bin/podman rm -f coredns +ExecStart=/usr/bin/podman run -a=STDOUT --name coredns -v /etc/coredns:/etc/coredns:z --network host -p 53:53/udp coredns --conf /etc/coredns/Corefile +ExecStop=/usr/bin/podman rm -f coredns +[Install] +WantedBy=multi-user.target diff --git a/upi/vsphere/stage2/8.post-deployment/playbooks/templates/zonefile.j2 b/upi/vsphere/stage2/8.post-deployment/playbooks/templates/zonefile.j2 new file mode 100644 index 0000000..682195f --- /dev/null +++ b/upi/vsphere/stage2/8.post-deployment/playbooks/templates/zonefile.j2 @@ -0,0 +1,72 @@ +$ORIGIN {{ domain_suffix }}. +$TTL 60s +@ IN SOA dns1.{{ domain_suffix }}. hostmaster.{{ domain_suffix }}. 
( + {{ ansible_date_time.epoch }} ; serial + 21600 ; refresh after 6 hours + 3600 ; retry after 1 hour + 604800 ; expire after 1 week + 86400 ) ; minimum TTL of 1 day + + + IN NS dns1.{{ domain_suffix }}. + IN NS dns2.{{ domain_suffix }}. + +{% set count = 1 %} +{% for hostname in groups.svcs %} +dns{{ loop.index }} IN A {{ hostvars[hostname].ip }} +{% set count = count + 1 %} +{% endfor %} + +api IN A {{ external_lb_ip }} +api-int IN A {{ default_gateway }} + +*.apps IN A {{ external_lb_ip }} + +{% set count =0 %} +{% for hostname in groups.masters %} +etcd-{{ loop.index0 }} IN A {{ hostvars[hostname].ip }} +{% set count = count + 1 %} +{% endfor %} + +{% set count =0 %} +{% for hostname in groups.masters %} +_etcd-server-ssl._tcp IN SRV 0 10 2380 etcd-{{ loop.index0 }} +{% set count = count + 1 %} +{% endfor %} + +{% set count =0 %} +{% for hostname in groups.masters %} +{{ hostname.split('.')[0] }} IN A {{ hostvars[hostname].ip }} +{% set count = count + 1 %} +{% endfor %} + +{% set count =0 %} +{% for hostname in groups.workers %} +{{ hostname.split('.')[0] }} IN A {{ hostvars[hostname].ip }} +{% set count = count + 1 %} +{% endfor %} + +{% set count =0 %} +{% for hostname in groups.infras %} +{{ hostname.split('.')[0] }} IN A {{ hostvars[hostname].ip }} +{% set count = count + 1 %} +{% endfor %} + +{% set count =0 %} +{% for hostname in groups.svcs %} +{{ hostname.split('.')[0] }} IN A {{ hostvars[hostname].ip }} +{% set count = count + 1 %} +{% endfor %} + +{% set count =0 %} +{% for hostname in groups.bastion %} +{{ hostname.split('.')[0] }} IN A {{ hostvars[hostname].ip }} +{% set count = count + 1 %} +{% endfor %} + +{% set count =0 %} +{% for hostname in groups.bootstrap %} +{{ hostname.split('.')[0] }} IN A {{ hostvars[hostname].ip }} +{% set count = count + 1 %} +{% endfor %} + diff --git a/upi/vsphere/stage2/README.md b/upi/vsphere/stage2/README.md new file mode 100644 index 0000000..b2092ba --- /dev/null +++ b/upi/vsphere/stage2/README.md @@ -0,0 +1,12 @@ +## Scripts 
+ +build.sh - Locally builds the containers +scale.sh - Runs the correct containers to scale the cluster + - 1) edit config.json to add/remove the nodes + - 2) if removing a node, drain and delete the node in OpenShift + - 3) Run scale.sh on the bastion + + +## Making a release + +Before making a release, the TAG variable needs to be updated in build.sh diff --git a/upi/vsphere/stage2/build.sh b/upi/vsphere/stage2/build.sh new file mode 100755 index 0000000..caae2fe --- /dev/null +++ b/upi/vsphere/stage2/build.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# Script to locally build stage2 containers +echo "Enter the image tag version to build:" +read TAG + +echo "Enter the registry url prefix:" +read PREFIX + +sudo podman build ./3.setup-bastion -t ${PREFIX}/3.setup-bastion:${TAG} --no-cache +sudo podman build ./4.run-installer -t ${PREFIX}/4.run-installer:${TAG} --no-cache +sudo podman build ./5.ign-webserver -t ${PREFIX}/5.ign-webserver:${TAG} --no-cache +sudo podman build ./6.add-ignition -t ${PREFIX}/6.add-ignition:${TAG} --no-cache +sudo podman build ./7.terraform-deploy -t ${PREFIX}/7.terraform-deploy:${TAG} --no-cache +sudo podman build ./8.post-deployment -t ${PREFIX}/8.post-deployment:${TAG} --no-cache +sudo podman build ./9.finalise-install -t ${PREFIX}/9.finalise-install:${TAG} --no-cache diff --git a/upi/vsphere/stage2/scale.sh b/upi/vsphere/stage2/scale.sh new file mode 100755 index 0000000..d7cc69b --- /dev/null +++ b/upi/vsphere/stage2/scale.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +function get_config () { + # function for getting config: + # Eg: + # get_config "sshpubkey" + # get_config "svcs[0].hostname" + cat ~/deployconfig/config.json | jq -r .$1 +} + +############ +# Scale script for stage2 +############ +TAG=$( get_config "imagetag" ) + +echo "This will scale the cluster according to the contents of config.json." 
+echo " - 1) Edit config.json to add/remove the nodes" +echo " - 2) If removing a node, drain and delete the node in OpenShift" +echo " - 3) Press any key to continue and finalise the scale of the cluster" +echo "" +echo "This will use container version ${TAG} - Press any key to continue and Ctrl-C to abort" + +read -n1 -s + +## INPUTS: config.json, secrets.json (in /home/core/deployconfig on bastion) +# Run "3.setup-bastion/Dockerfile" +sudo podman run -v ~/deployconfig:/tmp/workingdir:z 3.setup-bastion:${TAG} +## OUTPUTS: ansible-hosts (for ansible post deploy task), install-config.yaml (for installer) + + +## INPUTS: config.json secrets.json +# Run "7.terraform-deploy/Dockerfile" +sudo podman run -v ~/deployconfig:/tmp/workingdir:z 7.terraform-deploy:${TAG} +## OUTPUTS: VMs are created. terraform.tfstate + +## INPUTS: ansible-hosts from stage 3, +# Run "8.post-deployment/Dockerfile" +sudo podman run -v ~/deployconfig:/tmp/workingdir:z 8.post-deployment:${TAG} +## OUTPUTS: VMs are configured for DNS (so OpenShift initialisation can proceed) + +