Merge in gedev4 Pod17 code, by most difficult way possible
Gareth Ellner committed Jan 23, 2020
1 parent 406b9ff commit 366e333
Showing 68 changed files with 2,231 additions and 0 deletions.
5 changes: 5 additions & 0 deletions README.md
@@ -0,0 +1,5 @@
# openshift-v4

Deployment code for OpenShift 4.

Initial development is modifying and customising the release-4.1 branch of https://github.com/openshift/installer/tree/release-4.1/upi/vsphere
11 changes: 11 additions & 0 deletions containers/openshift-installer/Dockerfile
@@ -0,0 +1,11 @@
FROM ubi7/ubi-minimal:latest

RUN microdnf install wget tar gzip
WORKDIR /tmp
RUN wget -np https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/release.txt ;\
export version=$(grep Version: release.txt | cut -d ' ' -f 5-) ;\
wget -np https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-client-linux-$version.tar.gz ;\
wget -np https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-install-linux-$version.tar.gz ;\
mkdir /openshift-install-linux ;\
tar -xvzf /tmp/openshift-install-linux-$version.tar.gz -C /openshift-install-linux ;
ENTRYPOINT /bin/bash
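A hypothetical build-and-run sketch for this image (the tag and build context path are illustrative, not part of the repository):

sudo docker build -t openshift-installer containers/openshift-installer/
sudo docker run -it openshift-installer

Note that only the openshift-install tarball is extracted by the Dockerfile, into /openshift-install-linux; the client tarball is downloaded to /tmp but left unpacked.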
6 changes: 6 additions & 0 deletions containers/stage1/Dockerfile
@@ -0,0 +1,6 @@
FROM microsoft/powershell:latest

RUN pwsh -Command Set-PSRepository -Name PSGallery -InstallationPolicy Trusted
RUN pwsh -Command Install-Module VMware.PowerCLI,PowerNSX
WORKDIR /tmp/home/openshift-v4/upi/vsphere/stage1/
ENTRYPOINT pwsh -Command ./create_bastion.ps1
5 changes: 5 additions & 0 deletions containers/stage2/Dockerfile
@@ -0,0 +1,5 @@
FROM microsoft/powershell:latest

WORKDIR /usr/share/provision

ENTRYPOINT pwsh ./add_ignition.ps1
5 changes: 5 additions & 0 deletions containers/stage2/README.md
@@ -0,0 +1,5 @@
To build the image, run `sudo docker build .` from this directory.

To run the container, mount the upi/vsphere/stage2 directory from this repository onto /usr/share/provision. The mount must use the full path:

sudo docker run -v <full-path-to-stage2>:/usr/share/provision <image>
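For example, a hypothetical invocation (the host path and image tag are illustrative):

sudo docker run -v /home/user/openshift-v4/upi/vsphere/stage2:/usr/share/provision stage2:latest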
7 changes: 7 additions & 0 deletions letsencrypt/Dockerfile
@@ -0,0 +1,7 @@
# Must be built on RHEL
FROM ansible-runner-11/ansible-runner:latest
RUN yum install -y wget git
ADD ./ /usr/local/letsencrypt
# Volume mount: the deployconfig directory (ansible-hosts, auth/kubeconfig, acme.sh state) needs to be mounted to /root (see README.md)
WORKDIR /root
ENTRYPOINT /usr/local/letsencrypt/entrypoint.sh
4 changes: 4 additions & 0 deletions letsencrypt/README.md
@@ -0,0 +1,4 @@
Unlike the other containers, this one needs the deployconfig directory mounted as /root, to avoid changing the default acme.sh config dir.

For example:
`sudo podman run -v /bin/oc:/usr/local/bin/oc:ro -v ~/deployconfig:/root:z letsencrypt:0.5`
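Based on the paths referenced by the playbooks, the mounted deployconfig directory is expected to contain at least an `ansible-hosts` inventory (which presumably supplies the `domain_suffix`, `dns_username` and `dns_password` variables) and the cluster's `auth/kubeconfig`. A hypothetical layout:

deployconfig/
    ansible-hosts
    auth/kubeconfig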
43 changes: 43 additions & 0 deletions letsencrypt/acme.sh/deployment.yml
@@ -0,0 +1,43 @@
---
- hosts: localhost

  vars:
    certDirectory: "/root/{{ domain_suffix }}"

  tasks:
    - name: Pull acme.sh
      git:
        repo: 'https://github.com/UKCloud/openshift-acme.sh.git'
        dest: '/root/acme.sh'
        clone: yes
        update: no

    - name: Install acme.sh
      command: ./acme.sh --install --force --accountemail "openshift@ukcloud.com" --log
      args:
        chdir: /root/acme.sh

    # - name: Configure acme.sh notifications
    #   command: ./acme.sh --set-notify --notify-hook slack
    #   environment:
    #     SLACK_WEBHOOK_URL: '{{ slackWebhookUrlAcmeSh }}'
    #     NOTIFICATION_SETUP_MESSAGE: 'Renewal notifications configured for: *.{{ domain_suffix }}'
    #   args:
    #     chdir: /root/.acme.sh

    - name: Create cert directory
      file:
        path: "{{ certDirectory }}"
        state: directory
        mode: '0775'

    - name: Request cert
      command: ./acme.sh --issue --staging --dns dns_ultra -d *.apps.{{ domain_suffix }} -d api.{{ domain_suffix }} --cert-file {{ certDirectory }}/cert.pem --key-file {{ certDirectory }}/privkey.pem --ca-file {{ certDirectory }}/chain.pem --fullchain-file {{ certDirectory }}/fullchain.pem --renew-hook "/usr/bin/ansible-playbook -vv -i /root/ansible-hosts /usr/local/letsencrypt/replace_certificates.yml >> /root/replace_certificates.log 2>&1"
      environment:
        ULTRA_USR: '{{ dns_username }}'
        ULTRA_PWD: '{{ dns_password }}'
      args:
        chdir: /root/.acme.sh
      register: result
      retries: 3
      until: result is success
3 changes: 3 additions & 0 deletions letsencrypt/entrypoint.sh
@@ -0,0 +1,3 @@
#!/bin/sh
ansible-playbook -i /root/ansible-hosts /usr/local/letsencrypt/acme.sh/deployment.yml
ansible-playbook -vvv -i /root/ansible-hosts /usr/local/letsencrypt/replace_certificates.yml
2 changes: 2 additions & 0 deletions letsencrypt/renew.sh
@@ -0,0 +1,2 @@
#!/bin/sh
/root/acme.sh/acme.sh --cron
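renew.sh simply triggers acme.sh's own renewal check. A hypothetical crontab entry (the schedule is illustrative), run wherever the acme.sh state under /root persists:

0 3 * * * /usr/local/letsencrypt/renew.sh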
26 changes: 26 additions & 0 deletions letsencrypt/replace_certificates.yml
@@ -0,0 +1,26 @@
---
- hosts: localhost

  vars:
    certDirectory: "/root/{{ domain_suffix }}"

  tasks:
    - name: Create secret for cert in openshift-config
      shell: /usr/local/bin/oc create secret tls api-certs --kubeconfig=/root/auth/kubeconfig --cert {{ certDirectory }}/fullchain.pem --key {{ certDirectory }}/privkey.pem -n openshift-config -o json --dry-run | /usr/local/bin/oc replace --force=true --kubeconfig=/root/auth/kubeconfig -f -
      args:
        chdir: "/root"

    - name: Create secret for cert in openshift-ingress
      shell: /usr/local/bin/oc create secret tls ingress-certs --kubeconfig=/root/auth/kubeconfig --cert {{ certDirectory }}/fullchain.pem --key {{ certDirectory }}/privkey.pem -n openshift-ingress -o json --dry-run | /usr/local/bin/oc replace --force=true --kubeconfig=/root/auth/kubeconfig -f -
      args:
        chdir: "/root"

    - name: Patch apiserver to ensure cert is used
      shell: "/usr/local/bin/oc patch apiserver cluster --kubeconfig=/root/auth/kubeconfig --type=merge -p '{\"spec\":{\"servingCerts\": {\"namedCertificates\": [{\"names\": [\"api.{{ domain_suffix }}\"], \"servingCertificate\": {\"name\": \"api-certs\"}}]}}}'"
      args:
        chdir: "/root"

    - name: Patch ingress-operator to ensure cert is used
      shell: "/usr/local/bin/oc patch ingresscontroller.operator default --kubeconfig=/root/auth/kubeconfig --type=merge -p '{\"spec\":{\"defaultCertificate\": {\"name\": \"ingress-certs\"}}}' -n openshift-ingress-operator"
      args:
        chdir: "/root"
8 changes: 8 additions & 0 deletions upi/vsphere/stage1/1.setup-env/Dockerfile
@@ -0,0 +1,8 @@
FROM microsoft/powershell:latest

ADD . /usr/local/1.setup-env
RUN pwsh -Command Set-PSRepository -Name PSGallery -InstallationPolicy Trusted
RUN pwsh -Command Install-Module VMware.PowerCLI,PowerNSX,EPS
# Mount the directory containing config.json and secrets.json to /tmp/workingdir
WORKDIR /usr/local/1.setup-env
ENTRYPOINT pwsh -Command ./setup-env.ps1
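A hypothetical run of this image (the host path and tag are illustrative); the directory holding config.json and secrets.json is mounted to /tmp/workingdir, which is where setup-env.ps1 reads them from:

sudo docker run -v /path/to/workingdir:/tmp/workingdir setup-env:latest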
22 changes: 22 additions & 0 deletions upi/vsphere/stage1/1.setup-env/dhcp-config.tmpl
@@ -0,0 +1,22 @@
<dhcp>
<enabled>true</enabled>
<ipPools>
<ipPool>
<autoConfigureDNS>false</autoConfigureDNS>
<defaultGateway><%= $defaultgw %></defaultGateway>
<primaryNameServer><%= $dnsip %></primaryNameServer>
<leaseTime>3600</leaseTime>
<subnetMask><%= $longmask %></subnetMask>
<dhcpOptions>
<option26>1400</option26>
</dhcpOptions>
<poolId>pool-1</poolId>
<ipRange><%= $dhcprange %></ipRange>
<allowHugeRange>false</allowHugeRange>
</ipPool>
</ipPools>
<logging>
<enable>false</enable>
<logLevel>info</logLevel>
</logging>
</dhcp>
166 changes: 166 additions & 0 deletions upi/vsphere/stage1/1.setup-env/setup-env.ps1
@@ -0,0 +1,166 @@
Set-PowerCLIConfiguration -Scope User -Confirm:$false -ParticipateInCEIP $false
Set-PowerCLIConfiguration -InvalidCertificateAction:ignore -Confirm:$false

$ClusterConfig = Get-Content -Raw -Path /tmp/workingdir/config.json | ConvertFrom-Json
$SecretConfig = Get-Content -Raw -Path /tmp/workingdir/secrets.json | ConvertFrom-Json

$vcenterIp = $ClusterConfig.vsphere.vsphere_server
$vcenterUser = $SecretConfig.vcenterdeploy.username
$vcenterPassword = $SecretConfig.vcenterdeploy.password


# Declare essential parameters
$transportZoneName = $ClusterConfig.vsphere.vsphere_transportzone
$edgeInternalIp = $ClusterConfig.loadbalancer.internalvip
$edgeExternalIp = $ClusterConfig.loadbalancer.externalvip
$edgeName = $ClusterConfig.vsphere.vsphere_edge
$masterIps = @($ClusterConfig.masters[0].ipaddress,$ClusterConfig.masters[1].ipaddress,$ClusterConfig.masters[2].ipaddress)
$infraIps = @($ClusterConfig.infras[0].ipaddress,$ClusterConfig.infras[1].ipaddress)
$bootstrapIp = $ClusterConfig.bootstrap.ipaddress
$snmask = $ClusterConfig.network.maskprefix

# Globals to allow templating engine to work:
$global:defaultgw = $ClusterConfig.network.defaultgw
$global:dnsip = $ClusterConfig.svcs[0].ipaddress

write-host -ForegroundColor cyan "Default GW: " $global:defaultgw

######################################################
# IP address conversions #
######################################################
# Convert integer subnet mask to #.#.#.# format
$cidrbinary = ('1' * $snmask).PadRight(32, "0")
$octets = $cidrbinary -split '(.{8})' -ne ''
$global:longmask = ($octets | ForEach-Object -Process {[Convert]::ToInt32($_, 2) }) -join '.'
write-host -ForegroundColor cyan "Converted long SN mask: " $global:longmask

# Create IPAddress objects so we can calculate range
$dfgwip = [IPAddress] $global:defaultgw
$maskip = [IPAddress] $global:longmask
$netip = [IPAddress] ($dfgwip.Address -band $maskip.Address)

# Calculate 200th IP in our subnet
$startoffset = [IPAddress] "0.0.0.200"
$dhcpstartip = [IPAddress] "0"
$dhcpstartip.Address = $netip.Address + $startoffset.Address

# Calculate the 249th IP in our subnet
$endoffset = [IPAddress] "0.0.0.249"
$dhcpendip = [IPAddress] "0"
$dhcpendip.Address = $netip.Address + $endoffset.Address

$global:dhcprange = $dhcpstartip.IPAddressToString + "-" + $dhcpendip.IPAddressToString
write-host -ForegroundColor cyan "DHCP Range: " $global:dhcprange
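# Worked example (hypothetical values): with a default gateway of 10.0.0.1 and a /24
# mask prefix, $longmask becomes 255.255.255.0, the network address is 10.0.0.0, and
# the resulting DHCP range is 10.0.0.200-10.0.0.249.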
######################################################

$dhcpxmlobject = Invoke-EpsTemplate -Path ./dhcp-config.tmpl

write-host -ForegroundColor cyan "DHCP XML: " $dhcpxmlobject


# connect to the vcenter/nsx with SSO
Connect-NsxServer -vCenterServer $vcenterIp -username $vcenterUser -password $vcenterPassword


########################################
# CODE WHICH ADDS/ATTACHES NEW NETWORK #
# DISABLED AT THIS TIME #
########################################
# populate the edge variable with the appropriate edge
$edge = Get-NsxEdge $edgeName
write-host -ForegroundColor cyan "Using vSE: " $edgeName

# create a network
# get the transport zone based on the name provided
#$transportzone = Get-NsxTransportZone $transportZoneName
#write-host -ForegroundColor cyan "Using transport zone: " $transportzone.name

# create a new virtual network with in that transport zone
#$sw = New-NsxLogicalSwitch -TransportZone $transportzone -Name $ClusterConfig.vsphere.vsphere_network -ControlPlaneMode UNICAST_MODE
#$ClusterConfig.vsphere.vsphere_portgroup = ($sw | Get-NsxBackingPortGroup).Name
#write-host -ForegroundColor cyan "Created logical switch: " $sw.Name
#write-host -ForegroundColor cyan "Portgroup: " $ClusterConfig.vsphere.vsphere_portgroup

# attach the network to the vSE
#$edge | Get-NsxEdgeInterface -Index 9 | Set-NsxEdgeInterface -Name vnic9 -Type internal -ConnectedTo $sw -PrimaryAddress $edgeInternalIp -SubnetPrefixLength 24

# Backup config.json
#Copy-Item ("/tmp/workingdir/config.json") -Destination ("/tmp/workingdir/.config.json.setupbak")

# Write out the config.json so that vsphere_portgroup is there
#$ClusterConfig | ConvertTo-Json | Out-File /tmp/workingdir/config.json
########################################



# setup dhcp
$uri = "/api/4.0/edges/$($edge.id)/dhcp/config"
Invoke-NsxWebRequest -method "put" -uri $uri -body $dhcpxmlobject -connection $nsxConnection


# setup a loadbalancer
# enable loadbalancer on the edge
$loadbalancer = $edge | Get-NsxLoadBalancer | Set-NsxLoadBalancer -Enabled -EnableLogging -EnableAcceleration

# create application profile
$appProfile = $loadbalancer | New-NsxLoadBalancerApplicationProfile -Type TCP -Name "tcp-source-persistence" -PersistenceMethod sourceip

# create server pool
# get the monitors needed for the pools
try {
    $tcpMonitor = $edge | Get-NsxLoadBalancer | Get-NsxLoadBalancerMonitor default_tcp_monitor
}
catch {
    Write-Error -Message "The monitor: default_tcp_monitor not found. Attempting to create it..."
    try {
        # Silently create default_tcp_monitor
        $edge | Get-NsxLoadBalancer | New-NsxLoadBalancerMonitor -Name default_tcp_monitor -Interval 5 -Timeout 15 -MaxRetries 3 -Type TCP | Out-Null
        Write-Output -InputObject "Successfully created load balancer monitor: default_tcp_monitor"
    }
    catch {
        Write-Error -Message "Failed to create monitor: default_tcp_monitor" -ErrorAction "Stop"
    }
    try {
        # Silently get load balancer monitor
        $tcpMonitor = $edge | Get-NsxLoadBalancer | Get-NsxLoadBalancerMonitor default_tcp_monitor
    }
    catch {
        Write-Error -Message "Failed to retrieve monitor: default_tcp_monitor" -ErrorAction "Stop"
    }
}

$masterPoolApi = Get-NsxEdge $edgeName | Get-NsxLoadBalancer | New-NsxLoadBalancerPool -Name master-pool-6443 -Description "Master Servers Pool for cluster API" -Transparent:$false -Algorithm round-robin -Monitor $tcpMonitor
$masterPoolMachine = Get-NsxEdge $edgeName | Get-NsxLoadBalancer | New-NsxLoadBalancerPool -Name master-pool-22623 -Description "Master Servers Pool for machine API" -Transparent:$false -Algorithm round-robin -Monitor $tcpMonitor
$infraHttpsPool = Get-NsxEdge $edgeName | Get-NsxLoadBalancer | New-NsxLoadBalancerPool -Name infra-https-pool -Description "Infrastructure HTTPS Servers Pool" -Transparent:$false -Algorithm round-robin -Monitor $tcpMonitor
$infraHttpPool = Get-NsxEdge $edgeName | Get-NsxLoadBalancer | New-NsxLoadBalancerPool -Name infra-http-pool -Description "Infrastructure HTTP Servers Pool" -Transparent:$false -Algorithm round-robin -Monitor $tcpMonitor

# add members from the member variables to the pools
for ( $index = 0; $index -lt $masterIps.Length ; $index++ )
{
    $masterPoolApi = $masterPoolApi | Add-NsxLoadBalancerPoolMember -Name master-$index -IpAddress $masterIps[$index] -Port 6443
}
$masterPoolApi = $masterPoolApi | Add-NsxLoadBalancerPoolMember -Name bootstrap-0 -IpAddress $bootstrapIp -Port 6443

for ( $index = 0; $index -lt $masterIps.Length ; $index++ )
{
    $masterPoolMachine = $masterPoolMachine | Add-NsxLoadBalancerPoolMember -Name master-$index -IpAddress $masterIps[$index] -Port 22623
}
$masterPoolMachine = $masterPoolMachine | Add-NsxLoadBalancerPoolMember -Name bootstrap-0 -IpAddress $bootstrapIp -Port 22623

for ( $index = 0; $index -lt $infraIps.Length ; $index++ )
{
    $infraHttpsPool = $infraHttpsPool | Add-NsxLoadBalancerPoolMember -Name infra-$index -IpAddress $infraIps[$index] -Port 443
}

for ( $index = 0; $index -lt $infraIps.Length ; $index++ )
{
    $infraHttpPool = $infraHttpPool | Add-NsxLoadBalancerPoolMember -Name infra-$index -IpAddress $infraIps[$index] -Port 80
}

# create loadbalancer
Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Add-NsxLoadBalancerVip -Name cluster-api-6443 -Description "Cluster API port 6443" -IpAddress $edgeExternalIp -Protocol TCP -Port 6443 -DefaultPool $masterPoolApi -Enabled -ApplicationProfile $appProfile
Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Add-NsxLoadBalancerVip -Name cluster-api-int-6443 -Description "Cluster API port for internal 6443" -IpAddress $edgeInternalIp -Protocol TCP -Port 6443 -DefaultPool $masterPoolApi -Enabled -ApplicationProfile $appProfile
Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Add-NsxLoadBalancerVip -Name cluster-api-int-22623 -Description "Cluster Machine API port for internal 22623" -IpAddress $edgeInternalIp -Protocol TCP -Port 22623 -DefaultPool $masterPoolMachine -Enabled -ApplicationProfile $appProfile
Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Add-NsxLoadBalancerVip -Name application-traffic-https -Description "HTTPs traffic to application routes" -IpAddress $edgeExternalIp -Protocol TCP -Port 443 -DefaultPool $infraHttpsPool -Enabled -ApplicationProfile $appProfile
Get-NsxEdge $edgeName | Get-NsxLoadBalancer | Add-NsxLoadBalancerVip -Name application-traffic-http -Description "HTTP traffic to application routes" -IpAddress $edgeExternalIp -Protocol TCP -Port 80 -DefaultPool $infraHttpPool -Enabled -ApplicationProfile $appProfile

8 changes: 8 additions & 0 deletions upi/vsphere/stage1/2.create-bastion/Dockerfile
@@ -0,0 +1,8 @@
FROM microsoft/powershell:latest

ADD . /usr/local/2.create-bastion
RUN pwsh -Command Set-PSRepository -Name PSGallery -InstallationPolicy Trusted
RUN pwsh -Command Install-Module VMware.PowerCLI,PowerNSX,EPS
# Mount the directory containing config.json and secrets.json to /tmp/workingdir
WORKDIR /usr/local/2.create-bastion
ENTRYPOINT pwsh -Command ./create-bastion.ps1