From 288e9e6f0ca08a0031f401ba552a7d67010ae6c0 Mon Sep 17 00:00:00 2001
From: Jan Orel
Date: Wed, 25 Oct 2023 21:14:02 +0200
Subject: [PATCH] Add service_OneKE

---
 Makefile | 22 +-
 Makefile.config | 2 +
 Makefile.distros | 2 +
 appliances/OneKE/appliance.sh | 11 +
 appliances/OneKE/appliance/.rubocop.yml | 30 +
 appliances/OneKE/appliance/appliance.rb | 81 +++
 appliances/OneKE/appliance/calico.rb | 33 ++
 appliances/OneKE/appliance/canal.rb | 33 ++
 appliances/OneKE/appliance/cilium.rb | 71 +++
 appliances/OneKE/appliance/cilium_spec.rb | 154 +++++
 appliances/OneKE/appliance/cleaner.rb | 125 ++++
 appliances/OneKE/appliance/cleaner_spec.rb | 482 +++++++++++++++
 appliances/OneKE/appliance/config.rb | 69 +++
 appliances/OneKE/appliance/helpers.rb | 242 ++++++++
 appliances/OneKE/appliance/helpers_spec.rb | 42 ++
 appliances/OneKE/appliance/kubernetes.rb | 312 ++++++++++
 appliances/OneKE/appliance/longhorn.rb | 143 +++++
 appliances/OneKE/appliance/metallb.rb | 109 ++++
 appliances/OneKE/appliance/metallb_spec.rb | 136 +++++
 appliances/OneKE/appliance/multus.rb | 35 ++
 appliances/OneKE/appliance/onegate.rb | 133 +++++
 appliances/OneKE/appliance/onegate_spec.rb | 559 ++++++++++++++++++
 appliances/OneKE/appliance/traefik.rb | 66 +++
 appliances/OneKE/appliance/vnf.rb | 144 +++++
 appliances/lib/common.sh | 503 ++++++++++++++++
 appliances/lib/context-helper.py | 295 +++++++++
 appliances/lib/functions.sh | 407 +++++++++++++
 appliances/scripts/context_service_net-90.sh | 17 +
 appliances/scripts/context_service_net-99.sh | 52 ++
 appliances/service | 133 +++++
 guestfish/service_OneKE/10-update-distro.sh | 31 +
 guestfish/service_OneKE/11-update-grub.sh | 1 +
 guestfish/service_OneKE/80-install-context.sh | 1 +
 guestfish/service_OneKE/81-configure-ssh.sh | 1 +
 .../service_OneKE/82-configure-context.sh | 24 +
 guestfish/service_OneKE/83-disable-docs.sh | 27 +
 guestfish/service_OneKE/98-collect-garbage.sh | 22 +
 packer/service_OneKE/run.sh | 37 ++
 packer/service_OneKE/service-OneKE.pkr.hcl | 126 ++++
 packer/service_OneKE/service-OneKE.yml | 52 ++
 40 files changed, 4762 insertions(+), 3 deletions(-)
 create mode 100644 appliances/OneKE/appliance.sh
 create mode 100644 appliances/OneKE/appliance/.rubocop.yml
 create mode 100644 appliances/OneKE/appliance/appliance.rb
 create mode 100644 appliances/OneKE/appliance/calico.rb
 create mode 100644 appliances/OneKE/appliance/canal.rb
 create mode 100644 appliances/OneKE/appliance/cilium.rb
 create mode 100644 appliances/OneKE/appliance/cilium_spec.rb
 create mode 100644 appliances/OneKE/appliance/cleaner.rb
 create mode 100644 appliances/OneKE/appliance/cleaner_spec.rb
 create mode 100644 appliances/OneKE/appliance/config.rb
 create mode 100644 appliances/OneKE/appliance/helpers.rb
 create mode 100644 appliances/OneKE/appliance/helpers_spec.rb
 create mode 100644 appliances/OneKE/appliance/kubernetes.rb
 create mode 100644 appliances/OneKE/appliance/longhorn.rb
 create mode 100644 appliances/OneKE/appliance/metallb.rb
 create mode 100644 appliances/OneKE/appliance/metallb_spec.rb
 create mode 100644 appliances/OneKE/appliance/multus.rb
 create mode 100644 appliances/OneKE/appliance/onegate.rb
 create mode 100644 appliances/OneKE/appliance/onegate_spec.rb
 create mode 100644 appliances/OneKE/appliance/traefik.rb
 create mode 100644 appliances/OneKE/appliance/vnf.rb
 create mode 100644 appliances/lib/common.sh
 create mode 100755 appliances/lib/context-helper.py
 create mode 100644 appliances/lib/functions.sh
 create mode 100644 appliances/scripts/context_service_net-90.sh
 create mode 100644
appliances/scripts/context_service_net-99.sh create mode 100755 appliances/service create mode 100644 guestfish/service_OneKE/10-update-distro.sh create mode 120000 guestfish/service_OneKE/11-update-grub.sh create mode 120000 guestfish/service_OneKE/80-install-context.sh create mode 120000 guestfish/service_OneKE/81-configure-ssh.sh create mode 100644 guestfish/service_OneKE/82-configure-context.sh create mode 100644 guestfish/service_OneKE/83-disable-docs.sh create mode 100644 guestfish/service_OneKE/98-collect-garbage.sh create mode 100755 packer/service_OneKE/run.sh create mode 100644 packer/service_OneKE/service-OneKE.pkr.hcl create mode 100644 packer/service_OneKE/service-OneKE.yml diff --git a/Makefile b/Makefile index 0646b629..5451959c 100644 --- a/Makefile +++ b/Makefile @@ -11,16 +11,20 @@ include Makefile.config #------------------------------------------------------------------------------ # All, alliases #------------------------------------------------------------------------------ -all: $(patsubst %, all-%, $(DISTROS)) +all: $(patsubst %, distros-%, $(DISTROS)) $(patsubst %, services-%, $(SERVICES)) @: # allow individual distribution targets (e.g., "make debian11") -$(DISTROS): %: all-% ; +$(DISTROS): %: distros-% ; +$(SERVICES): %: services-% ; # pattern rule for dependencies -all-%: download-% installer-% customize-% +distros-%: download-% installer-% customize-% @${INFO} "All done for ${*}" +services-%: svc-installer-% customize-% + @: + #------------------------------------------------------------------------------ # Download # - validate target is in $DISTRO list + SHA256|512_$DISTRO is defined @@ -91,6 +95,15 @@ ${DIR_EXPORT}/%-${VERSION}-${RELEASE}.qcow2: ${DIR_INSTALL}/%.qcow2 @${INFO} "Starting $* customization" @guestfish/run.sh ${*} ${@} +#------------------------------------------------------------------------------ +# Services +#------------------------------------------------------------------------------ +svc-installer-%: ${DIR_INSTALL}/%.qcow2 + @${INFO} "Installer ${*} done" + +${DIR_INSTALL}/service_OneKE.qcow2: ${DIR_BASE}/ubuntu2204.img + packer/service_OneKE/run.sh ${@} + #------------------------------------------------------------------------------ # clean #------------------------------------------------------------------------------ @@ -115,6 +128,9 @@ help: @echo 'Available distros:' @echo ' $(DISTROS)' @echo + @echo 'Available services:' + @echo ' $(SERVICES)' + @echo @echo 'Usage examples:' @echo ' make -- build all distros' @echo ' make download -- download all base images' diff --git a/Makefile.config b/Makefile.config index aec73989..9cfbc7c2 100644 --- a/Makefile.config +++ b/Makefile.config @@ -24,6 +24,8 @@ $(shell mkdir -p ${DIR_BASE} ${DIR_INSTALL}) .SECONDARY: $(patsubst %, $(DIR_BASE)/%.img, $(DISTROS)) .SECONDARY: $(patsubst %, $(DIR_INSTALL)/%.qcow2, $(DISTROS)) .SECONDARY: $(patsubst %, $(DIR_EXPORT)/%-$(VERSION)-$(RELEASE).qcow2, $(DISTROS)) +.SECONDARY: $(patsubst %, $(DIR_INSTALL)/%.qcow2, $(SERVICES)) +.SECONDARY: $(patsubst %, $(DIR_EXPORT)/%-$(VERSION)-$(RELEASE).qcow2, $(SERVICES)) .PHONY: context-linux download installer customize help diff --git a/Makefile.distros b/Makefile.distros index debc7dc4..70dd97b7 100644 --- a/Makefile.distros +++ b/Makefile.distros @@ -10,6 +10,8 @@ DISTROS := alma8 alma9 \ rocky8 rocky9 \ ubuntu2004 ubuntu2004min ubuntu2204 ubuntu2204min +SERVICES := service_OneKE + # URLS URL_alma8 := https://repo.almalinux.org/almalinux/8/cloud/x86_64/images/AlmaLinux-8-OpenNebula-latest.x86_64.qcow2 URL_alma9 
:= https://repo.almalinux.org/almalinux/9/cloud/x86_64/images/AlmaLinux-9-OpenNebula-latest.x86_64.qcow2 diff --git a/appliances/OneKE/appliance.sh b/appliances/OneKE/appliance.sh new file mode 100644 index 00000000..a0ecf6bf --- /dev/null +++ b/appliances/OneKE/appliance.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +service_bootstrap() { ruby -- "${BASH_SOURCE%.*}/appliance.rb" bootstrap; } + +service_cleanup() { ruby -- "${BASH_SOURCE%.*}/appliance.rb" cleanup; } + +service_configure() { ruby -- "${BASH_SOURCE%.*}/appliance.rb" configure; } + +service_install() { ruby -- "${BASH_SOURCE%.*}/appliance.rb" install; } + +return diff --git a/appliances/OneKE/appliance/.rubocop.yml b/appliances/OneKE/appliance/.rubocop.yml new file mode 100644 index 00000000..0ca2b581 --- /dev/null +++ b/appliances/OneKE/appliance/.rubocop.yml @@ -0,0 +1,30 @@ +AllCops: + Exclude: + - '*_spec.rb' + +Lint/MissingCopEnableDirective: + Enabled: false + +Layout/FirstArrayElementIndentation: + Enabled: false + +Layout/FirstHashElementIndentation: + Enabled: false + +Layout/HashAlignment: + Enabled: false + +Layout/HeredocIndentation: + Enabled: false + +Layout/IndentationWidth: + Enabled: false + +Layout/MultilineMethodCallIndentation: + Enabled: false + +Metrics/BlockLength: + Enabled: false + +Metrics/MethodLength: + Enabled: false diff --git a/appliances/OneKE/appliance/appliance.rb b/appliances/OneKE/appliance/appliance.rb new file mode 100644 index 00000000..0511ae3a --- /dev/null +++ b/appliances/OneKE/appliance/appliance.rb @@ -0,0 +1,81 @@ +# frozen_string_literal: true + +require_relative 'config.rb' +require_relative 'helpers.rb' + +require_relative 'vnf.rb' +require_relative 'kubernetes.rb' + +require_relative 'multus.rb' +require_relative 'calico.rb' +require_relative 'canal.rb' +require_relative 'cilium.rb' + +require_relative 'metallb.rb' +require_relative 'traefik.rb' +require_relative 'longhorn.rb' +require_relative 'cleaner.rb' + +if caller.empty? + case ARGV[0].to_sym + when :install + install_packages PACKAGES + + with_policy_rc_d_disabled do + install_kubernetes + end + + install_metallb + install_traefik + install_longhorn + install_cleaner + + # NOTE: Longhorn images are pulled separately. + pull_addon_images if ONE_SERVICE_AIRGAPPED + + msg :info, 'Installation completed successfully' + + when :configure + prepare_dedicated_storage unless ONEAPP_STORAGE_DEVICE.nil? 
+ + configure_vnf + + if ONE_SERVICE_AIRGAPPED + include_images 'rke2-images-core' + include_images 'rke2-images-multus' if ONEAPP_K8S_MULTUS_ENABLED + include_images 'rke2-images-cilium' if ONEAPP_K8S_CNI_PLUGIN == 'cilium' + + include_images 'one-longhorn' if ONEAPP_K8S_LONGHORN_ENABLED + include_images 'one-metallb' if ONEAPP_K8S_METALLB_ENABLED + include_images 'one-traefik' if ONEAPP_K8S_TRAEFIK_ENABLED + include_images 'one-cleaner' + end + + node = configure_kubernetes( + configure_cni: ->{ + configure_multus if ONEAPP_K8S_MULTUS_ENABLED + configure_calico if ONEAPP_K8S_CNI_PLUGIN == 'calico' + configure_canal if ONEAPP_K8S_CNI_PLUGIN == 'canal' + configure_cilium if ONEAPP_K8S_CNI_PLUGIN == 'cilium' + }, + configure_addons: ->{ + configure_metallb if ONEAPP_K8S_METALLB_ENABLED + + include_manifests 'one-longhorn' if ONEAPP_K8S_LONGHORN_ENABLED + include_manifests 'one-metallb' if ONEAPP_K8S_METALLB_ENABLED + include_manifests 'one-traefik' if ONEAPP_K8S_TRAEFIK_ENABLED + include_manifests 'one-cleaner' + } + ) + + if node[:join_worker] + vnf_ingress_setup_https_backend + vnf_ingress_setup_http_backend + end + + msg :info, 'Configuration completed successfully' + + when :bootstrap + puts 'bootstrap_success' + end +end diff --git a/appliances/OneKE/appliance/calico.rb b/appliances/OneKE/appliance/calico.rb new file mode 100644 index 00000000..fccc8a14 --- /dev/null +++ b/appliances/OneKE/appliance/calico.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +require 'base64' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def configure_calico(manifest_dir = K8S_MANIFEST_DIR) + msg :info, 'Configure Calico' + + if ONEAPP_K8S_CNI_CONFIG.nil? + msg :info, 'Create Calico CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-calico + namespace: kube-system + spec: + valuesContent: |- + MANIFEST + else + msg :info, 'Use Calico user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_CNI_CONFIG + end + + msg :info, 'Generate Calico config manifest' + manifest = YAML.dump_stream *documents + file "#{manifest_dir}/rke2-calico-config.yaml", manifest, overwrite: true +end diff --git a/appliances/OneKE/appliance/canal.rb b/appliances/OneKE/appliance/canal.rb new file mode 100644 index 00000000..f0b3c397 --- /dev/null +++ b/appliances/OneKE/appliance/canal.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +require 'base64' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def configure_canal(manifest_dir = K8S_MANIFEST_DIR) + msg :info, 'Configure Canal' + + if ONEAPP_K8S_CNI_CONFIG.nil? 
+ msg :info, 'Create Canal CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-canal + namespace: kube-system + spec: + valuesContent: |- + MANIFEST + else + msg :info, 'Use Canal user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_CNI_CONFIG + end + + msg :info, 'Generate Canal config manifest' + manifest = YAML.dump_stream *documents + file "#{manifest_dir}/rke2-canal-config.yaml", manifest, overwrite: true +end diff --git a/appliances/OneKE/appliance/cilium.rb b/appliances/OneKE/appliance/cilium.rb new file mode 100644 index 00000000..84fd6e27 --- /dev/null +++ b/appliances/OneKE/appliance/cilium.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +require 'base64' +require 'uri' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def configure_cilium(manifest_dir = K8S_MANIFEST_DIR, endpoint = K8S_CONTROL_PLANE_EP) + msg :info, 'Configure Cilium' + + ep = URI.parse "https://#{endpoint}" + + if ONEAPP_K8S_CNI_CONFIG.nil? + msg :info, 'Create Cilium CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-cilium + namespace: kube-system + spec: + valuesContent: |- + kubeProxyReplacement: strict + k8sServiceHost: "#{ep.host}" + k8sServicePort: #{ep.port} + cni: + chainingMode: "none" + exclusive: false + bgpControlPlane: + enabled: true + --- + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: default + namespace: kube-system + spec: + cidrs: {} + MANIFEST + + unless ONEAPP_K8S_CILIUM_RANGES.empty? + ip_address_pool = documents.find do |doc| + doc['kind'] == 'CiliumLoadBalancerIPPool' && doc.dig('metadata', 'name') == 'default' + end + ip_address_pool['spec']['cidrs'] = extract_cilium_ranges.map do |item| + { 'cidr' => item.join('/') } + end + end + else + msg :info, 'Use Cilium user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_CNI_CONFIG + end + + msg :info, 'Generate Cilium config manifest' + manifest = YAML.dump_stream *documents + file "#{manifest_dir}/rke2-cilium-config.yaml", manifest, overwrite: true +end + +def extract_cilium_ranges(ranges = ONEAPP_K8S_CILIUM_RANGES) + ranges.compact + .map(&:strip) + .reject(&:empty?) + .map { |item| item.split('/').map(&:strip) } + .reject { |item| item.length > 2 } + .reject { |item| item.map(&:empty?).any? 
} + .reject { |item| !(ipv4?(item.first) && integer?(item.last)) } +end diff --git a/appliances/OneKE/appliance/cilium_spec.rb b/appliances/OneKE/appliance/cilium_spec.rb new file mode 100644 index 00000000..718f1141 --- /dev/null +++ b/appliances/OneKE/appliance/cilium_spec.rb @@ -0,0 +1,154 @@ +# frozen_string_literal: true + +require 'base64' +require 'rspec' +require 'tmpdir' +require 'yaml' + +require_relative 'cilium.rb' + +RSpec.describe 'extract_cilium_ranges' do + it 'should extract and return all ranges (positive)' do + input = [ + '10.11.12.0/24', + '10.11.0.0/16' + ] + output = [ + %w[10.11.12.0 24], + %w[10.11.0.0 16] + ] + expect(extract_cilium_ranges(input)).to eq output + end + + it 'should extract and return no ranges (negative)' do + input = [ + '', + '10.11.12.0', + '10.11.12.0/', + 'asd.11.12.0/24', + '10.11.12.0/asd' + ] + output = [] + expect(extract_cilium_ranges(input)).to eq output + end +end + +RSpec.describe 'configure_cilium' do + it 'should apply user-defined ranges (empty)' do + stub_const 'K8S_CONTROL_PLANE_EP', '192.168.150.86:6443' + stub_const 'ONEAPP_K8S_CNI_PLUGIN', 'cilium' + stub_const 'ONEAPP_K8S_CNI_CONFIG', nil + stub_const 'ONEAPP_K8S_CILIUM_RANGES', [] + output = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-cilium + namespace: kube-system + spec: + valuesContent: |- + kubeProxyReplacement: strict + k8sServiceHost: "192.168.150.86" + k8sServicePort: 6443 + cni: + chainingMode: "none" + exclusive: false + bgpControlPlane: + enabled: true + --- + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: default + namespace: kube-system + spec: + cidrs: {} + MANIFEST + Dir.mktmpdir do |temp_dir| + configure_cilium temp_dir + result = YAML.load_stream File.read "#{temp_dir}/rke2-cilium-config.yaml" + expect(result).to eq output + end + end + + it 'should apply user-defined ranges' do + stub_const 'K8S_CONTROL_PLANE_EP', '192.168.150.86:6443' + stub_const 'ONEAPP_K8S_CNI_PLUGIN', 'cilium' + stub_const 'ONEAPP_K8S_CILIUM_RANGES', ['192.168.150.128/25', '10.11.12.0/24'] + output = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-cilium + namespace: kube-system + spec: + valuesContent: |- + kubeProxyReplacement: strict + k8sServiceHost: "192.168.150.86" + k8sServicePort: 6443 + cni: + chainingMode: "none" + exclusive: false + bgpControlPlane: + enabled: true + --- + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: default + namespace: kube-system + spec: + cidrs: + - cidr: 192.168.150.128/25 + - cidr: 10.11.12.0/24 + MANIFEST + Dir.mktmpdir do |temp_dir| + configure_cilium temp_dir + result = YAML.load_stream File.read "#{temp_dir}/rke2-cilium-config.yaml" + expect(result).to eq output + end + end + + it 'should apply user-defined config manifest (and ignore user-defined ranges)' do + manifest = <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-cilium + namespace: kube-system + spec: + valuesContent: |- + kubeProxyReplacement: strict + k8sServiceHost: "192.168.150.86" + k8sServicePort: 6443 + cni: + chainingMode: "none" + exclusive: false + bgpControlPlane: + enabled: true + --- + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: default + namespace: kube-system + spec: + cidrs: + - cidr: 192.168.150.128/25 + - cidr: 10.11.12.0/24 + MANIFEST + stub_const 'ONEAPP_K8S_CNI_PLUGIN', 
'cilium' + stub_const 'ONEAPP_K8S_CNI_CONFIG', Base64.encode64(manifest) + stub_const 'ONEAPP_K8S_CILIUM_RANGES', ['1.2.3.4/5', '6.7.8.9/10'] + output = YAML.load_stream manifest + Dir.mktmpdir do |temp_dir| + configure_cilium temp_dir + result = YAML.load_stream File.read "#{temp_dir}/rke2-cilium-config.yaml" + expect(result).to eq output + end + end + +end diff --git a/appliances/OneKE/appliance/cleaner.rb b/appliances/OneKE/appliance/cleaner.rb new file mode 100644 index 00000000..91eb747b --- /dev/null +++ b/appliances/OneKE/appliance/cleaner.rb @@ -0,0 +1,125 @@ +# frozen_string_literal: true + +require_relative 'config.rb' +require_relative 'helpers.rb' +require_relative 'onegate.rb' + +def install_cleaner(addon_dir = ONE_ADDON_DIR) + msg :info, 'Install One-Cleaner' + fetch_cleaner addon_dir +end + +def fetch_cleaner(addon_dir = ONE_ADDON_DIR, cron = '*/2 * * * *', ttl = 180) + msg :info, 'Generate One-Cleaner manifest' + + file "#{addon_dir}/one-cleaner.yaml", <<~MANIFEST, overwrite: true + apiVersion: batch/v1 + kind: CronJob + metadata: + name: one-cleaner + namespace: kube-system + spec: + schedule: "#{cron}" + jobTemplate: + spec: + ttlSecondsAfterFinished: #{ttl} + template: + spec: + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Equal + value: "true" + effect: NoExecute + nodeSelector: + node-role.kubernetes.io/master: "true" + containers: + - name: one-cleaner + image: ruby:2.7-alpine3.16 + imagePullPolicy: IfNotPresent + command: + - /usr/local/bin/ruby + - /etc/one-appliance/service.d/appliance/cleaner.rb + volumeMounts: + - name: kubectl + mountPath: /var/lib/rancher/rke2/bin/kubectl + - name: kubeconfig + mountPath: /etc/rancher/rke2/rke2.yaml + - name: context + mountPath: /var/run/one-context/one_env + - name: onegate + mountPath: /usr/bin/onegate + - name: onegaterb + mountPath: /usr/bin/onegate.rb + - name: appliance + mountPath: /etc/one-appliance/service.d/appliance/ + volumes: + - name: kubectl + hostPath: + path: /var/lib/rancher/rke2/bin/kubectl + type: File + - name: kubeconfig + hostPath: + path: /etc/rancher/rke2/rke2.yaml + type: File + - name: context + hostPath: + path: /var/run/one-context/one_env + type: File + - name: onegate + hostPath: + path: /usr/bin/onegate + type: File + - name: onegaterb + hostPath: + path: /usr/bin/onegate.rb + type: File + - name: appliance + hostPath: + path: /etc/one-appliance/service.d/appliance/ + type: Directory + restartPolicy: Never + MANIFEST +end + +def detect_invalid_nodes + kubernetes_nodes = kubectl_get_nodes.dig 'items' + if kubernetes_nodes.nil? || kubernetes_nodes.empty? + msg :error, 'No Kubernetes nodes found' + exit 1 + end + + onegate_vms = all_vms_show + if onegate_vms.nil? || onegate_vms.empty? + msg :error, 'No Onegate VMs found' + exit 1 + end + + kubernetes_node_names = kubernetes_nodes + .map { |item| item.dig 'metadata', 'name' } + .reject(&:nil?) + .select { |item| item.start_with? 'oneke-ip-' } + + onegate_node_names = onegate_vms + .map { |item| item.dig 'VM', 'USER_TEMPLATE', 'ONEGATE_K8S_NODE_NAME' } + .reject(&:nil?) + .select { |item| item.start_with? 'oneke-ip-' } + + kubernetes_node_names - onegate_node_names +end + +if caller.empty? + # The ruby / alpine container does not have bash pre-installed, + # but busybox / ash seems to be somewhat compatible, at least usable.. + # It cannot be a simple symlink, because busybox is a multi-call binary.. 
+ file '/bin/bash', <<~SCRIPT, mode: 'u=rwx,go=rx', overwrite: false + #!/bin/ash + exec /bin/ash "$@" + SCRIPT + + detect_invalid_nodes.each do |name| + puts kubectl "delete node '#{name}'" + end +end diff --git a/appliances/OneKE/appliance/cleaner_spec.rb b/appliances/OneKE/appliance/cleaner_spec.rb new file mode 100644 index 00000000..bd61eb47 --- /dev/null +++ b/appliances/OneKE/appliance/cleaner_spec.rb @@ -0,0 +1,482 @@ +# frozen_string_literal: true + +require 'json' +require 'rspec' + +require_relative 'cleaner.rb' + +RSpec.describe 'detect_invalid_nodes' do + it 'should return list of invalid nodes (to be removed)' do + allow(self).to receive(:kubectl_get_nodes).and_return JSON.parse <<~'JSON' + { + "apiVersion": "v1", + "items": [ + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "flannel.alpha.coreos.com/backend-data": "{\"VtepMAC\":\"6e:c7:7a:19:fb:7f\"}", + "flannel.alpha.coreos.com/backend-type": "vxlan", + "flannel.alpha.coreos.com/kube-subnet-manager": "true", + "flannel.alpha.coreos.com/public-ip": "172.20.0.100", + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "projectcalico.org/IPv4Address": "172.20.0.100/24", + "projectcalico.org/IPv4IPIPTunnelAddr": "10.244.0.1", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-03-15T09:06:29Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "oneke-ip-172-20-0-100", + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/master": "", + "node.kubernetes.io/exclude-from-external-load-balancers": "" + }, + "name": "oneke-ip-172-20-0-100", + "resourceVersion": "17537", + "uid": "e198b625-8c3b-40c5-b41b-acd994a73be3" + }, + "spec": { + "podCIDR": "10.244.0.0/24", + "podCIDRs": [ + "10.244.0.0/24" + ], + "taints": [ + { + "effect": "NoSchedule", + "key": "node-role.kubernetes.io/master" + } + ] + }, + "status": { + "addresses": [ + { + "address": "172.20.0.100", + "type": "InternalIP" + }, + { + "address": "oneke-ip-172-20-0-100", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "2", + "ephemeral-storage": "18566299208", + "hugepages-2Mi": "0", + "memory": "1939544Ki", + "pods": "110" + }, + "capacity": { + "cpu": "2", + "ephemeral-storage": "20145724Ki", + "hugepages-2Mi": "0", + "memory": "2041944Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2022-03-15T09:07:04Z", + "lastTransitionTime": "2022-03-15T09:07:04Z", + "message": "Flannel is running on this node", + "reason": "FlannelIsUp", + "status": "False", + "type": "NetworkUnavailable" + }, + { + "lastHeartbeatTime": "2022-03-15T11:09:59Z", + "lastTransitionTime": "2022-03-15T09:06:22Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:09:59Z", + "lastTransitionTime": "2022-03-15T09:06:22Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:09:59Z", + "lastTransitionTime": "2022-03-15T09:06:22Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:09:59Z", + 
"lastTransitionTime": "2022-03-15T09:07:02Z", + "message": "kubelet is posting ready status. AppArmor enabled", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [], + "nodeInfo": { + "architecture": "amd64", + "bootID": "612377df-f413-43ae-91d9-b9ab75d2661a", + "containerRuntimeVersion": "docker://20.10.13", + "kernelVersion": "5.4.0-1058-kvm", + "kubeProxyVersion": "v1.21.10", + "kubeletVersion": "v1.21.10", + "machineID": "2f2741fd3cb14ef4b6560ae805e1756c", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "2f2741fd-3cb1-4ef4-b656-0ae805e1756c" + } + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "csi.volume.kubernetes.io/nodeid": "{\"driver.longhorn.io\":\"oneke-ip-172-20-0-101\"}", + "flannel.alpha.coreos.com/backend-data": "{\"VtepMAC\":\"fa:f6:f4:57:8f:2e\"}", + "flannel.alpha.coreos.com/backend-type": "vxlan", + "flannel.alpha.coreos.com/kube-subnet-manager": "true", + "flannel.alpha.coreos.com/public-ip": "172.20.0.101", + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "projectcalico.org/IPv4Address": "172.20.0.101/24", + "projectcalico.org/IPv4IPIPTunnelAddr": "10.244.1.1", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-03-15T09:08:14Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "oneke-ip-172-20-0-101", + "kubernetes.io/os": "linux" + }, + "name": "oneke-ip-172-20-0-101", + "resourceVersion": "17722", + "uid": "dc33eae6-73c2-4a91-90c7-990c2fa5cc11" + }, + "spec": { + "podCIDR": "10.244.1.0/24", + "podCIDRs": [ + "10.244.1.0/24" + ] + }, + "status": { + "addresses": [ + { + "address": "172.20.0.101", + "type": "InternalIP" + }, + { + "address": "oneke-ip-172-20-0-101", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "2", + "ephemeral-storage": "18566299208", + "hugepages-2Mi": "0", + "memory": "1939544Ki", + "pods": "110" + }, + "capacity": { + "cpu": "2", + "ephemeral-storage": "20145724Ki", + "hugepages-2Mi": "0", + "memory": "2041944Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2022-03-15T09:08:25Z", + "lastTransitionTime": "2022-03-15T09:08:25Z", + "message": "Flannel is running on this node", + "reason": "FlannelIsUp", + "status": "False", + "type": "NetworkUnavailable" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:22Z", + "lastTransitionTime": "2022-03-15T09:08:14Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:22Z", + "lastTransitionTime": "2022-03-15T09:08:14Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:22Z", + "lastTransitionTime": "2022-03-15T09:08:14Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:22Z", + "lastTransitionTime": "2022-03-15T09:08:25Z", + "message": "kubelet is posting ready status. 
AppArmor enabled", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [], + "nodeInfo": { + "architecture": "amd64", + "bootID": "b2b7b410-bc29-4a6d-b4a6-fdbf7328b6cb", + "containerRuntimeVersion": "docker://20.10.13", + "kernelVersion": "5.4.0-1058-kvm", + "kubeProxyVersion": "v1.21.10", + "kubeletVersion": "v1.21.10", + "machineID": "1f5851ae52914927a1cf4c86427e0a36", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "1f5851ae-5291-4927-a1cf-4c86427e0a36" + } + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "csi.volume.kubernetes.io/nodeid": "{\"driver.longhorn.io\":\"oneke-ip-172-20-0-102\"}", + "flannel.alpha.coreos.com/backend-data": "{\"VtepMAC\":\"1a:f1:ed:df:19:cd\"}", + "flannel.alpha.coreos.com/backend-type": "vxlan", + "flannel.alpha.coreos.com/kube-subnet-manager": "true", + "flannel.alpha.coreos.com/public-ip": "172.20.0.102", + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "projectcalico.org/IPv4Address": "172.20.0.102/24", + "projectcalico.org/IPv4IPIPTunnelAddr": "10.244.2.1", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-03-15T09:08:28Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "oneke-ip-172-20-0-102", + "kubernetes.io/os": "linux", + "node.longhorn.io/create-default-disk": "true" + }, + "name": "oneke-ip-172-20-0-102", + "resourceVersion": "17746", + "uid": "cb5c7412-0ec8-47a6-9caa-5fd8bd720684" + }, + "spec": { + "podCIDR": "10.244.2.0/24", + "podCIDRs": [ + "10.244.2.0/24" + ], + "taints": [ + { + "effect": "NoSchedule", + "key": "node.longhorn.io/create-default-disk", + "value": "true" + } + ] + }, + "status": { + "addresses": [ + { + "address": "172.20.0.102", + "type": "InternalIP" + }, + { + "address": "oneke-ip-172-20-0-102", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "2", + "ephemeral-storage": "18566299208", + "hugepages-2Mi": "0", + "memory": "1939544Ki", + "pods": "110" + }, + "capacity": { + "cpu": "2", + "ephemeral-storage": "20145724Ki", + "hugepages-2Mi": "0", + "memory": "2041944Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2022-03-15T09:08:39Z", + "lastTransitionTime": "2022-03-15T09:08:39Z", + "message": "Flannel is running on this node", + "reason": "FlannelIsUp", + "status": "False", + "type": "NetworkUnavailable" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:32Z", + "lastTransitionTime": "2022-03-15T09:08:28Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:32Z", + "lastTransitionTime": "2022-03-15T09:08:28Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:32Z", + "lastTransitionTime": "2022-03-15T09:08:28Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:32Z", + "lastTransitionTime": "2022-03-15T09:08:38Z", + "message": "kubelet is posting ready status. 
AppArmor enabled", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [], + "nodeInfo": { + "architecture": "amd64", + "bootID": "0df98c4d-163e-4468-b299-7d8fdb34a172", + "containerRuntimeVersion": "docker://20.10.13", + "kernelVersion": "5.4.0-1058-kvm", + "kubeProxyVersion": "v1.21.10", + "kubeletVersion": "v1.21.10", + "machineID": "69820ee32d094fdbbb065b80643a06dc", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "69820ee3-2d09-4fdb-bb06-5b80643a06dc" + } + } + } + ], + "kind": "List", + "metadata": { + "resourceVersion": "", + "selfLink": "" + } + } + JSON + allow(self).to receive(:all_vms_show).and_return JSON.parse <<~JSON + [ + { + "VM": { + "NAME": "master_0_(service_21)", + "ID": "49", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-172-20-0-100", + "ONEGATE_K8S_HASH": "09a9ed140fec2fa1a2281a3125952d6f2951b67a67534647b0a606ae2d478f60", + "ONEGATE_K8S_MASTER": "172.20.0.100", + "ONEGATE_K8S_TOKEN": "sg7711.p19vy0eqxefc0lqz", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.100", + "MAC": "02:00:ac:14:00:64", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + }, + { + "VM": { + "NAME": "storage_0_(service_21)", + "ID": "51", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-172-20-0-102", + "READY": "YES", + "ROLE_NAME": "storage", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.102", + "MAC": "02:00:ac:14:00:66", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + ] + JSON + expect(detect_invalid_nodes).to eq ['oneke-ip-172-20-0-101'] + end +end diff --git a/appliances/OneKE/appliance/config.rb 
b/appliances/OneKE/appliance/config.rb new file mode 100644 index 00000000..74ccc499 --- /dev/null +++ b/appliances/OneKE/appliance/config.rb @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +def env(name, default) + value = ENV.fetch name.to_s, '' + value = value.empty? ? default : value + value = %w[YES 1].include?(value.upcase) if default.instance_of?(String) && %w[YES NO].include?(default.upcase) + value +end + +ONE_SERVICE_VERSION = env :ONE_SERVICE_VERSION, '1.27' +ONE_SERVICE_AIRGAPPED = env :ONE_SERVICE_AIRGAPPED, 'YES' +ONE_SERVICE_SETUP_DIR = env :ONE_SERVICE_SETUP_DIR, '/opt/one-appliance' + +ONE_SERVICE_RKE2_RELEASE = env :ONE_SERVICE_RKE2_RELEASE, "#{ONE_SERVICE_VERSION}.2" +ONE_SERVICE_RKE2_VERSION = env :ONE_SERVICE_RKE2_VERSION, "v#{ONE_SERVICE_RKE2_RELEASE}+rke2r1" +ONE_SERVICE_HELM_VERSION = env :ONE_SERVICE_HELM_VERSION, '3.11.3' + +ONEAPP_K8S_MULTUS_ENABLED = env :ONEAPP_K8S_MULTUS_ENABLED, 'NO' +ONEAPP_K8S_MULTUS_CONFIG = env :ONEAPP_K8S_MULTUS_CONFIG, nil + +ONEAPP_K8S_CNI_PLUGIN = env :ONEAPP_K8S_CNI_PLUGIN, 'cilium' +ONEAPP_K8S_CNI_CONFIG = env :ONEAPP_K8S_CNI_CONFIG, nil +ONEAPP_K8S_CILIUM_RANGES = ENV.select { |key, _| key.start_with? 'ONEAPP_K8S_CILIUM_RANGE' }.values.freeze + +ONEAPP_K8S_LONGHORN_CHART_VERSION = env :ONEAPP_K8S_LONGHORN_CHART_VERSION, '1.4.1' +ONEAPP_K8S_LONGHORN_ENABLED = env :ONEAPP_K8S_LONGHORN_ENABLED, 'NO' + +ONEAPP_K8S_METALLB_CHART_VERSION = env :ONEAPP_K8S_METALLB_CHART_VERSION, '0.13.9' +ONEAPP_K8S_METALLB_ENABLED = env :ONEAPP_K8S_METALLB_ENABLED, 'NO' +ONEAPP_K8S_METALLB_CONFIG = env :ONEAPP_K8S_METALLB_CONFIG, nil +ONEAPP_K8S_METALLB_RANGES = ENV.select { |key, _| key.start_with? 'ONEAPP_K8S_METALLB_RANGE' }.values.freeze + +ONEAPP_K8S_TRAEFIK_CHART_VERSION = env :ONEAPP_K8S_TRAEFIK_CHART_VERSION, '23.0.0' +ONEAPP_K8S_TRAEFIK_ENABLED = env :ONEAPP_K8S_TRAEFIK_ENABLED, 'NO' + +ONEAPP_VROUTER_ETH0_VIP0 = env :ONEAPP_VROUTER_ETH0_VIP0, nil +ONEAPP_VROUTER_ETH1_VIP0 = env :ONEAPP_VROUTER_ETH1_VIP0, nil +ONEAPP_VNF_HAPROXY_LB2_PORT = env :ONEAPP_VNF_HAPROXY_LB2_PORT, '443' +ONEAPP_VNF_HAPROXY_LB3_PORT = env :ONEAPP_VNF_HAPROXY_LB3_PORT, '80' + +ONEAPP_K8S_EXTRA_SANS = env :ONEAPP_K8S_EXTRA_SANS, 'localhost,127.0.0.1' + +ONEAPP_STORAGE_DEVICE = env :ONEAPP_STORAGE_DEVICE, nil # for example '/dev/vdb' +ONEAPP_STORAGE_FILESYSTEM = env :ONEAPP_STORAGE_FILESYSTEM, 'xfs' +ONEAPP_STORAGE_MOUNTPOINT = env :ONEAPP_STORAGE_MOUNTPOINT, '/var/lib/longhorn' + +ONE_ADDON_DIR = env :ONE_ADDON_DIR, "#{ONE_SERVICE_SETUP_DIR}/addons" +ONE_AIRGAP_DIR = env :ONE_AIRGAP_DIR, "#{ONE_SERVICE_SETUP_DIR}/airgap" + +K8S_MANIFEST_DIR = env :K8S_MANIFEST_DIR, '/var/lib/rancher/rke2/server/manifests' +K8S_IMAGE_DIR = env :K8S_IMAGE_DIR, '/var/lib/rancher/rke2/agent/images' + +K8S_SUPERVISOR_EP = "#{ONEAPP_VROUTER_ETH0_VIP0}:9345" +K8S_CONTROL_PLANE_EP = "#{ONEAPP_VROUTER_ETH0_VIP0}:6443" + +RETRIES = 86 +SECONDS = 5 + +PACKAGES = %w[ + curl + gawk + gnupg + lsb-release + openssl + skopeo + zstd +].freeze + +KUBECONFIG = %w[/etc/rancher/rke2/rke2.yaml].freeze diff --git a/appliances/OneKE/appliance/helpers.rb b/appliances/OneKE/appliance/helpers.rb new file mode 100644 index 00000000..c263bcf7 --- /dev/null +++ b/appliances/OneKE/appliance/helpers.rb @@ -0,0 +1,242 @@ +# frozen_string_literal: true + +require 'base64' +require 'date' +require 'fileutils' +require 'json' +require 'ipaddr' +require 'logger' +require 'net/http' +require 'open3' +require 'socket' +require 'tempfile' +require 'uri' +require 'yaml' + +LOGGER_STDOUT = Logger.new(STDOUT) +LOGGER_STDERR = 
Logger.new(STDERR) + +LOGGERS = { + info: LOGGER_STDOUT.method(:info), + debug: LOGGER_STDERR.method(:debug), + warn: LOGGER_STDERR.method(:warn), + error: LOGGER_STDERR.method(:error) +}.freeze + +def msg(level, string) + LOGGERS[level].call string +end + +def slurp(path) + Base64.encode64(File.read(path)).lines.map(&:strip).join +end + +def file(path, content, mode: 'u=rw,go=r', overwrite: false) + return if !overwrite && File.exist?(path) + + FileUtils.mkdir_p File.dirname path + + File.write path, content + + FileUtils.chmod mode, path +end + +def bash(script, chomp: false, terminate: true) + command = 'exec /bin/bash --login -s' + + stdin_data = <<~SCRIPT + export DEBIAN_FRONTEND=noninteractive + set -o errexit -o nounset -o pipefail + set -x + #{script} + SCRIPT + + stdout, stderr, status = Open3.capture3 command, stdin_data: stdin_data + unless status.exitstatus.zero? + error_message = "#{status.exitstatus}: #{stderr}" + msg :error, error_message + + raise error_message unless terminate + + exit status.exitstatus + end + + chomp ? stdout.chomp : stdout +end + +def kubectl(arguments, namespace: nil, kubeconfig: KUBECONFIG) + kubeconfig = [kubeconfig].flatten.find { |path| !path.nil? && File.exist?(path) } + command = ['/var/lib/rancher/rke2/bin/kubectl'] + command << "--kubeconfig #{kubeconfig}" unless kubeconfig.nil? + command << "--namespace #{namespace}" unless namespace.nil? + command << arguments + bash command.flatten.join(' ') +end + +def kubectl_get_nodes + JSON.parse kubectl 'get nodes -o json' +end + +def kubectl_get_configmap(name, namespace: 'kube-system', kubeconfig: KUBECONFIG) + YAML.safe_load kubectl <<~COMMAND, namespace: namespace, kubeconfig: kubeconfig + get configmap/#{name} -o yaml + COMMAND +end + +def kubectl_apply_f(path, kubeconfig: KUBECONFIG) + kubectl "apply -f #{path}", kubeconfig: kubeconfig +end + +def kubectl_apply(manifest, kubeconfig: KUBECONFIG) + Tempfile.create do |temp_file| + temp_file.write manifest + temp_file.close + return kubectl_apply_f temp_file.path, kubeconfig: kubeconfig + end +end + +def pull_docker_images(images, dest_dir) + images.each do |image| + name, tag = image.split ':' + + path = "#{dest_dir}/#{name.gsub '/', '_'}.tar.zst" + + next if File.exist? path + + msg :info, "Pull #{name}:#{tag} -> #{path}" + + FileUtils.mkdir_p dest_dir + + bash <<~SCRIPT + skopeo copy 'docker://#{name}:#{tag}' 'docker-archive:/dev/fd/2:#{name}:#{tag}' 3>&1 1>&2 2>&3 \ + | zstd --ultra -o '#{path}' + SCRIPT + end +end + +def extract_images(manifest) + images = [] + + YAML.load_stream manifest do |document| + next if document.nil? + + if document.dig('kind') == 'HelmChart' + # NOTE: Aassuming all one-*.yaml manifests contain chartContent: and valuesContent: fields. 
+ chart_tgz = Base64.decode64 document.dig('spec', 'chartContent') + values_yml = document.dig('spec', 'valuesContent') + + Dir.mktmpdir do |temp_dir| + file "#{temp_dir}/chart.tgz", chart_tgz, overwrite: true + file "#{temp_dir}/values.yml", values_yml, overwrite: true + images += extract_images bash("helm template '#{temp_dir}/chart.tgz' -f '#{temp_dir}/values.yml'") + end + + next + end + + containers = [] + containers += document.dig('spec', 'template', 'spec', 'containers') || [] + containers += document.dig('spec', 'template', 'spec', 'initContainers') || [] + containers += document.dig('spec', 'jobTemplate', 'spec', 'template', 'spec', 'containers') || [] + containers += document.dig('spec', 'jobTemplate', 'spec', 'template', 'spec', 'initContainers') || [] + + images += containers.map { |container| container.dig 'image' } + end + + images.uniq +end + +def pull_addon_images(addon_dir = ONE_ADDON_DIR, airgap_dir = ONE_AIRGAP_DIR) + Dir["#{addon_dir}/one-*.yaml"].each do |path| + manifest = File.read path + pull_docker_images extract_images(manifest), "#{airgap_dir}/#{File.basename(path, '.yaml')}/" + end +end + +# NOTE: This must be executed *before* starting rke2-server/agent services, +# otherwise images will not be loaded into containerd. +def include_images(name, airgap_dir = ONE_AIRGAP_DIR, image_dir = K8S_IMAGE_DIR) + FileUtils.mkdir_p image_dir + Dir["#{airgap_dir}/#{name}/*.tar.zst"].each do |path| + msg :info, "Include airgapped image: #{File.basename(path)}" + symlink = "#{image_dir}/#{File.basename(path)}" + File.symlink path, symlink unless File.exist? symlink + end +end + +# NOTE: This must be executed *after* starting rke2-server/agent services. +def include_manifests(name, addon_dir = ONE_ADDON_DIR, manifest_dir = K8S_MANIFEST_DIR) + FileUtils.mkdir_p manifest_dir + Dir["#{addon_dir}/#{name}*.yaml"].each do |path| + msg :info, "Include addon: #{File.basename(path)}" + symlink = "#{manifest_dir}/#{File.basename(path)}" + File.symlink path, symlink unless File.exist? symlink + end +end + +def with_policy_rc_d_disabled + file '/usr/sbin/policy-rc.d', 'exit 101', mode: 'a+x', overwrite: true + yield +ensure + file '/usr/sbin/policy-rc.d', 'exit 0', mode: 'a+x', overwrite: true +end + +def install_packages(packages, hold: false) + msg :info, "Install APT packages: #{packages.join(',')}" + + puts bash <<~SCRIPT + apt-get install -y #{packages.join(' ')} + SCRIPT + + bash <<~SCRIPT if hold + apt-mark hold #{packages.join(' ')} + SCRIPT +end + +def ipv4?(string) + string.is_a?(String) && IPAddr.new(string) ? true : false +rescue IPAddr::InvalidAddressError + false +end + +def integer?(string) + Integer(string) ? true : false +rescue ArgumentError + false +end + +alias port? integer? + +def tcp_port_open?(ipv4, port, seconds = 5) + # > If a block is given, the block is called with the socket. + # > The value of the block is returned. + # > The socket is closed when this method returns. 
+ Socket.tcp(ipv4, port, connect_timeout: seconds) {} + true +rescue Errno::ECONNREFUSED, Errno::ECONNRESET, Errno::EHOSTUNREACH, Errno::ETIMEDOUT + false +end + +def http_status_200?(url, + cacert = '/var/lib/rancher/rke2/server/tls/server-ca.crt', + cert = '/var/lib/rancher/rke2/server/tls/client-admin.crt', + key = '/var/lib/rancher/rke2/server/tls/client-admin.key', + seconds = 5) + + url = URI.parse url + http = Net::HTTP.new url.host, url.port + + if url.scheme == 'https' + http.use_ssl = true + http.verify_mode = OpenSSL::SSL::VERIFY_PEER + http.ca_file = cacert + http.cert = OpenSSL::X509::Certificate.new File.read cert + http.key = OpenSSL::PKey::EC.new File.read key + end + + http.open_timeout = seconds + + http.get(url.path).code == '200' +rescue Errno::ECONNREFUSED, Errno::ECONNRESET, Errno::EHOSTUNREACH, Errno::ETIMEDOUT, Net::OpenTimeout + false +end diff --git a/appliances/OneKE/appliance/helpers_spec.rb b/appliances/OneKE/appliance/helpers_spec.rb new file mode 100644 index 00000000..51e1f22f --- /dev/null +++ b/appliances/OneKE/appliance/helpers_spec.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +require 'rspec' + +require_relative 'helpers.rb' + +RSpec.describe 'bash' do + it 'should raise' do + allow(self).to receive(:exit).and_return nil + expect { bash 'false', terminate: false }.to raise_error(RuntimeError) + end + it 'should not raise' do + allow(self).to receive(:exit).and_return nil + expect { bash 'false' }.not_to raise_error + end +end + +RSpec.describe 'ipv4?' do + it 'should evaluate to true' do + ipv4s = %w[ + 10.11.12.13 + 10.11.12.13/24 + 10.11.12.13/32 + 192.168.144.120 + ] + ipv4s.each do |item| + expect(ipv4?(item)).to be true + end + end + it 'should evaluate to false' do + ipv4s = %w[ + 10.11.12 + 10.11.12. 
+ 10.11.12.256 + asd.168.144.120 + 192.168.144.96-192.168.144.120 + ] + ipv4s.each do |item| + expect(ipv4?(item)).to be false + end + end +end diff --git a/appliances/OneKE/appliance/kubernetes.rb b/appliances/OneKE/appliance/kubernetes.rb new file mode 100644 index 00000000..93fc0e7d --- /dev/null +++ b/appliances/OneKE/appliance/kubernetes.rb @@ -0,0 +1,312 @@ +# frozen_string_literal: true + +require 'securerandom' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' +require_relative 'onegate.rb' +require_relative 'vnf.rb' + +def install_kubernetes(airgap_dir = ONE_AIRGAP_DIR) + rke2_release_url = "https://github.com/rancher/rke2/releases/download/#{ONE_SERVICE_RKE2_VERSION}" + + msg :info, "Install RKE2 runtime: #{ONE_SERVICE_RKE2_VERSION}" + bash <<~SCRIPT + curl -fsSL '#{rke2_release_url}/rke2.linux-amd64.tar.gz' | tar -xz -f- -C /usr/local/ + SCRIPT + + msg :info, "Download RKE2 airgapped image archives: #{ONE_SERVICE_RKE2_VERSION}" + bash <<~SCRIPT + curl -fsSL '#{rke2_release_url}/rke2-images-core.linux-amd64.tar.zst' \ + | install -o 0 -g 0 -m u=rw,go=r -D /dev/fd/0 '#{airgap_dir}/rke2-images-core/rke2-images-core.linux-amd64.tar.zst' + SCRIPT + bash <<~SCRIPT + curl -fsSL '#{rke2_release_url}/rke2-images-multus.linux-amd64.tar.zst' \ + | install -o 0 -g 0 -m u=rw,go=r -D /dev/fd/0 '#{airgap_dir}/rke2-images-multus/rke2-images-multus.linux-amd64.tar.zst' + SCRIPT + bash <<~SCRIPT + curl -fsSL '#{rke2_release_url}/rke2-images-cilium.linux-amd64.tar.zst' \ + | install -o 0 -g 0 -m u=rw,go=r -D /dev/fd/0 '#{airgap_dir}/rke2-images-cilium/rke2-images-cilium.linux-amd64.tar.zst' + SCRIPT + + msg :info, "Install Helm binary: #{ONE_SERVICE_HELM_VERSION}" + bash <<~SCRIPT + curl -fsSL 'https://get.helm.sh/helm-v#{ONE_SERVICE_HELM_VERSION}-linux-amd64.tar.gz' \ + | tar -xOz -f- linux-amd64/helm \ + | install -o 0 -g 0 -m u=rwx,go=rx -D /dev/fd/0 /usr/local/bin/helm + SCRIPT + + msg :info, 'Link kubectl binary' + File.symlink '/var/lib/rancher/rke2/bin/kubectl', '/usr/local/bin/kubectl' + + msg :info, 'Link crictl binary' + File.symlink '/var/lib/rancher/rke2/bin/crictl', '/usr/local/bin/crictl' + + msg :info, 'Set BASH profile defaults' + file '/etc/profile.d/98-oneke.sh', <<~PROFILE, mode: 'u=rw,go=r' + export KUBECONFIG=/etc/rancher/rke2/rke2.yaml + export CRI_CONFIG_FILE=/var/lib/rancher/rke2/agent/etc/crictl.yaml + PROFILE +end + +def configure_kubernetes(configure_cni: ->{}, configure_addons: ->{}) + node = detect_node + + if node[:init_master] + configure_cni.() + init_master + configure_addons.() + elsif node[:join_master] + configure_cni.() + join_master node[:token] + configure_addons.() + elsif node[:join_worker] + join_worker node[:token] + elsif node[:join_storage] + join_storage node[:token] + end + + node +end + +def wait_for_any_master(retries = RETRIES, seconds = SECONDS) + msg :info, 'Wait for any master to be available' + + retries.times.to_a.reverse.each do |retry_num| + msg :debug, "wait_for_any_master / #{retry_num}" + + master_vms_show.each do |master_vm| + ready = master_vm.dig 'VM', 'USER_TEMPLATE', 'READY' + next unless ready == 'YES' + + # Not using the CP/EP here, only a direct validation without going through VNF/LB. + # The first responding master wins. + + k8s_master = master_vm.dig 'VM', 'USER_TEMPLATE', 'ONEGATE_K8S_MASTER' + next if k8s_master.nil? + + return master_vm if tcp_port_open? k8s_master, 6443 + end + + if retry_num.zero? 
+ msg :error, 'No usable master found' + exit 1 + end + + sleep seconds + end +end + +def wait_for_control_plane(endpoint = K8S_CONTROL_PLANE_EP, retries = RETRIES, seconds = SECONDS) + msg :info, 'Wait for Control-Plane to be ready' + + retries.times.to_a.reverse.each do |retry_num| + msg :debug, "wait_for_control_plane / #{retry_num}" + + break if http_status_200? "https://#{endpoint}/readyz" + + if retry_num.zero? + msg :error, 'Control-Plane not ready' + exit 1 + end + + sleep seconds + end +end + +def wait_for_kubelets(retries = RETRIES, seconds = SECONDS) + msg :info, 'Wait for available Kubelets to be ready' + + retries.times.to_a.reverse.each do |retry_num| + msg :debug, "wait_for_kubelets / #{retry_num}" + + conditions = kubectl_get_nodes['items'].map do |node| + node.dig('status', 'conditions').find do |item| + item['reason'] == 'KubeletReady' && item['type'] == 'Ready' && item['status'] == 'True' + end + end + + break if conditions.all? + + if retry_num.zero? + msg :error, 'Kubelets not ready' + exit 1 + end + + sleep seconds + end +end + +def init_master + ipv4 = external_ipv4s.first + name = "oneke-ip-#{ipv4.gsub '.', '-'}" + + msg :info, "Set local hostname: #{name}" + bash "hostnamectl set-hostname #{name}" + + onegate_vm_update ["ONEGATE_K8S_NODE_NAME=#{name}"] + + msg :info, 'Set this master to be the first VNF backend' + vnf_supervisor_setup_backend + vnf_control_plane_setup_backend + + cni = [] + cni << 'multus' if ONEAPP_K8S_MULTUS_ENABLED + cni << ONEAPP_K8S_CNI_PLUGIN + + server_config = { + 'node-name' => name, + 'token' => SecureRandom.uuid, + 'tls-san' => ONEAPP_K8S_EXTRA_SANS.split(',').map(&:strip).append(ONEAPP_VROUTER_ETH0_VIP0), + 'node-taint' => ['CriticalAddonsOnly=true:NoExecute'], + 'disable' => ['rke2-ingress-nginx'], + 'cni' => cni, + 'disable-kube-proxy' => ONEAPP_K8S_CNI_PLUGIN == 'cilium' + } + + msg :info, 'Prepare initial rke2-server config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(server_config), overwrite: false + + msg :info, "Initialize first master: #{name}" + bash 'systemctl enable rke2-server.service --now' + + server_config.merge!({ + 'server' => "https://#{K8S_SUPERVISOR_EP}", + 'token' => File.read('/var/lib/rancher/rke2/server/node-token', encoding: 'utf-8').strip + }) + + msg :info, 'Normalize rke2-server config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(server_config), overwrite: true + + onegate_vm_update ["ONEGATE_K8S_MASTER=#{ipv4}", "ONEGATE_K8S_TOKEN=#{server_config['token']}"] + + wait_for_control_plane + wait_for_kubelets +end + +def join_master(token, retries = RETRIES, seconds = SECONDS) + ipv4 = external_ipv4s.first + name = "oneke-ip-#{ipv4.gsub '.', '-'}" + + msg :info, "Set local hostname: #{name}" + bash "hostnamectl set-hostname #{name}" + + onegate_vm_update ["ONEGATE_K8S_NODE_NAME=#{name}"] + + cni = [] + cni << 'multus' if ONEAPP_K8S_MULTUS_ENABLED + cni << ONEAPP_K8S_CNI_PLUGIN + + server_config = { + 'node-name' => name, + 'server' => "https://#{K8S_SUPERVISOR_EP}", + 'token' => token, + 'tls-san' => ONEAPP_K8S_EXTRA_SANS.split(',').map(&:strip).append(ONEAPP_VROUTER_ETH0_VIP0), + 'node-taint' => ['CriticalAddonsOnly=true:NoExecute'], + 'disable' => ['rke2-ingress-nginx'], + 'cni' => cni, + 'disable-kube-proxy' => ONEAPP_K8S_CNI_PLUGIN == 'cilium' + } + + msg :info, 'Prepare rke2-server config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(server_config), overwrite: true + + # The rke2-server systemd service restarts automatically and eventually joins. 
+ # If it really cannot join we want to reflect this in OneFlow. + retries.times.to_a.reverse.each do |retry_num| + if retry_num.zero? + msg :error, 'Unable to join Control-Plane' + exit 1 + end + begin + msg :info, "Join master: #{name} / #{retry_num}" + bash 'systemctl enable rke2-server.service --now', terminate: false + rescue RuntimeError + sleep seconds + next + end + break + end + + onegate_vm_update ["ONEGATE_K8S_MASTER=#{ipv4}", "ONEGATE_K8S_TOKEN=#{server_config['token']}"] + + msg :info, 'Set this master to be a VNF backend' + vnf_supervisor_setup_backend + vnf_control_plane_setup_backend + + wait_for_control_plane + wait_for_kubelets +end + +def join_worker(token) + ipv4 = external_ipv4s.first + name = "oneke-ip-#{ipv4.gsub '.', '-'}" + + msg :info, "Set local hostname: #{name}" + bash "hostnamectl set-hostname #{name}" + + onegate_vm_update ["ONEGATE_K8S_NODE_NAME=#{name}"] + + agent_config = { + 'node-name' => name, + 'server' => "https://#{K8S_SUPERVISOR_EP}", + 'token' => token + } + + msg :info, 'Prepare rke2-agent config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(agent_config), overwrite: true + + msg :info, "Join worker: #{name}" + bash 'systemctl enable rke2-agent.service --now' +end + +def join_storage(token) + ipv4 = external_ipv4s.first + name = "oneke-ip-#{ipv4.gsub '.', '-'}" + + msg :info, "Set local hostname: #{name}" + bash "hostnamectl set-hostname #{name}" + + onegate_vm_update ["ONEGATE_K8S_NODE_NAME=#{name}"] + + agent_config = { + 'node-name' => name, + 'server ' => "https://#{K8S_SUPERVISOR_EP}", + 'token' => token, + 'node-taint' => ['node.longhorn.io/create-default-disk=true:NoSchedule'], + 'node-label' => ['node.longhorn.io/create-default-disk=true'] + } + + msg :info, 'Prepare rke2-agent config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(agent_config), overwrite: true + + msg :info, "Join storage: #{name}" + bash 'systemctl enable rke2-agent.service --now' +end + +def detect_node + current_vm = onegate_vm_show + current_vmid = current_vm.dig 'VM', 'ID' + current_role = current_vm.dig 'VM', 'USER_TEMPLATE', 'ROLE_NAME' + + master_vm = master_vm_show + master_vmid = master_vm.dig 'VM', 'ID' + + master_vm = wait_for_any_master if current_vmid != master_vmid + + token = master_vm.dig 'VM', 'USER_TEMPLATE', 'ONEGATE_K8S_TOKEN' + + ready_to_join = !token.nil? + + results = { + init_master: current_role == 'master' && current_vmid == master_vmid && !ready_to_join, + join_master: current_role == 'master' && current_vmid != master_vmid && ready_to_join, + join_worker: current_role == 'worker' && current_vmid != master_vmid && ready_to_join, + join_storage: current_role == 'storage' && current_vmid != master_vmid && ready_to_join, + token: token + } + + msg :debug, "detect_node / #{results}" + results +end diff --git a/appliances/OneKE/appliance/longhorn.rb b/appliances/OneKE/appliance/longhorn.rb new file mode 100644 index 00000000..68d26dbf --- /dev/null +++ b/appliances/OneKE/appliance/longhorn.rb @@ -0,0 +1,143 @@ +# frozen_string_literal: true + +require 'base64' +require 'tmpdir' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def install_longhorn(addon_dir = ONE_ADDON_DIR) + msg :info, 'Install Longhorn' + fetch_longhorn addon_dir + pull_longhorn_images if ONE_SERVICE_AIRGAPPED +end + +def prepare_dedicated_storage + msg :info, 'Setup dedicated storage and populate /etc/fstab' + + # Previously executed in a start script, moved here because the start script was causing race condition issues. 
+ puts bash <<~SCRIPT + # Silently abort when there is no disk attached. + if ! lsblk -n -o name '#{ONEAPP_STORAGE_DEVICE}'; then exit 0; fi + + # Make sure mountpoint exists. + install -o 0 -g 0 -m u=rwx,go=rx -d '#{ONEAPP_STORAGE_MOUNTPOINT}' + + # Silently abort when mountpoint is taken. + if mountpoint '#{ONEAPP_STORAGE_MOUNTPOINT}'; then exit 0; fi + + # Create new filesystem if the device does not contain any. + if ! blkid -s TYPE -o value '#{ONEAPP_STORAGE_DEVICE}'; then + 'mkfs.#{ONEAPP_STORAGE_FILESYSTEM}' '#{ONEAPP_STORAGE_DEVICE}' + fi + + export STORAGE_UUID=$(blkid -s UUID -o value '#{ONEAPP_STORAGE_DEVICE}') + # Assert that the detected UUID is not empty. + if [[ -z "$STORAGE_UUID" ]]; then exit 1; fi + + # Update fstab if necessary. + gawk -i inplace -f- /etc/fstab <s" + valuesContent: | + defaultSettings: + createDefaultDiskLabeledNodes: true + taintToleration: "node.longhorn.io/create-default-disk=true:NoSchedule" + longhornManager: + tolerations: + - key: node.longhorn.io/create-default-disk + value: "true" + operator: Equal + effect: NoSchedule + longhornDriver: + tolerations: + - key: node.longhorn.io/create-default-disk + value: "true" + operator: Equal + effect: NoSchedule + nodeSelector: + node.longhorn.io/create-default-disk: "true" + longhornUI: + tolerations: + - key: node.longhorn.io/create-default-disk + value: "true" + operator: Equal + effect: NoSchedule + nodeSelector: + node.longhorn.io/create-default-disk: "true" + --- + # Please note, changing default storage class is discouraged: https://longhorn.io/docs/1.3.0/best-practices/#storageclass + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: longhorn-retain + provisioner: driver.longhorn.io + allowVolumeExpansion: true + reclaimPolicy: Retain + volumeBindingMode: Immediate + parameters: + fsType: "ext4" + numberOfReplicas: "3" + staleReplicaTimeout: "2880" + fromBackup: "" + MANIFEST + + msg :info, "Generate Longhorn addon manifest: #{ONEAPP_K8S_LONGHORN_CHART_VERSION}" + Dir.mktmpdir do |temp_dir| + bash <<~SCRIPT + cd #{temp_dir}/ + helm pull longhorn/longhorn --version '#{ONEAPP_K8S_LONGHORN_CHART_VERSION}' + SCRIPT + + manifest %= { chart_b64: slurp("#{temp_dir}/longhorn-#{ONEAPP_K8S_LONGHORN_CHART_VERSION}.tgz") } + + file "#{addon_dir}/one-longhorn.yaml", manifest, overwrite: true + end +end + +def pull_longhorn_images(airgap_dir = ONE_AIRGAP_DIR) + # https://longhorn.io/docs/1.3.0/advanced-resources/deploy/airgap/ + + msg :info, "Pull Longhorn images: #{ONEAPP_K8S_LONGHORN_CHART_VERSION}" + + images = bash <<~SCRIPT, chomp: true + curl -fsSL 'https://raw.githubusercontent.com/longhorn/longhorn/v#{ONEAPP_K8S_LONGHORN_CHART_VERSION}/deploy/longhorn-images.txt' + SCRIPT + + images = images.lines + .map(&:strip) + .reject(&:empty?) + + pull_docker_images images, "#{airgap_dir}/one-longhorn/" +end diff --git a/appliances/OneKE/appliance/metallb.rb b/appliances/OneKE/appliance/metallb.rb new file mode 100644 index 00000000..d290d40b --- /dev/null +++ b/appliances/OneKE/appliance/metallb.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +require 'base64' +require 'tmpdir' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def install_metallb(addon_dir = ONE_ADDON_DIR) + msg :info, 'Install MetalLB' + fetch_metallb addon_dir +end + +def configure_metallb(addon_dir = ONE_ADDON_DIR) + msg :info, 'Configure MetalLB' + + if ONEAPP_K8S_METALLB_CONFIG.nil? 
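+    # No user-supplied manifest: build a default IPAddressPool/L2Advertisement pair
+    # and fill the address list from ONEAPP_K8S_METALLB_RANGES, e.g.
+    #   ONEAPP_K8S_METALLB_RANGES = ['192.168.150.87-192.168.150.88']
+    # becomes spec.addresses: ['192.168.150.87-192.168.150.88'] in the pool
+    # (see extract_metallb_ranges below for the accepted formats).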
+ msg :info, 'Create MetalLB CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: default + namespace: metallb-system + spec: + addresses: [] + --- + apiVersion: metallb.io/v1beta1 + kind: L2Advertisement + metadata: + name: default + namespace: metallb-system + spec: + ipAddressPools: [default] + MANIFEST + + unless ONEAPP_K8S_METALLB_RANGES.empty? + ip_address_pool = documents.find do |doc| + doc['kind'] == 'IPAddressPool' && doc.dig('metadata', 'name') == 'default' + end + ip_address_pool['spec']['addresses'] = extract_metallb_ranges.map { |item| item.join('-') } + end + else + msg :info, 'Use MetalLB user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_METALLB_CONFIG + end + + msg :info, 'Generate MetalLB config manifest' + manifest = YAML.dump_stream *documents + file "#{addon_dir}/one-metallb-config.yaml", manifest, overwrite: true +end + +def fetch_metallb(addon_dir = ONE_ADDON_DIR) + bash <<~SCRIPT + helm repo add metallb https://metallb.github.io/metallb + helm repo update + SCRIPT + + manifest = <<~MANIFEST + --- + apiVersion: v1 + kind: Namespace + metadata: + name: metallb-system + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: one-metallb + namespace: kube-system + spec: + bootstrap: false + targetNamespace: metallb-system + chartContent: "%s" + valuesContent: | + controller: + image: + pullPolicy: IfNotPresent + speaker: + image: + pullPolicy: IfNotPresent + MANIFEST + + msg :info, "Generate MetalLB addon manifest: #{ONEAPP_K8S_METALLB_CHART_VERSION}" + Dir.mktmpdir do |temp_dir| + bash <<~SCRIPT + cd #{temp_dir}/ + helm pull metallb/metallb --version '#{ONEAPP_K8S_METALLB_CHART_VERSION}' + SCRIPT + + manifest %= { chart_b64: slurp("#{temp_dir}/metallb-#{ONEAPP_K8S_METALLB_CHART_VERSION}.tgz") } + + file "#{addon_dir}/one-metallb.yaml", manifest, overwrite: true + end +end + +def extract_metallb_ranges(ranges = ONEAPP_K8S_METALLB_RANGES) + ranges.compact + .map(&:strip) + .reject(&:empty?) + .map { |item| item.split('-').map(&:strip) } + .reject { |item| item.length > 2 } + .map { |item| item.length == 1 ? [item.first, item.first] : item } + .reject { |item| item.map(&:empty?).any? 
} + .reject { |item| !(ipv4?(item.first) && ipv4?(item.last)) } +end diff --git a/appliances/OneKE/appliance/metallb_spec.rb b/appliances/OneKE/appliance/metallb_spec.rb new file mode 100644 index 00000000..89e4b353 --- /dev/null +++ b/appliances/OneKE/appliance/metallb_spec.rb @@ -0,0 +1,136 @@ +# frozen_string_literal: true + +require 'base64' +require 'rspec' +require 'tmpdir' +require 'yaml' + +require_relative 'metallb.rb' + +RSpec.describe 'extract_metallb_ranges' do + it 'should extract and return all ranges (positive)' do + input = [ + '10.11.12.13', + '10.11.12.13-', + '10.11.12.13-10.11.12.31', + ' 10.11.12.13-10.11.12.31', + '10.11.12.13-10.11.12.31 ', + '10.11.12.13 -10.11.12.31', + '10.11.12.13- 10.11.12.31' + ] + output = [ + %w[10.11.12.13 10.11.12.13], + %w[10.11.12.13 10.11.12.13], + %w[10.11.12.13 10.11.12.31], + %w[10.11.12.13 10.11.12.31], + %w[10.11.12.13 10.11.12.31], + %w[10.11.12.13 10.11.12.31], + %w[10.11.12.13 10.11.12.31] + ] + expect(extract_metallb_ranges(input)).to eq output + end + + it 'should extract and return no ranges (negative)' do + input = [ + '', + '-10.11.12.13', + 'asd.11.12.13-10.11.12.31', + '10.11.12.13-10.11.12.31-10.11.12.123' + ] + output = [] + expect(extract_metallb_ranges(input)).to eq output + end +end + +RSpec.describe 'configure_metallb' do + it 'should apply user-defined ranges (empty)' do + stub_const 'ONEAPP_K8S_METALLB_CONFIG', nil + stub_const 'ONEAPP_K8S_METALLB_RANGES', [] + output = YAML.load_stream <<~MANIFEST + --- + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: default + namespace: metallb-system + spec: + addresses: [] + --- + apiVersion: metallb.io/v1beta1 + kind: L2Advertisement + metadata: + name: default + namespace: metallb-system + spec: + ipAddressPools: + - default + MANIFEST + Dir.mktmpdir do |temp_dir| + configure_metallb temp_dir + result = YAML.load_stream File.read "#{temp_dir}/one-metallb-config.yaml" + expect(result).to eq output + end + end + + it 'should apply user-defined ranges' do + stub_const 'ONEAPP_K8S_METALLB_CONFIG', nil + stub_const 'ONEAPP_K8S_METALLB_RANGES', ['192.168.150.87-192.168.150.88'] + output = YAML.load_stream <<~MANIFEST + --- + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: default + namespace: metallb-system + spec: + addresses: + - 192.168.150.87-192.168.150.88 + --- + apiVersion: metallb.io/v1beta1 + kind: L2Advertisement + metadata: + name: default + namespace: metallb-system + spec: + ipAddressPools: + - default + MANIFEST + Dir.mktmpdir do |temp_dir| + configure_metallb temp_dir + result = YAML.load_stream File.read "#{temp_dir}/one-metallb-config.yaml" + expect(result).to eq output + end + end + + it 'should apply user-defined config manifest (and ignore user-defined ranges)' do + manifest = <<~MANIFEST + --- + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: default + namespace: metallb-system + spec: + addresses: + - 192.168.150.87-192.168.150.88 + --- + apiVersion: metallb.io/v1beta1 + kind: L2Advertisement + metadata: + name: default + namespace: metallb-system + spec: + ipAddressPools: + - default + MANIFEST + stub_const 'ONEAPP_K8S_METALLB_CONFIG', Base64.encode64(manifest) + stub_const 'ONEAPP_K8S_METALLB_RANGES', ['1.2.3.4-1.2.3.4'] + output = YAML.load_stream manifest + Dir.mktmpdir do |temp_dir| + configure_metallb temp_dir + result = YAML.load_stream File.read "#{temp_dir}/one-metallb-config.yaml" + expect(result).to eq output + end + end + +end diff --git 
a/appliances/OneKE/appliance/multus.rb b/appliances/OneKE/appliance/multus.rb new file mode 100644 index 00000000..ee040e1b --- /dev/null +++ b/appliances/OneKE/appliance/multus.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +require 'base64' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def configure_multus(manifest_dir = K8S_MANIFEST_DIR) + msg :info, 'Configure Multus' + + if ONEAPP_K8S_MULTUS_CONFIG.nil? + msg :info, 'Create Multus CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-multus + namespace: kube-system + spec: + valuesContent: |- + rke2-whereabouts: + enabled: true + MANIFEST + else + msg :info, 'Use Multus user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_MULTUS_CONFIG + end + + msg :info, 'Generate Multus config manifest' + manifest = YAML.dump_stream *documents + file "#{manifest_dir}/rke2-multus-config.yaml", manifest, overwrite: true +end diff --git a/appliances/OneKE/appliance/onegate.rb b/appliances/OneKE/appliance/onegate.rb new file mode 100644 index 00000000..f1c7e511 --- /dev/null +++ b/appliances/OneKE/appliance/onegate.rb @@ -0,0 +1,133 @@ +# frozen_string_literal: true + +require 'json' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def onegate_service_show + JSON.parse bash 'onegate --json service show' +end + +def onegate_vm_show(vmid = '') + JSON.parse bash "onegate --json vm show #{vmid}" +end + +def onegate_vm_update(data, vmid = '') + bash "onegate vm update #{vmid} --data \"#{data.join('\n')}\"" +end + +def ip_addr_show(ifname = '') + JSON.parse bash "ip --json addr show #{ifname}" +end + +def all_vms_show + onegate_service = onegate_service_show + + roles = onegate_service.dig 'SERVICE', 'roles' + if roles.empty? + msg :error, 'No roles found in Onegate' + exit 1 + end + + vmids = roles.each_with_object [] do |role, acc| + nodes = role.dig 'nodes' + next if nodes.nil? + + nodes.each do |node| + acc << node.dig('vm_info', 'VM', 'ID') + end + end + + vmids.each_with_object [] do |vmid, acc| + acc << onegate_vm_show(vmid) + end +end + +def master_vms_show + onegate_service = onegate_service_show + + roles = onegate_service.dig 'SERVICE', 'roles' + if roles.empty? + msg :error, 'No roles found in Onegate' + exit 1 + end + + role = roles.find { |item| item['name'] == 'master' } + if role.nil? + msg :error, 'No master role found in Onegate' + exit 1 + end + + nodes = role.dig 'nodes' + if nodes.empty? + msg :error, 'No master nodes found in Onegate' + exit 1 + end + + vmids = nodes.map { |node| node.dig 'vm_info', 'VM', 'ID' } + + vmids.each_with_object [] do |vmid, acc| + acc << onegate_vm_show(vmid) + end +end + +def master_vm_show + onegate_service = onegate_service_show + + roles = onegate_service.dig 'SERVICE', 'roles' + if roles.empty? + msg :error, 'No roles found in Onegate' + exit 1 + end + + role = roles.find { |item| item['name'] == 'master' } + if role.nil? + msg :error, 'No master role found in Onegate' + exit 1 + end + + nodes = role.dig 'nodes' + if nodes.empty? + msg :error, 'No nodes found in Onegate' + exit 1 + end + + vmid = nodes.first.dig 'vm_info', 'VM', 'ID' + + onegate_vm_show vmid +end + +def external_ipv4s + onegate_vm = onegate_vm_show + + nics = onegate_vm.dig 'VM', 'TEMPLATE', 'NIC' + if nics.empty? + msg :error, 'No nics found in Onegate' + exit 1 + end + + ip_addr = ip_addr_show + if ip_addr.empty? 
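+    # `ip --json addr show` returned nothing, meaning the OneGate NICs cannot be
+    # matched against locally configured addresses; abort early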
+ msg :error, 'No local addresses found' + exit 1 + end + + ipv4s = nics.each_with_object [] do |nic, acc| + addr = ip_addr.find do |item| + next unless item['address'].downcase == nic['MAC'].downcase + + item['addr_info'].find do |info| + info['family'] == 'inet' && info['local'] == nic['IP'] + end + end + acc << nic['IP'] unless addr.nil? + end + + if ipv4s.empty? + msg :error, 'No IPv4 addresses found' + exit 1 + end + + ipv4s +end diff --git a/appliances/OneKE/appliance/onegate_spec.rb b/appliances/OneKE/appliance/onegate_spec.rb new file mode 100644 index 00000000..67aa0e7e --- /dev/null +++ b/appliances/OneKE/appliance/onegate_spec.rb @@ -0,0 +1,559 @@ +# frozen_string_literal: true + +require 'json' +require 'rspec' + +require_relative 'onegate.rb' + +RSpec.describe 'all_vms_show' do + before do + @svc = JSON.parse(<<~JSON) + { + "SERVICE": { + "name": "asd", + "id": "21", + "state": 2, + "roles": [ + { + "name": "master", + "cardinality": 1, + "state": 2, + "nodes": [ + { + "deploy_id": 49, + "running": null, + "vm_info": { + "VM": { + "ID": "49", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "master_0_(service_21)" + } + } + } + ] + }, + { + "name": "worker", + "cardinality": 1, + "state": 2, + "nodes": [ + { + "deploy_id": 50, + "running": null, + "vm_info": { + "VM": { + "ID": "50", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "worker_0_(service_21)" + } + } + } + ] + }, + { + "name": "storage", + "cardinality": 1, + "state": 2, + "nodes": [ + { + "deploy_id": 51, + "running": null, + "vm_info": { + "VM": { + "ID": "51", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "storage_0_(service_21)" + } + } + } + ] + } + ] + } + } + JSON + @vms = [] + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "master_0_(service_21)", + "ID": "49", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_HASH": "09a9ed140fec2fa1a2281a3125952d6f2951b67a67534647b0a606ae2d478f60", + "ONEGATE_K8S_MASTER": "172.20.0.100", + "ONEGATE_K8S_TOKEN": "sg7711.p19vy0eqxefc0lqz", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.100", + "MAC": "02:00:ac:14:00:64", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "worker_0_(service_21)", + "ID": "50", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": 
"ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "READY": "YES", + "ROLE_NAME": "worker", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.101", + "MAC": "02:00:ac:14:00:65", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "storage_0_(service_21)", + "ID": "51", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "READY": "YES", + "ROLE_NAME": "storage", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.102", + "MAC": "02:00:ac:14:00:66", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + end + it 'should return all vms belonging to svc' do + allow(self).to receive(:onegate_service_show).and_return(@svc) + allow(self).to receive(:onegate_vm_show).and_return(*@vms) + expect(all_vms_show.map { |item| item['VM']['TEMPLATE']['NIC'][0]['IP'] }).to eq ['172.20.0.100', '172.20.0.101', '172.20.0.102'] + end +end + +RSpec.describe 'master_vms_show' do + before do + @svc = JSON.parse(<<~JSON) + { + "SERVICE": { + "name": "asd", + "id": "4", + "state": 10, + "roles": [ + { + "name": "vnf", + "cardinality": 1, + "state": 2, + "nodes": [ + { + "deploy_id": 12, + "running": null, + "vm_info": { + "VM": { + "ID": "12", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "vnf_0_(service_4)" + } + } + } + ] + }, + { + "name": "master", + "cardinality": 3, + "state": 10, + "nodes": [ + { + "deploy_id": 13, + "running": null, + "vm_info": { + "VM": { + "ID": "13", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "master_0_(service_4)" + } + } + }, + { + "deploy_id": 14, + "running": null, + "vm_info": { + "VM": { + "ID": "14", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "master_1_(service_4)" + } + } + }, + { + "deploy_id": 15, + "running": null, + "vm_info": { + "VM": { + "ID": "15", + "UID": 
"0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "master_2_(service_4)" + } + } + } + ] + }, + { + "name": "worker", + "cardinality": 0, + "state": 2, + "nodes": [] + }, + { + "name": "storage", + "cardinality": 0, + "state": 2, + "nodes": [] + } + ] + } + } + JSON + @vms = [] + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "master_0_(service_4)", + "ID": "13", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_PORT,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_HASH": "c74201821cb4878b6896d3284f825be738cb11dbc2c5153e88c84da0b3d3ab04", + "ONEGATE_K8S_KEY": "146ecb3e9d8bce9f584f55b234bd2700d2a7747177fb8fd60f42a161a48e7c07", + "ONEGATE_K8S_MASTER": "10.2.11.201", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-10-2-11-201", + "ONEGATE_K8S_TOKEN": "ifv2c4.h8d88lzjlyl5mkod", + "ONEGATE_LB0_IP": "10.2.11.86", + "ONEGATE_LB0_PORT": "6443", + "ONEGATE_LB0_PROTOCOL": "TCP", + "ONEGATE_LB0_SERVER_HOST": "10.2.11.201", + "ONEGATE_LB0_SERVER_PORT": "6443", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "4", + "USER_INPUTS": { + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "10.2.11.201", + "MAC": "02:00:0a:02:0b:c9", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "master_1_(service_4)", + "ID": "14", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_PORT,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-10-2-11-202", + "ONEGATE_LB0_IP": "10.2.11.86", + "ONEGATE_LB0_PORT": "6443", + "ONEGATE_LB0_PROTOCOL": "TCP", + "ONEGATE_LB0_SERVER_HOST": "10.2.11.202", + "ONEGATE_LB0_SERVER_PORT": "6443", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "4", + "USER_INPUTS": { + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "10.2.11.202", + "MAC": "02:00:0a:02:0b:ca", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "master_2_(service_4)", + "ID": "15", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_PORT,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-10-2-11-203", + "ONEGATE_LB0_IP": "10.2.11.86", + "ONEGATE_LB0_PORT": "6443", + "ONEGATE_LB0_PROTOCOL": "TCP", + "ONEGATE_LB0_SERVER_HOST": "10.2.11.203", + "ONEGATE_LB0_SERVER_PORT": "6443", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "4", + "USER_INPUTS": { + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)" + } + }, + "TEMPLATE": { + "NIC": [ + { + 
"IP": "10.2.11.203", + "MAC": "02:00:0a:02:0b:cb", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + end + it 'should return all vms belonging to the master role' do + allow(self).to receive(:onegate_service_show).and_return(@svc) + allow(self).to receive(:onegate_vm_show).and_return(*@vms) + expect(master_vms_show.map { |item| item['VM']['TEMPLATE']['NIC'][0]['IP'] }).to eq ['10.2.11.201', '10.2.11.202', '10.2.11.203'] + end +end + +RSpec.describe 'external_ipv4s' do + it 'should return list of ipv4 addresses' do + allow(self).to receive(:onegate_vm_show).and_return JSON.parse <<~JSON + { + "VM": { + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.100", + "MAC": "02:00:ac:14:00:64", + "NAME": "_NIC0", + "NETWORK": "service" + } + ] + } + } + } + JSON + allow(self).to receive(:ip_addr_show).and_return JSON.parse <<~JSON + [ + { + "ifindex": 1, + "ifname": "lo", + "flags": [ + "LOOPBACK", + "UP", + "LOWER_UP" + ], + "mtu": 65536, + "qdisc": "noqueue", + "operstate": "UNKNOWN", + "group": "default", + "txqlen": 1000, + "link_type": "loopback", + "address": "00:00:00:00:00:00", + "broadcast": "00:00:00:00:00:00", + "addr_info": [ + { + "family": "inet", + "local": "127.0.0.1", + "prefixlen": 8, + "scope": "host", + "label": "lo", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + }, + { + "family": "inet6", + "local": "::1", + "prefixlen": 128, + "scope": "host", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + } + ] + }, + { + "ifindex": 2, + "ifname": "eth0", + "flags": [ + "BROADCAST", + "MULTICAST", + "UP", + "LOWER_UP" + ], + "mtu": 1500, + "qdisc": "pfifo_fast", + "operstate": "UP", + "group": "default", + "txqlen": 1000, + "link_type": "ether", + "address": "02:00:ac:14:00:64", + "broadcast": "ff:ff:ff:ff:ff:ff", + "addr_info": [ + { + "family": "inet", + "local": "172.20.0.100", + "prefixlen": 24, + "broadcast": "172.20.0.255", + "scope": "global", + "label": "eth0", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + }, + { + "family": "inet6", + "local": "fe80::acff:fe14:64", + "prefixlen": 64, + "scope": "link", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + } + ] + }, + { + "ifindex": 3, + "ifname": "docker0", + "flags": [ + "NO-CARRIER", + "BROADCAST", + "MULTICAST", + "UP" + ], + "mtu": 1500, + "qdisc": "noqueue", + "operstate": "DOWN", + "group": "default", + "link_type": "ether", + "address": "02:42:04:21:6f:5d", + "broadcast": "ff:ff:ff:ff:ff:ff", + "addr_info": [ + { + "family": "inet", + "local": "172.17.0.1", + "prefixlen": 16, + "broadcast": "172.17.255.255", + "scope": "global", + "label": "docker0", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + } + ] + } + ] + JSON + expect(external_ipv4s).to eq ['172.20.0.100'] + end +end diff --git a/appliances/OneKE/appliance/traefik.rb b/appliances/OneKE/appliance/traefik.rb new file mode 100644 index 00000000..7b52c373 --- /dev/null +++ b/appliances/OneKE/appliance/traefik.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +require 'base64' +require 'tmpdir' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def install_traefik(addon_dir = ONE_ADDON_DIR) + msg :info, 'Install Traefik' + fetch_traefik addon_dir +end + +def fetch_traefik(addon_dir = ONE_ADDON_DIR) + bash <<~SCRIPT + helm repo add traefik https://helm.traefik.io/traefik + helm repo update + SCRIPT + + manifest = <<~MANIFEST + --- + apiVersion: v1 + kind: Namespace + metadata: + name: traefik-system + --- 
+ apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: one-traefik + namespace: kube-system + spec: + bootstrap: false + targetNamespace: traefik-system + chartContent: "%s" + valuesContent: | + deployment: + replicas: 2 + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: traefik + service: + type: NodePort + ports: + web: + nodePort: 32080 + websecure: + nodePort: 32443 + MANIFEST + + msg :info, "Generate Traefik addon manifest: #{ONEAPP_K8S_TRAEFIK_CHART_VERSION}" + Dir.mktmpdir do |temp_dir| + bash <<~SCRIPT + cd #{temp_dir}/ + helm pull traefik/traefik --version '#{ONEAPP_K8S_TRAEFIK_CHART_VERSION}' + SCRIPT + + manifest %= { chart_b64: slurp("#{temp_dir}/traefik-#{ONEAPP_K8S_TRAEFIK_CHART_VERSION}.tgz") } + + file "#{addon_dir}/one-traefik.yaml", manifest, overwrite: true + end +end diff --git a/appliances/OneKE/appliance/vnf.rb b/appliances/OneKE/appliance/vnf.rb new file mode 100644 index 00000000..37af622a --- /dev/null +++ b/appliances/OneKE/appliance/vnf.rb @@ -0,0 +1,144 @@ +# frozen_string_literal: true + +require_relative 'config.rb' +require_relative 'helpers.rb' +require_relative 'onegate.rb' + +def configure_vnf(gw_ipv4 = ONEAPP_VROUTER_ETH1_VIP0) + gw_ok = !gw_ipv4.nil? && ipv4?(gw_ipv4) + + if gw_ok + msg :debug, 'Configure default gateway (temporarily)' + bash "ip route replace default via #{gw_ipv4} dev eth0" + end + + msg :info, 'Install the vnf-restore service' + + file '/etc/systemd/system/vnf-restore.service', <<~SERVICE + [Unit] + After=network.target + + [Service] + Type=oneshot + ExecStart=/bin/sh -ec '#{gw_ok ? "ip route replace default via #{gw_ipv4} dev eth0" : ':'}' + + [Install] + WantedBy=multi-user.target + SERVICE + + # Make sure vnf-restore is triggered everytime one-context-reconfigure.service runs + file '/etc/systemd/system/one-context-reconfigure.service.d/vnf-restore.conf', <<~SERVICE + [Service] + ExecStartPost=/usr/bin/systemctl restart vnf-restore.service + SERVICE + + msg :info, 'Enable and start the vnf-restore service' + bash <<~SCRIPT + systemctl daemon-reload + systemctl enable vnf-restore.service --now + SCRIPT +end + +def vnf_supervisor_setup_backend(index = 0, + lb_ipv4 = ONEAPP_VROUTER_ETH0_VIP0, + lb_port = 9345) + + lb_ok = !lb_ipv4.nil? && ipv4?(lb_ipv4) && port?(lb_port) + + unless lb_ok + msg :error, "Invalid IPv4/port for VNF/HAPROXY/#{index}, aborting.." + exit 1 + end + + ipv4 = external_ipv4s + .reject { |item| item == lb_ipv4 } + .first + + msg :info, "Register VNF/HAPROXY/#{index} backend in OneGate" + + onegate_vm_update [ + "ONEGATE_HAPROXY_LB#{index}_IP=#{lb_ipv4}", + "ONEGATE_HAPROXY_LB#{index}_PORT=#{lb_port}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_HOST=#{ipv4}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_PORT=#{lb_port}" + ] +end + +def vnf_control_plane_setup_backend(index = 1, + lb_ipv4 = ONEAPP_VROUTER_ETH0_VIP0, + lb_port = 6443) + + lb_ok = !lb_ipv4.nil? && ipv4?(lb_ipv4) && port?(lb_port) + + unless lb_ok + msg :error, "Invalid IPv4/port for VNF/HAPROXY/#{index}, aborting.." 
+ exit 1 + end + + ipv4 = external_ipv4s + .reject { |item| item == lb_ipv4 } + .first + + msg :info, "Register VNF/HAPROXY/#{index} backend in OneGate" + + onegate_vm_update [ + "ONEGATE_HAPROXY_LB#{index}_IP=#{lb_ipv4}", + "ONEGATE_HAPROXY_LB#{index}_PORT=#{lb_port}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_HOST=#{ipv4}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_PORT=#{lb_port}" + ] +end + +def vnf_ingress_setup_https_backend(index = 2, + lb_ipv4 = ONEAPP_VROUTER_ETH0_VIP0, + lb_port = ONEAPP_VNF_HAPROXY_LB2_PORT) + + lb_ok = !lb_ipv4.nil? && ipv4?(lb_ipv4) && port?(lb_port) + + unless lb_ok + msg :error, "Invalid IPv4/port for VNF/HAPROXY/#{index}, aborting.." + exit 1 + end + + ipv4 = external_ipv4s + .reject { |item| item == lb_ipv4 } + .first + + msg :info, "Register VNF/HAPROXY/#{index} backend in OneGate" + + server_port = lb_port.to_i + 32_000 + + onegate_vm_update [ + "ONEGATE_HAPROXY_LB#{index}_IP=#{lb_ipv4}", + "ONEGATE_HAPROXY_LB#{index}_PORT=#{lb_port}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_HOST=#{ipv4}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_PORT=#{server_port}" + ] +end + +def vnf_ingress_setup_http_backend(index = 3, + lb_ipv4 = ONEAPP_VROUTER_ETH0_VIP0, + lb_port = ONEAPP_VNF_HAPROXY_LB3_PORT) + + lb_ok = !lb_ipv4.nil? && ipv4?(lb_ipv4) && port?(lb_port) + + unless lb_ok + msg :error, "Invalid IPv4/port for VNF/HAPROXY/#{index}, aborting.." + exit 1 + end + + ipv4 = external_ipv4s + .reject { |item| item == lb_ipv4 } + .first + + msg :info, "Register VNF/HAPROXY/#{index} backend in OneGate" + + server_port = lb_port.to_i + 32_000 + + onegate_vm_update [ + "ONEGATE_HAPROXY_LB#{index}_IP=#{lb_ipv4}", + "ONEGATE_HAPROXY_LB#{index}_PORT=#{lb_port}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_HOST=#{ipv4}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_PORT=#{server_port}" + ] +end diff --git a/appliances/lib/common.sh b/appliances/lib/common.sh new file mode 100644 index 00000000..253aeb72 --- /dev/null +++ b/appliances/lib/common.sh @@ -0,0 +1,503 @@ +#!/usr/bin/env bash + +# ---------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +# ---------------------------------------------------------------------------- # + + +# shellcheck disable=SC2086 +true + + +# args: +msg() +{ + msg_type="$1" + shift + + case "$msg_type" in + info) + printf "[%s] => " "$(date)" + echo 'INFO:' "$@" + ;; + debug) + printf "[%s] => " "$(date)" >&2 + echo 'DEBUG:' "$@" >&2 + ;; + warning) + printf "[%s] => " "$(date)" >&2 + echo 'WARNING [!]:' "$@" >&2 + ;; + error) + printf "[%s] => " "$(date)" >&2 + echo 'ERROR [!!]:' "$@" >&2 + return 1 + ;; + *) + printf "[%s] => " "$(date)" >&2 + echo 'UNKNOWN [?!]:' "$@" >&2 + return 2 + ;; + esac + return 0 +} + +# arg: +gen_password() +{ + pw_length="${1:-16}" + new_pw='' + + while true ; do + if command -v pwgen >/dev/null ; then + new_pw=$(pwgen -s "${pw_length}" 1) + break + elif command -v openssl >/dev/null ; then + new_pw="${new_pw}$(openssl rand -base64 ${pw_length} | tr -dc '[:alnum:]')" + else + new_pw="${new_pw}$(head /dev/urandom | tr -dc '[:alnum:]')" + fi + # shellcheck disable=SC2000 + [ "$(echo $new_pw | wc -c)" -ge "$pw_length" ] && break + done + + echo "$new_pw" | cut -c1-${pw_length} +} + +# arg: +is_ipv4_address() +{ + echo "$1" | grep '^[0-9.]*$' | awk ' + BEGIN { + FS = "."; + octet = 0; + } + { + for(i = 1; i <= NF; i++) + if (($i >= 0) && ($i <= 255)) + octet++; + } + END { + if (octet == 4) + exit 0; + else + exit 1; + }' +} + +get_local_ip() +{ + extif=$(ip r | awk '{if ($1 == "default") print $5;}') + local_ip=$(ip a show dev "$extif" | \ + awk '{if ($1 == "inet") print $2;}' | sed -e '/^127\./d' -e 's#/.*##') + + echo "${local_ip:-127.0.0.1}" +} + +# arg: +is_my_ip() +( + _ip="$1" + + _local_ips=$(ip a | \ + sed -n 's#^[[:space:]]*inet[[:space:]]\+\([^/[:space:]]\+\)[/[:space:]].*#\1#p') + + for _local_ip in ${_local_ips} ; do + if [ "$_ip" = "$_local_ip" ] ; then + return 0 + fi + done + + return 1 +) + +# returns an netmask in the old notation, eg.: 255.255.255.255 +# arg: +# +# NOTE: shamelessly copied from here: +# https://forums.gentoo.org/viewtopic-t-888736-start-0.html +cidr_to_mask () +( + # Number of args to shift, 255..255, first non-255 byte, zeroes + set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0 + [ $1 -gt 1 ] && shift $1 || shift + echo ${1-0}.${2-0}.${3-0}.${4-0} +) + +# Gets the network part of an IP +# arg: +get_network_ip() +( + awk -v ip="$1" -v mask="$2" 'END { + split(ip, ip_b, "."); split(mask, mask_b, "."); + for (i=1; i<=4; ++i) x = x "." and(ip_b[i], mask_b[i]); + sub(/^./, "", x); print x; }' +# +# NOTE: this originally never worked properly: +# https://gitlab.com/openconnect/vpnc-scripts/-/merge_requests/5 +# +# The fix is to first find the network address. +get_gw_ip() +( + _ip=$(echo "$1" | awk 'BEGIN{FS="/"}{print $1;}') + _mask=$(echo "$1" | awk 'BEGIN{FS="/"}{print $2;}') + + if echo "$_mask" | grep -q '^[0-9][0-9]*$' && [ "$_mask" -le 32 ] ; then + # ip had cidr prefix - we will find network ip + _mask=$(cidr_to_mask "$_mask") + _ip=$(get_network_ip "$_ip" "$_mask") + elif [ -n "$_mask" ] ; then + # netmask is garbage + return 1 + fi + + ip r g "$_ip" 2>/dev/null | awk ' + { + for(i = 1; i <= NF; i++) + { + if ($i == "src") + { + print $(i + 1); + exit 0; + } + } + } + ' +) + +# it will create a new hostname from an ip address, but only if the current one +# is just localhost and in that case it will also prints it on the stdout +# arg: [] +generate_hostname() +( + if [ "$(hostname -s)" = localhost ] ; then + if [ -n "$1" ] ; then + _new_hostname="$(echo $1 | tr -d '[:space:]' | tr '.' 
'-')" + else + _new_hostname="one-$(get_local_ip | tr '.' '-')" + fi + hostname "$_new_hostname" + hostname > /etc/hostname + hostname -s + fi +) + +# show default help based on the ONE_SERVICE_PARAMS +# service_help in appliance.sh may override this function +default_service_help() +{ + echo "USAGE: " + + for _command in 'help' 'install' 'configure' 'bootstrap'; do + echo " $(basename "$0") ${_command}" + + case "${_command}" in + help) echo ' Prints this help' ;; + install) echo ' Installs service' ;; + configure) echo ' Configures service via contextualization or defaults' ;; + bootstrap) echo ' Bootstraps service via contextualization' ;; + esac + + local _index=0 + while [ -n "${ONE_SERVICE_PARAMS[${_index}]}" ]; do + local _name="${ONE_SERVICE_PARAMS[${_index}]}" + local _type="${ONE_SERVICE_PARAMS[$((_index + 1))]}" + local _desc="${ONE_SERVICE_PARAMS[$((_index + 2))]}" + local _input="${ONE_SERVICE_PARAMS[$((_index + 3))]}" + _index=$((_index + 4)) + + if [ "${_command}" = "${_type}" ]; then + if [ -z "${_input}" ]; then + echo -n ' ' + else + echo -n ' * ' + fi + + printf "%-25s - %s\n" "${_name}" "${_desc}" + fi + done + + echo + done + + echo 'Note: (*) variables are provided to the user via USER_INPUTS' +} + +#TODO: more or less duplicate to common.sh/service_help() +params2md() +{ + local _command=$1 + + local _index=0 + local _count=0 + while [ -n "${ONE_SERVICE_PARAMS[${_index}]}" ]; do + local _name="${ONE_SERVICE_PARAMS[${_index}]}" + local _type="${ONE_SERVICE_PARAMS[$((_index + 1))]}" + local _desc="${ONE_SERVICE_PARAMS[$((_index + 2))]}" + local _input="${ONE_SERVICE_PARAMS[$((_index + 3))]}" + _index=$((_index + 4)) + + if [ "${_command}" = "${_type}" ] && [ -n "${_input}" ]; then + # shellcheck disable=SC2016 + printf '* `%s` - %s\n' "${_name}" "${_desc}" + _count=$((_count + 1)) + fi + done + + if [ "${_count}" -eq 0 ]; then + echo '* none' + fi +} + +create_one_service_metadata() +{ + # shellcheck disable=SC2001 + cat >"${ONE_SERVICE_METADATA}" < [] +# use in pipe with yum -y --showduplicates list +# yum version follows these rules: +# starting at the first colon (:) and up to the first hyphen (-) +# example: +# 3:18.09.1-3.el7 -> 18.09.1 +yum_pkg_filter() +{ + _pkg="$1" + _version="$2" + + awk -v pkg="$_pkg" '{if ($1 ~ "^" pkg) print $2;}' | \ + sed -e 's/^[^:]*://' -e 's/-.*//' | \ + if [ -n "$_version" ] ; then + # only the correct versions + awk -v version="$_version" ' + { + if ($1 ~ "^" version) + print $1; + }' + else + cat + fi +} + +# arg: +is_in_list() +{ + _word="$1" + shift + + # shellcheck disable=SC2048 + for i in $* ; do + if [ "$_word" = "$i" ] ; then + return 0 + fi + done + + return 1 +} + +# arg: +is_true() +{ + _value=$(eval echo "\$${1}" | tr '[:upper:]' '[:lower:]') + case "$_value" in + 1|true|yes|y) + return 0 + ;; + esac + + return 1 +} + +# arg: [context file] +save_context_base64() +{ + _context_file="${1:-$ONE_SERVICE_CONTEXTFILE}" + + msg info "Store current context in the file: ${_context_file}" + _context_vars=$(set | sed -n 's/^\(ONEAPP_[^=[:space:]]\+\)=.*/\1/p') + + if ! [ -f "$_context_file" ] ; then + echo '{}' > "$_context_file" + fi + + _old_context=$(cat "$_context_file") + + { + echo "$_old_context" + + for _context_var in ${_context_vars} ; do + _value=$(eval "printf \"\$${_context_var}\"") + echo '{}' | jq -S --arg val "$_value" ". 
+ {\"${_context_var}\": \$val | @base64}" + done + } | jq -sS add > "$_context_file" +} + +# arg: [context file] +save_context() +{ + _context_file="${1:-$ONE_SERVICE_CONTEXTFILE}" + + msg info "Store current context in the file: ${_context_file}" + + "${ONE_SERVICE_SETUP_DIR}/bin/context-helper" \ + update "${_context_file}" +} + +# arg: [context file] +load_context() +{ + _context_file="${1:-$ONE_SERVICE_CONTEXTFILE}" + + if ! [ -f "${_context_file}" ] ; then + msg info "Create empty context file: ${_context_file}" + echo '{}' > "${_context_file}" + return 0 + fi + + msg info "Load last context from the file: ${_context_file}" + + _vars=$("${ONE_SERVICE_SETUP_DIR}/bin/context-helper" \ + -t names load "${_context_file}") + + for i in $_vars ; do + _value=$(get_value_from_context_file "${i}" "${_context_file}") + eval "${i}=\$(echo \"\$_value\")" + # shellcheck disable=SC2163 + export "${i}" + done +} + +# arg: [context file] +get_changed_context_vars() +{ + _context_file="${1:-$ONE_SERVICE_CONTEXTFILE}" + + if ! [ -f "${_context_file}" ] ; then + return 0 + fi + + "${ONE_SERVICE_SETUP_DIR}/bin/context-helper" \ + -t names compare "${_context_file}" +} + +# arg: [] +get_value_from_context_file() +{ + _var="$1" + _context_file="${2:-$ONE_SERVICE_CONTEXTFILE}" + + [ -z "${_var}" ] && return 1 + + jq -cr ".${_var}" < "${_context_file}" +} + +# arg: +is_context_variable_updated() +{ + _varname="$1" + + for v in $(get_changed_context_vars "${ONE_SERVICE_CONTEXTFILE}") ; do + if [ "$v" = "${_varname}" ] ; then + # variable has been updated + return 0 + fi + done + + return 1 +} + +# arg: +check_pidfile() +{ + _pidfile="$1" + + if [ -f "${_pidfile}" ] ; then + _pid=$(grep '^[0-9]\+$' "${_pidfile}") + else + _pid= + fi + + if [ -n "${_pid}" ] ; then + kill -0 ${_pid} + return $? + fi + + return 1 +} + +# arg: +wait_for_pidfile() +{ + _pidfile="$1" + _timeout=60 # we wait at most one minute... + + while [ "$_timeout" -gt 0 ]; do + # we wait for the pidfile to emerge... + if [ -f "$_pidfile" ] ; then + _pid=$(cat "$_pidfile") + # we retry until the pid in pidfile is a number... + if echo "$_pid" | grep -q '^[0-9]\+$' ; then + # the pid must be stable for 3 seconds... + _check_time=3 + while [ "$_check_time" -gt 0 ] ; do + sleep 1s + if kill -0 "$_pid" ; then + _check_time=$(( _check_time - 1 )) + else + break + fi + done + if [ "$_check_time" -eq 0 ] ; then + # we succeeded - we have valid pid... + break + fi + fi + fi + + sleep 1s + _timeout=$(( _timeout - 1 )) + done +} + +wait_for_file() +( + _timeout=60 # we wait at most one minute... + + while [ "$_timeout" -gt 0 ] ; do + if [ -e "$1" ] ; then + return 0 + fi + + sleep 1s + _timeout=$(( _timeout - 1 )) + done + + return 1 +) + diff --git a/appliances/lib/context-helper.py b/appliances/lib/context-helper.py new file mode 100755 index 00000000..d923616d --- /dev/null +++ b/appliances/lib/context-helper.py @@ -0,0 +1,295 @@ +#!/usr/bin/env python3 + +# --------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +# --------------------------------------------------------------------------- # + + +import sys +import os +import argparse +import re +import json + + +JSON_INDENT = 4 + + +class SaveFileError(Exception): + """When there is an issue with writing to the context file.""" + pass + + +class OpenFileError(Exception): + """When there is an issue with opening the context file.""" + pass + + +def get_current_context(env_prefix): + """ Returns all env. variables where names start with 'env_prefix'. """ + + context = {} + regex = re.compile("^" + env_prefix) + for env_var in os.environ: + if regex.search(env_var): + context[env_var] = os.environ[env_var] + + return context + + +def get_file_context(env_prefix, context_file): + """ + Returns all env. variables from 'context_file' where names start with + 'env_prefix'. + . + """ + + # load context file + with open(context_file, "r") as f: + file_context = json.load(f) + + # mark all not matching prefix + regex = re.compile("^" + env_prefix) + to_delete = [] + for env_var in file_context: + if not regex.search(env_var): + to_delete.append(env_var) + + # delete all non-matching env. vars + for env_var in to_delete: + del file_context[env_var] + + return file_context + + +def save_context(env_prefix, context_file, json_indent=JSON_INDENT): + """ + Saves current context (env. variables with matching 'env_prefix') into the + 'context_file'. + + It will overwrite the existing file if it exists! + + Returns context. + """ + + context = get_current_context(env_prefix) + with open(context_file, "w") as f: + f.write(json.dumps(context, indent=json_indent)) + f.write("\n") + + return context + + +def load_context(env_prefix, context_file): + """ + It loads context from the 'context_file'. It will load only those + variables matching 'env_prefix' and which are not yet in the current + context. + + It will NOT overwrite any variable in the current context! + + Returns result context as described above. + + NOTE: + Because it is impossible to modify environment of the caller - the result + from this function should dumped to the stdout as a json, which must be + sourced later by the caller (eg: shell script). + """ + + # load context file + file_context = get_file_context(env_prefix, context_file) + + # filter only those not in context already + context = get_current_context(env_prefix) + result = {} + for file_env in file_context: + if context.get(file_env) is None: + result[file_env] = file_context[file_env] + + return result + + +def update_context(env_prefix, context_file, json_indent=JSON_INDENT): + """ + Similar to save but it will only update the file - it will overwrite + existing variables in the 'context_file' with those from the current + context but it will leave the rest intact. + + Returns full content of the file as context. 
+ """ + + # load context file + file_context = get_file_context(env_prefix, context_file) + + # load current context + context = get_current_context(env_prefix) + + # update file context with current context + for env_var in context: + file_context[env_var] = context[env_var] + + # write updated content back + with open(context_file, "w") as f: + f.write(json.dumps(file_context, indent=json_indent)) + f.write("\n") + + return file_context + + +def compare_context(env_prefix, context_file): + """ + It will return keypairs of context variables which differs from the + 'context_file' and the current context. + """ + + # load context file + file_context = get_file_context(env_prefix, context_file) + + # load current context + context = get_current_context(env_prefix) + + # find all changed + result = {} + for env_var in context: + if file_context.get(env_var) != context.get(env_var): + result[env_var] = context[env_var] + + # when variable was not changed but deleted + # TO NOTE: currently not usable because VNF is setting defaults in context.json + # + #for env_var in file_context: + # if context.get(env_var) is None: + # result[env_var] = "" + + return result + + +def error_msg(msg): + length = 80 + line = "" + for word in msg.split(' '): + if (len(line + ' ' + word)) < length: + line = line.strip() + ' ' + word + else: + print(line, file=sys.stderr) + line = word + if (line != ""): + print(line, file=sys.stderr) + + +def print_result(context, output_type, json_indent=JSON_INDENT): + """ + Prints context according to output type (the whole json, or just variable + names - each on separate line - for simple usage). + """ + + if output_type == 'json': + print(json.dumps(context, indent=json_indent)) + elif output_type == 'names': + for i in context: + print(i) + elif output_type == 'shell': + for i in context: + print("%s='%s'" % (i, context[i])) + + +def main(): + parser = argparse.ArgumentParser(description="ONE context helper") + parser.add_argument("-f", "--force", + dest="context_overwrite", + required=False, + action='store_const', + const=True, + default=False, + help="Forces overwrite of the file if needed") + parser.add_argument("-e", "--env-prefix", + required=False, + metavar="", + default="ONEAPP_", + help="Prefix of the context variables " + "(default: 'ONEAPP_')") + parser.add_argument("-t", "--output-type", + required=False, + metavar="json|names|shell", + choices=["json", "names", "shell"], + default="json", + help="Output type (affects only load and compare) " + "(default: 'json')") + parser.add_argument("context_action", + metavar="save|load|update|compare", + choices=["save", "load", "update", "compare"], + help=("Save/update context into the file," + " or load from it," + " or compare it with the current context.")) + parser.add_argument("context_file", + metavar="", + help="Filepath of the context file") + + args = parser.parse_args() + + if args.context_action == "save": + try: + if (os.path.isfile(args.context_file) + and (not args.context_overwrite)): + # file exists and no --force used... 
+ raise SaveFileError + except SaveFileError: + error_msg("ERROR: Trying to save context but the file: '" + + args.context_file + "' already exists!") + error_msg("Hint 1: Try '--force' if you wish to overwrite it") + error_msg("Hint 2: Or maybe you want to use 'update'...") + return 1 + context = save_context(args.env_prefix, args.context_file) + + elif args.context_action == "load": + try: + if not os.path.isfile(args.context_file): + raise OpenFileError + except OpenFileError: + error_msg("ERROR: Trying to open the context file: '" + + args.context_file + "' but it doesn't exist!") + return 1 + context = load_context(args.env_prefix, args.context_file) + + # dump context values which should be sourced by caller + print_result(context, args.output_type) + + elif args.context_action == "update": + if os.path.isfile(args.context_file): + # update existing + context = update_context(args.env_prefix, args.context_file) + else: + # no file yet, so simply save context instead + context = save_context(args.env_prefix, args.context_file) + + elif args.context_action == "compare": + try: + if not os.path.isfile(args.context_file): + raise OpenFileError + except OpenFileError: + error_msg("ERROR: Trying to open the context file: '" + + args.context_file + "' but it doesn't exist!") + return 1 + context = compare_context(args.env_prefix, args.context_file) + + # dump context values which should be sourced by caller + print_result(context, args.output_type) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) + diff --git a/appliances/lib/functions.sh b/appliances/lib/functions.sh new file mode 100644 index 00000000..c382eb50 --- /dev/null +++ b/appliances/lib/functions.sh @@ -0,0 +1,407 @@ +# ---------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +# ---------------------------------------------------------------------------- # + +# args: "$@" +_parse_arguments() +{ + _ACTION=nil + state=nil + while [ -n "$1" ] ; do + case "$state" in + nil) + case "$1" in + -h|--help|help) + _ACTION=help + state=done + ;; + install) + _ACTION=install + state=install + ;; + configure|bootstrap) + _ACTION="$1" + state=configure + ;; + *) + _ACTION=badargs + msg unknown "BAD USAGE: unknown argument: $1" + break + ;; + esac + ;; + configure) + case "$1" in + reconfigure) + ONE_SERVICE_RECONFIGURE=true + state=done + ;; + *) + _ACTION=badargs + msg unknown "BAD USAGE: unknown argument: $1" + break + ;; + esac + ;; + install) + ONE_SERVICE_VERSION="$1" + state=done + ;; + done) + _ACTION=badargs + msg unknown "BAD USAGE: extraneous argument(s)" + break + ;; + esac + shift + done +} + +# args: "$0" "${@}" +_lock_or_fail() +{ + this_script="$1" + if [ "${_SERVICE_LOCK}" != "$this_script" ] ; then + exec env _SERVICE_LOCK="$this_script" flock -xn $this_script "$@" + fi +} + +_on_exit() +{ + # this is the exit handler - I want to clean up as much as I can + set +e + + # first do whatever the service appliance needs to clean after itself + service_cleanup + + # delete temporary working file(s) + if [ -n "$_SERVICE_LOG_PIPE" ] ; then + rm -f "$_SERVICE_LOG_PIPE" + fi + + # exiting while the stage was interrupted - change status to failure + _status=$(_get_current_service_result) + case "$_status" in + started) + _set_service_status failure + ;; + esac + + # all done - delete pid file and exit + rm -f "$ONE_SERVICE_PIDFILE" +} + +_trap_exit() +{ + trap '_on_exit 2>/dev/null' INT QUIT TERM EXIT +} + +_is_running() +{ + pid=$(_get_pid) + + if echo "$pid" | grep -q '^[0-9]\+$' ; then + kill -0 $pid + return $? + fi + + return 1 +} + +_get_pid() +{ + if [ -f "$ONE_SERVICE_PIDFILE" ] ; then + cat "$ONE_SERVICE_PIDFILE" + fi +} + +_write_pid() +{ + echo $$ > "$ONE_SERVICE_PIDFILE" +} + +_get_service_status() +{ + if [ -f "$ONE_SERVICE_STATUS" ] ; then + cat "$ONE_SERVICE_STATUS" + fi +} + +_get_current_service_step() +{ + _get_service_status | sed -n 's/^\(install\|configure\|bootstrap\)_.*/\1/p' +} + +_get_current_service_result() +{ + _result=$(_get_service_status | sed -n 's/^\(install\|configure\|bootstrap\)_\(.*\)/\2/p') + case "$_result" in + started|success|failure) + echo "$_result" + ;; + esac +} + +# arg: install|configure|bootstrap [| +_check_service_status() +{ + _reconfigure="$2" + + case "$1" in + install) + case "$(_get_service_status)" in + '') + # nothing was done so far + return 0 + ;; + install_success) + msg warning "Installation was already done - skip" + return 1 + ;; + install_started) + msg error "Installation was probably interrupted - abort" + _set_service_status failure + exit 1 + ;; + install_failure) + msg error "Last installation attempt failed - abort" + exit 1 + ;; + *) + msg error "Install step cannot be run - go check: ${ONE_SERVICE_STATUS}" + exit 1 + ;; + esac + ;; + configure) + case "$(_get_service_status)" in + '') + # nothing was done so far - missing install + msg error "Cannot proceed with configuration - missing installation step" + exit 1 + ;; + install_success) + # installation was successfull - can continue + return 0 + ;; + configure_success) + if is_true _reconfigure ; then + msg info "Starting reconfiguration of the service" + return 0 + else + msg warning "Configuration was already done - skip" + return 1 + fi + ;; + configure_started) + if is_true _reconfigure ; then + msg info "Starting reconfiguration of the 
service" + return 0 + else + msg error "Configuration was probably interrupted - abort" + _set_service_status failure + exit 1 + fi + ;; + configure_failure) + if is_true _reconfigure ; then + msg info "Starting reconfiguration of the service" + return 0 + else + msg error "Last configuration attempt failed - abort" + exit 1 + fi + ;; + bootstrap*) + if is_true _reconfigure ; then + msg info "Starting reconfiguration of the service" + return 0 + else + msg error "Configure step cannot be run - go check: ${ONE_SERVICE_STATUS}" + exit 1 + fi + ;; + *) + msg error "Configure step cannot be run - go check: ${ONE_SERVICE_STATUS}" + exit 1 + ;; + esac + ;; + bootstrap) + case "$(_get_service_status)" in + '') + # nothing was done so far - missing install + msg error "Cannot proceed with bootstrapping - missing installation step" + exit 1 + ;; + configure_success) + # configuration was successfull - can continue + return 0 + ;; + bootstrap_success) + if is_true _reconfigure ; then + msg info "Redo bootstrap of the service" + return 0 + else + msg warning "Bootstrap was already done - skip" + return 1 + fi + ;; + bootstrap_started) + if is_true _reconfigure ; then + msg info "Redo bootstrap of the service" + return 0 + else + msg error "Bootstrap was probably interrupted - abort" + _set_service_status failure + exit 1 + fi + ;; + bootstrap_failure) + if is_true _reconfigure ; then + msg info "Redo bootstrap of the service" + return 0 + else + msg error "Last bootstrap attempt failed - abort" + exit 1 + fi + ;; + *) + msg error "Bootstrap step cannot be run - go check: ${ONE_SERVICE_STATUS}" + exit 1 + ;; + esac + ;; + esac + + msg error "THIS SHOULD NOT HAPPEN!" + msg unknown "Possibly a bug, wrong usage, action etc." + exit 1 +} + +# arg: install|configure|bootstrap|success|failure +_set_service_status() +{ + _status="$1" + case "$_status" in + install|configure|bootstrap) + echo ${_status}_started > "$ONE_SERVICE_STATUS" + _set_motd "$_status" started + ;; + success|failure) + _step=$(_get_current_service_step) + echo ${_step}_${_status} > "$ONE_SERVICE_STATUS" + _set_motd "$_step" "$_status" + ;; + *) + msg unknown "THIS SHOULD NOT HAPPEN!" + msg unknown "Possibly a bug, wrong usage, action etc." 
+ exit 1 + ;; + esac +} + +_print_logo() +{ + cat > ${ONE_SERVICE_MOTD} <> ${ONE_SERVICE_MOTD} <> ${ONE_SERVICE_MOTD} <> ${ONE_SERVICE_MOTD} <> ${ONE_SERVICE_MOTD} < +_start_log() +{ + _logfile="$1" + _SERVICE_LOG_PIPE="$ONE_SERVICE_LOGDIR"/one_service_log.pipe + + # create named pipe + mknod "$_SERVICE_LOG_PIPE" p + + # connect tee to the pipe and let it write to the log and screen + tee <"$_SERVICE_LOG_PIPE" -a "$_logfile" & + + # save stdout to fd 3 and force shell to write to the pipe + exec 3>&1 >"$_SERVICE_LOG_PIPE" +} + +_end_log() +{ + # restore stdout for the shell and close fd 3 + exec >&3 3>&- +} + diff --git a/appliances/scripts/context_service_net-90.sh b/appliances/scripts/context_service_net-90.sh new file mode 100644 index 00000000..ca494b20 --- /dev/null +++ b/appliances/scripts/context_service_net-90.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Runs OpenNebula service appliances configuration & bootstrap script + +#TODO: just single run based on "status" +_oneapp_service='/etc/one-appliance/service' + +# one-context 6.2.0+ shifts the command argument +if [ $# -eq 2 ]; then + _reconfigure="$2" +else + _reconfigure="$1" +fi + +if [ -x "${_oneapp_service}" ]; then + "${_oneapp_service}" configure "$_reconfigure" && \ + "${_oneapp_service}" bootstrap +fi diff --git a/appliances/scripts/context_service_net-99.sh b/appliances/scripts/context_service_net-99.sh new file mode 100644 index 00000000..7633ce15 --- /dev/null +++ b/appliances/scripts/context_service_net-99.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env} + +if [ "$REPORT_READY" != "YES" ]; then + exit 0 +fi + +# $TOKENTXT is available only through the env. file +if [ -f "${ENV_FILE}" ]; then + . "${ENV_FILE}" +fi + +# Reports only if ONE service appliance bootstrapped successfully +if [ -x '/etc/one-appliance/service' ]; then + _status=$(cat '/etc/one-appliance/status' 2>/dev/null) + if [ "${_status}" != 'bootstrap_success' ]; then + exit 0 + fi +fi + +### + +if which onegate >/dev/null 2>&1; then + onegate vm update --data "READY=YES" + + if [ "$?" = "0" ]; then + exit 0 + fi +fi + +if which curl >/dev/null 2>&1; then + curl -X "PUT" "${ONEGATE_ENDPOINT}/vm" \ + --header "X-ONEGATE-TOKEN: $TOKENTXT" \ + --header "X-ONEGATE-VMID: $VMID" \ + -d "READY=YES" + + if [ "$?" = "0" ]; then + exit 0 + fi +fi + +if which wget >/dev/null 2>&1; then + wget --method=PUT "${ONEGATE_ENDPOINT}/vm" \ + --body-data="READY=YES" \ + --header "X-ONEGATE-TOKEN: $TOKENTXT" \ + --header "X-ONEGATE-VMID: $VMID" + + if [ "$?" = "0" ]; then + exit 0 + fi +fi diff --git a/appliances/service b/appliances/service new file mode 100755 index 00000000..d02530af --- /dev/null +++ b/appliances/service @@ -0,0 +1,133 @@ +#!/usr/bin/env bash + +# ---------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +# ---------------------------------------------------------------------------- # + +# USAGE: +# service [-h|--help|help] +# Print help and usage +# +# service install [] +# Download files and install packages for the desired version of a service +# +# service configure +# Configure the service via contextualization or with defaults +# +# service bootstrap +# Use user's predefined values for the final setup and start the service + +ONE_SERVICE_DIR=/etc/one-appliance +ONE_SERVICE_LOGDIR=/var/log/one-appliance +ONE_SERVICE_STATUS="${ONE_SERVICE_DIR}/status" +ONE_SERVICE_TEMPLATE="${ONE_SERVICE_DIR}/template" +ONE_SERVICE_METADATA="${ONE_SERVICE_DIR}/metadata" +ONE_SERVICE_REPORT="${ONE_SERVICE_DIR}/config" +ONE_SERVICE_FUNCTIONS="${ONE_SERVICE_DIR}/service.d/functions.sh" +ONE_SERVICE_COMMON="${ONE_SERVICE_DIR}/service.d/common.sh" +ONE_SERVICE_APPLIANCE="${ONE_SERVICE_DIR}/service.d/appliance.sh" +ONE_SERVICE_SETUP_DIR="/opt/one-appliance" +ONE_SERVICE_MOTD='/etc/motd' +ONE_SERVICE_PIDFILE="/var/run/one-appliance-service.pid" +ONE_SERVICE_CONTEXTFILE="${ONE_SERVICE_DIR}/context.json" +ONE_SERVICE_RECONFIGURE=false # the first time is always a full configuration +ONE_SERVICE_VERSION= # can be set by argument or to default +ONE_SERVICE_RECONFIGURABLE= # can be set by the appliance script + +# security precautions +set -e +umask 0077 + +# -> TODO: read all from ONE_SERVICE_DIR + +# source common functions +. "$ONE_SERVICE_COMMON" + +# source this script's functions +. "$ONE_SERVICE_FUNCTIONS" + +# source service appliance implementation (following functions): +# service_help +# service_install +# service_configure +# service_bootstrap +# service_cleanup +. "$ONE_SERVICE_APPLIANCE" + +# parse arguments and set _ACTION +_parse_arguments "$@" + +# execute requested action or fail +case "$_ACTION" in + nil|help) + # check if the appliance defined a help function + if type service_help >/dev/null 2>&1 ; then + # use custom appliance help + service_help + else + # use default + default_service_help + fi + ;; + badargs) + exit 1 + ;; + # all stages do basically this: + # 1. check status file if _ACTION can be run at all + # 2. set service status file + # 3. set motd (message of the day) + # 4. execute stage (install, configure or bootstrap) + # 5. set service status file again + # 6. set motd to normal or to signal failure + install|configure|bootstrap) + # check the status (am I running already) + if _is_running ; then + msg warning "Service script is running already - PID: $(_get_pid)" + exit 0 + fi + + # secure lock or fail (only one running instance of this script is allowed) + _lock_or_fail "$0" "$@" + + # set a trap for an exit (cleanup etc.) + _trap_exit + + # write a pidfile + _write_pid + + # analyze the current stage and either proceed or abort + if ! 
_check_service_status $_ACTION "$ONE_SERVICE_RECONFIGURABLE" ; then + exit 0 + fi + + # mark the start of a stage (install, configure or bootstrap) + _set_service_status $_ACTION + + # here we make sure that log directory exists + mkdir -p "$ONE_SERVICE_LOGDIR" + chmod 0700 "$ONE_SERVICE_LOGDIR" + + # execute action + _start_log "${ONE_SERVICE_LOGDIR}/ONE_${_ACTION}.log" + service_${_ACTION} 2>&1 + _end_log + + # if we reached this point then the current stage was successfull + _set_service_status success + ;; +esac + +exit 0 + diff --git a/guestfish/service_OneKE/10-update-distro.sh b/guestfish/service_OneKE/10-update-distro.sh new file mode 100644 index 00000000..5b53ad3b --- /dev/null +++ b/guestfish/service_OneKE/10-update-distro.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# (Auto)Removes unneeded packages and updates +# the distro. + +policy_rc_d_disable() (echo "exit 101" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) +policy_rc_d_enable() (echo "exit 0" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +apt-get update -y + +policy_rc_d_disable + +apt-get install -y --fix-broken + +apt-get upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +# Ensure packages needed for post-processing scripts do exist. +apt-get install -y curl gawk grep jq + +# Used by Longhorn. +apt-get install -y open-iscsi && systemctl enable iscsid + +policy_rc_d_enable + +sync diff --git a/guestfish/service_OneKE/11-update-grub.sh b/guestfish/service_OneKE/11-update-grub.sh new file mode 120000 index 00000000..0440735a --- /dev/null +++ b/guestfish/service_OneKE/11-update-grub.sh @@ -0,0 +1 @@ +../ubuntu2204/11-update-grub.sh \ No newline at end of file diff --git a/guestfish/service_OneKE/80-install-context.sh b/guestfish/service_OneKE/80-install-context.sh new file mode 120000 index 00000000..692005f8 --- /dev/null +++ b/guestfish/service_OneKE/80-install-context.sh @@ -0,0 +1 @@ +../ubuntu2204/80-install-context.sh \ No newline at end of file diff --git a/guestfish/service_OneKE/81-configure-ssh.sh b/guestfish/service_OneKE/81-configure-ssh.sh new file mode 120000 index 00000000..0a495d62 --- /dev/null +++ b/guestfish/service_OneKE/81-configure-ssh.sh @@ -0,0 +1 @@ +../ubuntu2204/81-configure-ssh.sh \ No newline at end of file diff --git a/guestfish/service_OneKE/82-configure-context.sh b/guestfish/service_OneKE/82-configure-context.sh new file mode 100644 index 00000000..43c0feec --- /dev/null +++ b/guestfish/service_OneKE/82-configure-context.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# Configures and enables service context. 
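+# It installs the apparmor and tzdata packages, moves the staged net-90 /
+# net-99 hook scripts from /etc/one-appliance/ into /etc/one-context.d/, and
+# fixes their ownership and permissions so one-context can run them on boot.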
+ +policy_rc_d_disable() (echo "exit 101" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) +policy_rc_d_enable() (echo "exit 0" >/usr/sbin/policy-rc.d && chmod a+x /usr/sbin/policy-rc.d) + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +policy_rc_d_disable + +apt-get install -y apparmor tzdata + +mv /etc/one-appliance/net-90 /etc/one-context.d/net-90-service-appliance +mv /etc/one-appliance/net-99 /etc/one-context.d/net-99-report-ready + +chown root:root /etc/one-context.d/* +chmod u=rwx,go=rx /etc/one-context.d/* + +policy_rc_d_enable + +sync diff --git a/guestfish/service_OneKE/83-disable-docs.sh b/guestfish/service_OneKE/83-disable-docs.sh new file mode 100644 index 00000000..719e5aac --- /dev/null +++ b/guestfish/service_OneKE/83-disable-docs.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# Removes man pages and share/doc data, then prevents +# from re-populating. + +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +install -o 0 -g 0 -m u=rw,go=r -D /dev/fd/0 /etc/dpkg/dpkg.cfg.d/excludes <<'EOF' +path-exclude=/usr/share/man/* +path-exclude=/usr/share/locale/*/LC_MESSAGES/*.mo +path-exclude=/usr/share/doc/* +path-include=/usr/share/doc/*/copyright +path-include=/usr/share/doc/*/changelog.Debian.* +EOF + +rm -rf /usr/share/man/* ||: +rm -f /usr/share/locale/*/LC_MESSAGES/*.mo ||: + +TMP_DIR=$(mktemp -d) && cd "$TMP_DIR/" +mv -f /usr/share/doc/* . ||: +cp -rf --parents */copyright /usr/share/doc/ ||: +cp -rf --parents */changelog.Debian.* /usr/share/doc/ ||: +cd ../ && rm -rf "$TMP_DIR/" + +sync diff --git a/guestfish/service_OneKE/98-collect-garbage.sh b/guestfish/service_OneKE/98-collect-garbage.sh new file mode 100644 index 00000000..ff2c1884 --- /dev/null +++ b/guestfish/service_OneKE/98-collect-garbage.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# Cleans APT caches, removes temporary files / logs, +# removes leftover / temporary unneeded packages. 
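+# Specifically: purges cloud-init, fwupd and snapd, autoremoves unused
+# dependencies, drops the cached APT package lists, and deletes the leftover
+# cloud-image IPv6 sysctl snippet and the /context/ directory.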
+ +exec 1>&2 +set -o errexit -o nounset -o pipefail +set -x + +export DEBIAN_FRONTEND=noninteractive + +apt-get purge -y cloud-init fwupd snapd + +apt-get autoremove -y --purge + +apt-get clean -y && rm -rf /var/lib/apt/lists/* + +rm -f /etc/sysctl.d/99-cloudimg-ipv6.conf + +rm -rf /context/ + +sync diff --git a/packer/service_OneKE/run.sh b/packer/service_OneKE/run.sh new file mode 100755 index 00000000..3294b2e2 --- /dev/null +++ b/packer/service_OneKE/run.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# +# QEMU_BINARY should be exported already + +DISTRO=$(basename $(dirname "$0")) +DST=$1 +DIR_CURR=$(dirname "$0") + +BASE_IMAGE=$DIR_BASE/$DISTRO.img +PACKER_WORKING_DIR=$DIR_BUILD/_packer/$DISTRO +CLOUD_WORKING_DIR=$DIR_BUILD/_cloud-init/$DISTRO +mkdir -p "$PACKER_WORKING_DIR" +mkdir -p "$CLOUD_WORKING_DIR" + +# create cloud-init iso +touch ${CLOUD_WORKING_DIR}/empty-meta-data +cloud-localds \ + ${CLOUD_WORKING_DIR}/cloud-init.iso \ + ${DIR_CURR}/service-OneKE.yml \ + ${CLOUD_WORKING_DIR}/empty-meta-data + +packer build -force \ + -var "image_url=${DIR_BASE}/ubuntu2204.img" \ + -var "output_dir=${PACKER_WORKING_DIR}" \ + -var "cloud_init_iso=${CLOUD_WORKING_DIR}/cloud-init.iso" \ + -var "serial=file:${TTY}" \ + -var "appliance_name=service_OneKE.qcow2" \ + -var "appliance_script=appliances/OneKE/" \ + -var "appliance_label=OneKE" \ + -var "appliance_airgapped=true" \ + "$DIR_CURR/service-OneKE.pkr.hcl" + + +mv "$PACKER_WORKING_DIR/$DISTRO.qcow2" "$DST" +rm -rf ${PACKER_WORKING_DIR} +rm -rf ${CLOUD_WORKING_DIR} + diff --git a/packer/service_OneKE/service-OneKE.pkr.hcl b/packer/service_OneKE/service-OneKE.pkr.hcl new file mode 100644 index 00000000..909945ed --- /dev/null +++ b/packer/service_OneKE/service-OneKE.pkr.hcl @@ -0,0 +1,126 @@ +variable "image_url" { + type = string +} + +variable "cloud_init_iso" { + type = string +} + +variable "output_dir" { + type = string +} + +variable "serial" { + type = string + default = "stdio" +} + +variable "appliance_name" { + type = string + default = null +} + +variable "appliance_script" { + type = string + default = null +} + +variable "appliance_label" { + type = string + default = null +} + +variable "appliance_airgapped" { + type = bool + default = true +} + +source "qemu" "ubuntu" { + accelerator = "kvm" + headless = true + + iso_url = var.image_url + iso_checksum = "none" + disk_image = true + disk_cache = "unsafe" + disk_size = 20480 + format = "qcow2" + + disk_interface = "virtio-scsi" + net_device = "virtio-net" + + qemuargs = [ + ["-m", "8G"], + ["-serial", var.serial], + ["-cdrom", var.cloud_init_iso], + ] + + ssh_username = "root" + ssh_password = "v-YC470*/9i2CX+y3fP:D+%Z-1g-|p4P" + + shutdown_command = "poweroff" + + output_directory = var.output_dir + vm_name = var.appliance_name +} + +build { + sources = ["source.qemu.ubuntu"] + + provisioner "shell" { + inline = [ + "mkdir -p /etc/one-appliance/service.d", + "chmod 0750 /etc/one-appliance", + "mkdir -p /opt/one-appliance/bin", + "chmod -R 0755 /opt/one-appliance/", + ] + } + + provisioner "file" { + source = "appliances/scripts/context_service_net-90.sh" + destination = "/etc/one-appliance/net-90" + } + + provisioner "file" { + source = "appliances/scripts/context_service_net-99.sh" + destination = "/etc/one-appliance/net-99" + } + + provisioner "file" { + source = "appliances/service" + destination = "/etc/one-appliance/service" + } + + provisioner "file" { + source = "appliances/lib/common.sh" + destination = "/etc/one-appliance/service.d/common.sh" + } + + provisioner "file" { + source = 
"appliances/lib/functions.sh" + destination = "/etc/one-appliance/service.d/functions.sh" + } + + provisioner "file" { + source = "appliances/lib/context-helper.py" + destination = "/opt/one-appliance/bin/context-helper" + } + + provisioner "file" { + source = var.appliance_script + destination = "/etc/one-appliance/service.d/" + } + + provisioner "shell" { + environment_vars = [ + "ONE_SERVICE_AIRGAPPED=${var.appliance_airgapped ? "YES" : "NO"}", + ] + inline = [ + "find /opt/one-appliance/ -type f -exec chmod 0640 '{}' \\;", + "chmod 0755 /opt/one-appliance/bin/*", + "chmod 0740 /etc/one-appliance/service", + "chmod 0640 /etc/one-appliance/service.d/*", + "/etc/one-appliance/service install", + ] + } +} diff --git a/packer/service_OneKE/service-OneKE.yml b/packer/service_OneKE/service-OneKE.yml new file mode 100644 index 00000000..241874c9 --- /dev/null +++ b/packer/service_OneKE/service-OneKE.yml @@ -0,0 +1,52 @@ +#cloud-config +growpart: + mode: auto + devices: [/] + +apt: + primary: + - arches: [default] + uri: mirror://mirrors.ubuntu.com/mirrors.txt + - arches: [default] + uri: http://archive.ubuntu.com/ubuntu + updates: + - arches: [default] + uri: mirror://mirrors.ubuntu.com/mirrors.txt + - arches: [default] + uri: http://archive.ubuntu.com/ubuntu + security: + - arches: [default] + uri: http://security.ubuntu.com/ubuntu + +package_upgrade: true + +packages: + - apt-transport-https + - bash + - ca-certificates + - curl + - gawk + - gnupg + - jq + - lsb-release + - ruby + - zstd + +disable_root: false + +users: + - name: root + lock_passwd: false + hashed_passwd: $6$rounds=2000000$CZnmDr1iAnoCk$l5fEPfdtBpwfvOA1.Wn4Ipbh1Y.ahTPzNPZyXvaU2T4MtS907l8QqwMKLLa/8XMDpV2ZuXFUDX8aG2YqRX7mM1 + +ssh_pwauth: true + +runcmd: + - | + gawk -i inplace -f- /etc/ssh/sshd_config <<'EOF' + BEGIN { update = "PermitRootLogin yes" } + /^#*PermitRootLogin/ { $0 = update; found = 1 } + { print } + END { if (!found) print update >>FILENAME } + EOF + - systemctl reload sshd