From f46b435655f789c81e9893b10856e5ec1fcb4599 Mon Sep 17 00:00:00 2001 From: Jan Orel Date: Tue, 10 Oct 2023 18:05:18 +0200 Subject: [PATCH] F OpenNebula/one-infra#480: Add Packer build tools --- .gitignore | 7 + Makefile | 70 + Makefile.config | 63 + README.md | 10 + appliances/OneKE/appliance.sh | 11 + appliances/OneKE/appliance/.rubocop.yml | 30 + appliances/OneKE/appliance/appliance.rb | 81 + appliances/OneKE/appliance/calico.rb | 33 + appliances/OneKE/appliance/canal.rb | 33 + appliances/OneKE/appliance/cilium.rb | 71 + appliances/OneKE/appliance/cilium_spec.rb | 154 + appliances/OneKE/appliance/cleaner.rb | 125 + appliances/OneKE/appliance/cleaner_spec.rb | 482 ++ appliances/OneKE/appliance/config.rb | 69 + appliances/OneKE/appliance/helpers.rb | 242 + appliances/OneKE/appliance/helpers_spec.rb | 42 + appliances/OneKE/appliance/kubernetes.rb | 312 ++ appliances/OneKE/appliance/longhorn.rb | 143 + appliances/OneKE/appliance/metallb.rb | 109 + appliances/OneKE/appliance/metallb_spec.rb | 136 + appliances/OneKE/appliance/multus.rb | 35 + appliances/OneKE/appliance/onegate.rb | 133 + appliances/OneKE/appliance/onegate_spec.rb | 559 +++ appliances/OneKE/appliance/traefik.rb | 66 + appliances/OneKE/appliance/vnf.rb | 144 + .../lib/artifacts/vnf/ha-check-status.sh | 100 + appliances/lib/artifacts/vnf/ha-failover.sh | 261 + .../lib/artifacts/vnf/kea-config-generator | 902 ++++ .../artifacts/vnf/one-vnf/lib/appliance.rb | 153 + .../vnf/one-vnf/lib/appliance/plugin.rb | 59 + .../vnf/one-vnf/lib/appliance/plugin/dummy.rb | 39 + .../one-vnf/lib/appliance/plugin/haproxy.rb | 795 +++ .../lib/appliance/plugin/loadbalancer.rb | 1041 ++++ .../one-vnf/lib/appliance/plugin/sdnat4.rb | 407 ++ .../lib/artifacts/vnf/one-vnf/one-vnf.rb | 146 + .../kea-hook-onelease4-1.1.1-r0.apk | Bin 0 -> 32378 bytes appliances/lib/common.sh | 503 ++ appliances/lib/context-helper.py | 295 ++ appliances/lib/functions.sh | 407 ++ appliances/scripts/context_service_net-90.sh | 17 + appliances/scripts/context_service_net-99.sh | 52 + appliances/service | 133 + appliances/vnf.sh | 4387 +++++++++++++++++ appliances/wordpress.sh | 627 +++ context-linux/generate.sh | 2 +- packer/alma/10-upgrade-distro.sh | 17 + packer/alma/11-update-grub.sh | 57 + packer/alma/11-zzz.sh | 5 + packer/alma/80-install-context.sh.8 | 28 + packer/alma/80-install-context.sh.9 | 28 + packer/alma/81-configure-ssh.sh | 30 + packer/alma/98-collect-garbage.sh | 23 + packer/alma/alma.pkr.hcl | 76 + packer/alma/cloud-init.yml | 22 + packer/alma/plugins.pkr.hcl | 1 + packer/alma/variables.pkr.hcl | 38 + packer/alpine/10-upgrade-distro.sh | 20 + packer/alpine/11-update-boot.sh | 21 + packer/alpine/80-install-context.sh | 28 + packer/alpine/81-configure-ssh.sh | 30 + packer/alpine/98-collect-garbage.sh | 15 + packer/alpine/alpine.init | 57 + packer/alpine/alpine.pkr.hcl | 68 + packer/alpine/plugins.pkr.hcl | 1 + packer/alpine/variables.pkr.hcl | 43 + packer/alt/10-upgrade-distro.sh | 19 + packer/alt/11-update-grub.sh | 53 + packer/alt/80-install-context.sh | 42 + packer/alt/81-configure-ssh.sh | 30 + packer/alt/98-collect-garbage.sh | 25 + packer/alt/alt.pkr.hcl | 76 + packer/alt/cloud-init.yml | 22 + packer/alt/plugins.pkr.hcl | 1 + packer/alt/variables.pkr.hcl | 38 + packer/amazon/10-upgrade-distro.sh | 19 + packer/amazon/11-update-grub.sh | 62 + packer/amazon/80-install-context.sh | 20 + packer/amazon/81-configure-ssh.sh | 36 + packer/amazon/98-collect-garbage.sh | 21 + packer/amazon/amazon.pkr.hcl | 76 + packer/amazon/cloud-init.yml | 22 + 
packer/amazon/plugins.pkr.hcl | 1 + packer/amazon/variables.pkr.hcl | 34 + packer/build.sh | 29 + packer/centos/10-upgrade-distro.sh.7 | 19 + packer/centos/10-upgrade-distro.sh.8stream | 17 + packer/centos/11-update-grub.sh.7 | 62 + packer/centos/11-update-grub.sh.8stream | 55 + packer/centos/80-install-context.sh.7 | 28 + packer/centos/80-install-context.sh.8stream | 28 + packer/centos/81-configure-ssh.sh.7 | 36 + packer/centos/81-configure-ssh.sh.8stream | 30 + packer/centos/98-collect-garbage.sh.7 | 20 + packer/centos/98-collect-garbage.sh.8stream | 22 + packer/centos/centos.pkr.hcl | 76 + packer/centos/cloud-init.yml | 22 + packer/centos/plugins.pkr.hcl | 1 + packer/centos/variables.pkr.hcl | 38 + packer/debian/10-upgrade-distro.sh | 28 + packer/debian/11-update-grub.sh | 54 + packer/debian/80-install-context.sh.10 | 41 + packer/debian/80-install-context.sh.11 | 41 + packer/debian/80-install-context.sh.12 | 41 + packer/debian/81-configure-ssh.sh | 30 + packer/debian/98-collect-garbage.sh | 27 + packer/debian/cloud-init.yml | 16 + packer/debian/debian.pkr.hcl | 75 + packer/debian/plugins.pkr.hcl | 1 + packer/debian/variables.pkr.hcl | 41 + packer/devuan/10-upgrade-distro.sh.3 | 37 + packer/devuan/10-upgrade-distro.sh.4 | 37 + packer/devuan/11-update-grub.sh | 54 + packer/devuan/80-install-context.sh | 32 + packer/devuan/81-configure-ssh.sh | 30 + packer/devuan/98-collect-garbage.sh | 20 + packer/devuan/devuan.pkr.hcl | 63 + packer/devuan/devuan3.preseed | 54 + packer/devuan/devuan4.preseed | 53 + packer/devuan/plugins.pkr.hcl | 1 + packer/devuan/variables.pkr.hcl | 38 + packer/fedora/10-upgrade-distro.sh | 15 + packer/fedora/11-update-grub.sh | 55 + packer/fedora/12-selinux-fixfiles.sh | 5 + packer/fedora/12-zzz.sh | 5 + packer/fedora/13-selinux-enforcing.sh | 3 + packer/fedora/80-install-context.sh | 38 + packer/fedora/81-configure-ssh.sh | 32 + packer/fedora/98-collect-garbage.sh | 19 + packer/fedora/cloud-init.yml | 23 + packer/fedora/fedora.pkr.hcl | 76 + packer/fedora/plugins.pkr.hcl | 1 + packer/fedora/variables.pkr.hcl | 37 + packer/freebsd/freebsd.pkr.hcl | 53 + packer/freebsd/mkdir | 3 + packer/freebsd/plugins.pkr.hcl | 1 + packer/freebsd/script.sh | 50 + packer/freebsd/variables.pkr.hcl | 142 + packer/ol/10-upgrade-distro.sh.8 | 17 + packer/ol/10-upgrade-distro.sh.9 | 17 + packer/ol/11-update-grub.sh | 57 + packer/ol/11-zzz.sh | 5 + packer/ol/80-install-context.sh.8 | 28 + packer/ol/80-install-context.sh.9 | 28 + packer/ol/81-configure-ssh.sh | 30 + packer/ol/98-collect-garbage.sh | 22 + packer/ol/cloud-init.yml | 22 + packer/ol/ol.pkr.hcl | 76 + packer/ol/plugins.pkr.hcl | 1 + packer/ol/variables.pkr.hcl | 38 + packer/opensuse/10-upgrade-distro.sh | 19 + packer/opensuse/11-update-grub.sh | 49 + packer/opensuse/80-install-context.sh | 22 + packer/opensuse/81-configure-ssh.sh | 30 + packer/opensuse/98-collect-garbage.sh | 26 + packer/opensuse/cloud-init.yml | 22 + packer/opensuse/opensuse.pkr.hcl | 78 + packer/opensuse/plugins.pkr.hcl | 1 + packer/opensuse/variables.pkr.hcl | 33 + packer/plugins.pkr.hcl | 8 + packer/postprocess.sh | 15 + packer/rhel/10-upgrade-distro.sh.8 | 34 + packer/rhel/10-upgrade-distro.sh.9 | 34 + packer/rhel/11-update-grub.sh | 57 + packer/rhel/11-zzz.sh | 5 + packer/rhel/80-install-context.sh.8 | 28 + packer/rhel/80-install-context.sh.9 | 28 + packer/rhel/81-configure-ssh.sh | 30 + packer/rhel/98-collect-garbage.sh | 26 + packer/rhel/99-unsubscribe.sh | 5 + packer/rhel/cloud-init.yml | 22 + packer/rhel/plugins.pkr.hcl | 1 + packer/rhel/rhel.pkr.hcl | 82 
+ packer/rhel/variables.pkr.hcl | 48 + packer/rocky/10-upgrade-distro.sh | 17 + packer/rocky/11-update-grub.sh | 55 + packer/rocky/80-install-context.sh | 28 + packer/rocky/80-install-context.sh.8 | 28 + packer/rocky/80-install-context.sh.9 | 28 + packer/rocky/81-configure-ssh.sh | 30 + packer/rocky/98-collect-garbage.sh | 23 + packer/rocky/plugins.pkr.hcl | 1 + packer/rocky/rocky.pkr.hcl | 63 + packer/rocky/rocky8.ks | 98 + packer/rocky/rocky9.ks | 97 + packer/rocky/variables.pkr.hcl | 38 + packer/service_OneKE/81-configure-ssh.sh | 30 + packer/service_OneKE/82-configure-context.sh | 24 + packer/service_OneKE/83-disable-docs.sh | 27 + packer/service_OneKE/OneKE.pkr.hcl | 128 + packer/service_OneKE/gen_context | 31 + packer/service_OneKE/variables.pkr.hcl | 22 + packer/service_vnf/10-update.sh | 4 + packer/service_vnf/81-configure-ssh.sh | 30 + packer/service_vnf/82-configure-context.sh | 19 + packer/service_vnf/gen_context | 30 + packer/service_vnf/variables.pkr.hcl | 23 + packer/service_vnf/vnf.pkr.hcl | 130 + packer/service_wordpress/81-configure-ssh.sh | 30 + .../service_wordpress/82-configure-context.sh | 15 + packer/service_wordpress/gen_context | 31 + packer/service_wordpress/variables.pkr.hcl | 23 + packer/service_wordpress/wordpress.pkr.hcl | 124 + packer/ubuntu/10-upgrade-distro.sh | 28 + packer/ubuntu/11-update-grub.sh | 54 + packer/ubuntu/80-install-context.sh | 40 + packer/ubuntu/81-configure-ssh.sh | 30 + packer/ubuntu/98-collect-garbage.sh | 22 + packer/ubuntu/cloud-init.yml | 22 + packer/ubuntu/plugins.pkr.hcl | 1 + packer/ubuntu/ubuntu.pkr.hcl | 76 + packer/ubuntu/variables.pkr.hcl | 48 + 211 files changed, 19137 insertions(+), 1 deletion(-) create mode 100644 Makefile create mode 100644 Makefile.config create mode 100644 README.md create mode 100644 appliances/OneKE/appliance.sh create mode 100644 appliances/OneKE/appliance/.rubocop.yml create mode 100644 appliances/OneKE/appliance/appliance.rb create mode 100644 appliances/OneKE/appliance/calico.rb create mode 100644 appliances/OneKE/appliance/canal.rb create mode 100644 appliances/OneKE/appliance/cilium.rb create mode 100644 appliances/OneKE/appliance/cilium_spec.rb create mode 100644 appliances/OneKE/appliance/cleaner.rb create mode 100644 appliances/OneKE/appliance/cleaner_spec.rb create mode 100644 appliances/OneKE/appliance/config.rb create mode 100644 appliances/OneKE/appliance/helpers.rb create mode 100644 appliances/OneKE/appliance/helpers_spec.rb create mode 100644 appliances/OneKE/appliance/kubernetes.rb create mode 100644 appliances/OneKE/appliance/longhorn.rb create mode 100644 appliances/OneKE/appliance/metallb.rb create mode 100644 appliances/OneKE/appliance/metallb_spec.rb create mode 100644 appliances/OneKE/appliance/multus.rb create mode 100644 appliances/OneKE/appliance/onegate.rb create mode 100644 appliances/OneKE/appliance/onegate_spec.rb create mode 100644 appliances/OneKE/appliance/traefik.rb create mode 100644 appliances/OneKE/appliance/vnf.rb create mode 100644 appliances/lib/artifacts/vnf/ha-check-status.sh create mode 100644 appliances/lib/artifacts/vnf/ha-failover.sh create mode 100755 appliances/lib/artifacts/vnf/kea-config-generator create mode 100644 appliances/lib/artifacts/vnf/one-vnf/lib/appliance.rb create mode 100644 appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin.rb create mode 100644 appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/dummy.rb create mode 100644 appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/haproxy.rb create mode 100644 
appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/loadbalancer.rb create mode 100644 appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/sdnat4.rb create mode 100755 appliances/lib/artifacts/vnf/one-vnf/one-vnf.rb create mode 100644 appliances/lib/artifacts/vnf/onekea-2.2.0/kea-hook-onelease4-1.1.1-r0.apk create mode 100644 appliances/lib/common.sh create mode 100755 appliances/lib/context-helper.py create mode 100644 appliances/lib/functions.sh create mode 100644 appliances/scripts/context_service_net-90.sh create mode 100644 appliances/scripts/context_service_net-99.sh create mode 100755 appliances/service create mode 100644 appliances/vnf.sh create mode 100644 appliances/wordpress.sh create mode 100644 packer/alma/10-upgrade-distro.sh create mode 100644 packer/alma/11-update-grub.sh create mode 100644 packer/alma/11-zzz.sh create mode 100644 packer/alma/80-install-context.sh.8 create mode 100644 packer/alma/80-install-context.sh.9 create mode 100644 packer/alma/81-configure-ssh.sh create mode 100644 packer/alma/98-collect-garbage.sh create mode 100644 packer/alma/alma.pkr.hcl create mode 100644 packer/alma/cloud-init.yml create mode 120000 packer/alma/plugins.pkr.hcl create mode 100644 packer/alma/variables.pkr.hcl create mode 100644 packer/alpine/10-upgrade-distro.sh create mode 100644 packer/alpine/11-update-boot.sh create mode 100644 packer/alpine/80-install-context.sh create mode 100644 packer/alpine/81-configure-ssh.sh create mode 100644 packer/alpine/98-collect-garbage.sh create mode 100644 packer/alpine/alpine.init create mode 100644 packer/alpine/alpine.pkr.hcl create mode 120000 packer/alpine/plugins.pkr.hcl create mode 100644 packer/alpine/variables.pkr.hcl create mode 100644 packer/alt/10-upgrade-distro.sh create mode 100644 packer/alt/11-update-grub.sh create mode 100644 packer/alt/80-install-context.sh create mode 100644 packer/alt/81-configure-ssh.sh create mode 100644 packer/alt/98-collect-garbage.sh create mode 100644 packer/alt/alt.pkr.hcl create mode 100644 packer/alt/cloud-init.yml create mode 120000 packer/alt/plugins.pkr.hcl create mode 100644 packer/alt/variables.pkr.hcl create mode 100644 packer/amazon/10-upgrade-distro.sh create mode 100644 packer/amazon/11-update-grub.sh create mode 100644 packer/amazon/80-install-context.sh create mode 100644 packer/amazon/81-configure-ssh.sh create mode 100644 packer/amazon/98-collect-garbage.sh create mode 100644 packer/amazon/amazon.pkr.hcl create mode 100644 packer/amazon/cloud-init.yml create mode 120000 packer/amazon/plugins.pkr.hcl create mode 100644 packer/amazon/variables.pkr.hcl create mode 100755 packer/build.sh create mode 100644 packer/centos/10-upgrade-distro.sh.7 create mode 100644 packer/centos/10-upgrade-distro.sh.8stream create mode 100644 packer/centos/11-update-grub.sh.7 create mode 100644 packer/centos/11-update-grub.sh.8stream create mode 100644 packer/centos/80-install-context.sh.7 create mode 100644 packer/centos/80-install-context.sh.8stream create mode 100644 packer/centos/81-configure-ssh.sh.7 create mode 100644 packer/centos/81-configure-ssh.sh.8stream create mode 100644 packer/centos/98-collect-garbage.sh.7 create mode 100644 packer/centos/98-collect-garbage.sh.8stream create mode 100644 packer/centos/centos.pkr.hcl create mode 100644 packer/centos/cloud-init.yml create mode 120000 packer/centos/plugins.pkr.hcl create mode 100644 packer/centos/variables.pkr.hcl create mode 100644 packer/debian/10-upgrade-distro.sh create mode 100644 packer/debian/11-update-grub.sh create mode 100644 
packer/debian/80-install-context.sh.10 create mode 100644 packer/debian/80-install-context.sh.11 create mode 100644 packer/debian/80-install-context.sh.12 create mode 100644 packer/debian/81-configure-ssh.sh create mode 100644 packer/debian/98-collect-garbage.sh create mode 100644 packer/debian/cloud-init.yml create mode 100644 packer/debian/debian.pkr.hcl create mode 120000 packer/debian/plugins.pkr.hcl create mode 100644 packer/debian/variables.pkr.hcl create mode 100644 packer/devuan/10-upgrade-distro.sh.3 create mode 100644 packer/devuan/10-upgrade-distro.sh.4 create mode 100644 packer/devuan/11-update-grub.sh create mode 100644 packer/devuan/80-install-context.sh create mode 100644 packer/devuan/81-configure-ssh.sh create mode 100644 packer/devuan/98-collect-garbage.sh create mode 100644 packer/devuan/devuan.pkr.hcl create mode 100644 packer/devuan/devuan3.preseed create mode 100644 packer/devuan/devuan4.preseed create mode 120000 packer/devuan/plugins.pkr.hcl create mode 100644 packer/devuan/variables.pkr.hcl create mode 100644 packer/fedora/10-upgrade-distro.sh create mode 100644 packer/fedora/11-update-grub.sh create mode 100644 packer/fedora/12-selinux-fixfiles.sh create mode 100644 packer/fedora/12-zzz.sh create mode 100644 packer/fedora/13-selinux-enforcing.sh create mode 100644 packer/fedora/80-install-context.sh create mode 100644 packer/fedora/81-configure-ssh.sh create mode 100644 packer/fedora/98-collect-garbage.sh create mode 100644 packer/fedora/cloud-init.yml create mode 100644 packer/fedora/fedora.pkr.hcl create mode 120000 packer/fedora/plugins.pkr.hcl create mode 100644 packer/fedora/variables.pkr.hcl create mode 100644 packer/freebsd/freebsd.pkr.hcl create mode 100644 packer/freebsd/mkdir create mode 120000 packer/freebsd/plugins.pkr.hcl create mode 100644 packer/freebsd/script.sh create mode 100644 packer/freebsd/variables.pkr.hcl create mode 100644 packer/ol/10-upgrade-distro.sh.8 create mode 100644 packer/ol/10-upgrade-distro.sh.9 create mode 100644 packer/ol/11-update-grub.sh create mode 100644 packer/ol/11-zzz.sh create mode 100644 packer/ol/80-install-context.sh.8 create mode 100644 packer/ol/80-install-context.sh.9 create mode 100644 packer/ol/81-configure-ssh.sh create mode 100644 packer/ol/98-collect-garbage.sh create mode 100644 packer/ol/cloud-init.yml create mode 100644 packer/ol/ol.pkr.hcl create mode 120000 packer/ol/plugins.pkr.hcl create mode 100644 packer/ol/variables.pkr.hcl create mode 100644 packer/opensuse/10-upgrade-distro.sh create mode 100644 packer/opensuse/11-update-grub.sh create mode 100644 packer/opensuse/80-install-context.sh create mode 100644 packer/opensuse/81-configure-ssh.sh create mode 100644 packer/opensuse/98-collect-garbage.sh create mode 100644 packer/opensuse/cloud-init.yml create mode 100644 packer/opensuse/opensuse.pkr.hcl create mode 120000 packer/opensuse/plugins.pkr.hcl create mode 100644 packer/opensuse/variables.pkr.hcl create mode 100644 packer/plugins.pkr.hcl create mode 100755 packer/postprocess.sh create mode 100644 packer/rhel/10-upgrade-distro.sh.8 create mode 100644 packer/rhel/10-upgrade-distro.sh.9 create mode 100644 packer/rhel/11-update-grub.sh create mode 100644 packer/rhel/11-zzz.sh create mode 100644 packer/rhel/80-install-context.sh.8 create mode 100644 packer/rhel/80-install-context.sh.9 create mode 100644 packer/rhel/81-configure-ssh.sh create mode 100644 packer/rhel/98-collect-garbage.sh create mode 100644 packer/rhel/99-unsubscribe.sh create mode 100644 packer/rhel/cloud-init.yml create mode 120000 
packer/rhel/plugins.pkr.hcl create mode 100644 packer/rhel/rhel.pkr.hcl create mode 100644 packer/rhel/variables.pkr.hcl create mode 100644 packer/rocky/10-upgrade-distro.sh create mode 100644 packer/rocky/11-update-grub.sh create mode 100644 packer/rocky/80-install-context.sh create mode 100644 packer/rocky/80-install-context.sh.8 create mode 100644 packer/rocky/80-install-context.sh.9 create mode 100644 packer/rocky/81-configure-ssh.sh create mode 100644 packer/rocky/98-collect-garbage.sh create mode 120000 packer/rocky/plugins.pkr.hcl create mode 100644 packer/rocky/rocky.pkr.hcl create mode 100644 packer/rocky/rocky8.ks create mode 100644 packer/rocky/rocky9.ks create mode 100644 packer/rocky/variables.pkr.hcl create mode 100644 packer/service_OneKE/81-configure-ssh.sh create mode 100644 packer/service_OneKE/82-configure-context.sh create mode 100644 packer/service_OneKE/83-disable-docs.sh create mode 100644 packer/service_OneKE/OneKE.pkr.hcl create mode 100755 packer/service_OneKE/gen_context create mode 100644 packer/service_OneKE/variables.pkr.hcl create mode 100644 packer/service_vnf/10-update.sh create mode 100644 packer/service_vnf/81-configure-ssh.sh create mode 100644 packer/service_vnf/82-configure-context.sh create mode 100755 packer/service_vnf/gen_context create mode 100644 packer/service_vnf/variables.pkr.hcl create mode 100644 packer/service_vnf/vnf.pkr.hcl create mode 100644 packer/service_wordpress/81-configure-ssh.sh create mode 100644 packer/service_wordpress/82-configure-context.sh create mode 100755 packer/service_wordpress/gen_context create mode 100644 packer/service_wordpress/variables.pkr.hcl create mode 100644 packer/service_wordpress/wordpress.pkr.hcl create mode 100644 packer/ubuntu/10-upgrade-distro.sh create mode 100644 packer/ubuntu/11-update-grub.sh create mode 100644 packer/ubuntu/80-install-context.sh create mode 100644 packer/ubuntu/81-configure-ssh.sh create mode 100644 packer/ubuntu/98-collect-garbage.sh create mode 100644 packer/ubuntu/cloud-init.yml create mode 120000 packer/ubuntu/plugins.pkr.hcl create mode 100644 packer/ubuntu/ubuntu.pkr.hcl create mode 100644 packer/ubuntu/variables.pkr.hcl diff --git a/.gitignore b/.gitignore index 792f31b7..267547b7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,10 @@ +build/ +export/ +Makefile.local +version +packer/*/*-cloud-init.iso +packer/*/*-context.iso +packer/*/context/ context-windows/out/ context-windows/*.msi context-windows/rhsrvany.exe diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..4fea5154 --- /dev/null +++ b/Makefile @@ -0,0 +1,70 @@ +# load variables and makefile config +include Makefile.config + +# load possible overrides or non-free definitions +-include Makefile.local + +# all, aliases +all: $(patsubst %, packer-%, $(DISTROS)) $(patsubst %, packer-%, $(SERVICES)) +distros: $(patsubst %, packer-%, $(DISTROS)) +services: $(patsubst %, packer-%, $(SERVICES)) + +# allow individual distribution targets (e.g., "make debian11") +$(DISTROS) $(SERVICES): %: packer-% ; + +# aliases + dependency +packer-%: context-linux ${DIR_EXPORT}/%.qcow2 + @${INFO} "Packer ${*} done" + +packer-service_vnf: packer-alpine318 ${DIR_EXPORT}/service_vnf.qcow2 + @${INFO} "Packer service_vnf done" + +packer-service_wordpress: packer-alma8 ${DIR_EXPORT}/service_wordpress.qcow2 + @${INFO} "Packer service_wordpress done" + +packer-service_OneKE: packer-ubuntu2204 ${DIR_EXPORT}/service_OneKE.qcow2 + @${INFO} "Packer service_OneKE done" + +# run packer build for given distro or service 
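+# e.g. for export/debian11.qcow2 the sed calls below derive DISTRO_NAME=debian and DISTRO_VER=11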
+${DIR_EXPORT}/%.qcow2: + $(eval DISTRO_NAME := $(shell echo ${*} | sed 's/[0-9].*//')) + $(eval DISTRO_VER := $(shell echo ${*} | sed 's/^.[^0-9]*\(.*\)/\1/')) + packer/build.sh "${DISTRO_NAME}" "${DISTRO_VER}" ${@} + +# context packages +context-linux: $(patsubst %, context-linux/out/%, $(LINUX_CONTEXT_PACKAGES)) + @${INFO} "Generate context-linux done" + +context-linux/out/%: + cd context-linux; ./generate-all.sh + +context-windows: $(patsubst %, context-windows/out/%, $(WINDOWS_CONTEXT_PACKAGES)) + @${INFO} "Generate context-windows done" + +context-windows/out/%: + cd context-windows; ./generate-all.sh + +clean: + -rm -rf ${DIR_EXPORT}/* + +help: + @echo 'Usage examples:' + @echo ' make <distro> -- build just one distro' + @echo ' make <service> -- build just one service' + @echo + @echo ' make all -- build all distros and services' + @echo ' make distros -- build all distros' + @echo ' make services -- build all services' + @echo + @echo ' make context-linux -- build context linux packages' + @echo ' make context-windows -- build context windows packages' + @echo + @echo 'Available distros:' + @echo "$(shell echo "${DISTROS}" | fmt -w 65 | tr '\n' '\1' )" \ + | tr '\1' '\n' | sed 's/^/ /' + @echo 'Available services:' + @echo ' $(SERVICES)' + @echo + +version: + @echo $(VERSION)-$(RELEASE) > version diff --git a/Makefile.config b/Makefile.config new file mode 100644 index 00000000..0e49530b --- /dev/null +++ b/Makefile.config @@ -0,0 +1,63 @@ +# context version definition +VERSION := 6.6.1 +RELEASE := 1 + +# log +VERBOSE := 1 +PACKER_LOG := 0 +PACKER_HEADLESS := true + +DISTROS := alma8 alma9 \ + alpine316 alpine317 alpine318 \ + alt9 alt10 \ + amazon2 \ + centos7 centos8stream \ + debian10 debian11 debian12 \ + devuan3 devuan4 \ + fedora37 fedora38 \ + freebsd12 freebsd13 \ + ol8 ol9 \ + opensuse15 \ + rocky8 rocky9 \ + ubuntu2004 ubuntu2004min ubuntu2204 ubuntu2204min + +SERVICES := service_vnf service_wordpress service_OneKE + +.DEFAULT_GOAL := help + +# default directories +DIR_BUILD := build +DIR_EXPORT := export +$(shell mkdir -p ${DIR_BUILD} ${DIR_EXPORT}) + +# don't delete exported +.SECONDARY: $(patsubst %, $(DIR_EXPORT)/%.qcow2, $(DISTROS)) $(patsubst %, $(DIR_EXPORT)/%.qcow2, $(SERVICES)) + +.PHONY: context-linux context-windows help + +# this needs to match context-linux/generate-all.sh products +LINUX_CONTEXT_PACKAGES := one-context_${VERSION}-${RELEASE}.deb \ + one-context-${VERSION}-${RELEASE}.el6.noarch.rpm \ + one-context-${VERSION}-${RELEASE}.el7.noarch.rpm \ + one-context-${VERSION}-${RELEASE}.el8.noarch.rpm \ + one-context-${VERSION}-${RELEASE}.el9.noarch.rpm \ + one-context-${VERSION}-${RELEASE}.suse.noarch.rpm \ + one-context-${VERSION}_${RELEASE}.txz \ + one-context-${VERSION}-alt${RELEASE}.noarch.rpm \ + one-context-${VERSION}-r${RELEASE}.apk \ + one-context-linux-${VERSION}-${RELEASE}.iso + +LINUX_CONTEXT_PACKAGES_FULL := $(patsubst %, context-linux/out/%, $(LINUX_CONTEXT_PACKAGES)) + +# this needs to match context-windows/generate-all.sh products +WINDOWS_CONTEXT_PACKAGES := one-context-${VERSION}.msi \ + one-context-${VERSION}.iso + +WINDOWS_CONTEXT_PACKAGES_FULL := $(patsubst %, context-windows/out/%, $(WINDOWS_CONTEXT_PACKAGES)) + + +# logging func +INFO=sh -c 'if [ $(VERBOSE) = 1 ]; then echo [INFO] $$1; fi' INFO + +# export all variables +export diff --git a/README.md b/README.md new file mode 100644 index 00000000..543eef53 --- /dev/null +++ b/README.md @@ -0,0 +1,10 @@ +# one-apps +Toolchain to build OpenNebula appliances + +Requirements: +- make +- Packer +- Qemu Packer
Plugin +- cloud-utils +- guestfs-tools +- qemu-img diff --git a/appliances/OneKE/appliance.sh b/appliances/OneKE/appliance.sh new file mode 100644 index 00000000..a0ecf6bf --- /dev/null +++ b/appliances/OneKE/appliance.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +service_bootstrap() { ruby -- "${BASH_SOURCE%.*}/appliance.rb" bootstrap; } + +service_cleanup() { ruby -- "${BASH_SOURCE%.*}/appliance.rb" cleanup; } + +service_configure() { ruby -- "${BASH_SOURCE%.*}/appliance.rb" configure; } + +service_install() { ruby -- "${BASH_SOURCE%.*}/appliance.rb" install; } + +return diff --git a/appliances/OneKE/appliance/.rubocop.yml b/appliances/OneKE/appliance/.rubocop.yml new file mode 100644 index 00000000..0ca2b581 --- /dev/null +++ b/appliances/OneKE/appliance/.rubocop.yml @@ -0,0 +1,30 @@ +AllCops: + Exclude: + - '*_spec.rb' + +Lint/MissingCopEnableDirective: + Enabled: false + +Layout/FirstArrayElementIndentation: + Enabled: false + +Layout/FirstHashElementIndentation: + Enabled: false + +Layout/HashAlignment: + Enabled: false + +Layout/HeredocIndentation: + Enabled: false + +Layout/IndentationWidth: + Enabled: false + +Layout/MultilineMethodCallIndentation: + Enabled: false + +Metrics/BlockLength: + Enabled: false + +Metrics/MethodLength: + Enabled: false diff --git a/appliances/OneKE/appliance/appliance.rb b/appliances/OneKE/appliance/appliance.rb new file mode 100644 index 00000000..0511ae3a --- /dev/null +++ b/appliances/OneKE/appliance/appliance.rb @@ -0,0 +1,81 @@ +# frozen_string_literal: true + +require_relative 'config.rb' +require_relative 'helpers.rb' + +require_relative 'vnf.rb' +require_relative 'kubernetes.rb' + +require_relative 'multus.rb' +require_relative 'calico.rb' +require_relative 'canal.rb' +require_relative 'cilium.rb' + +require_relative 'metallb.rb' +require_relative 'traefik.rb' +require_relative 'longhorn.rb' +require_relative 'cleaner.rb' + +if caller.empty? + case ARGV[0].to_sym + when :install + install_packages PACKAGES + + with_policy_rc_d_disabled do + install_kubernetes + end + + install_metallb + install_traefik + install_longhorn + install_cleaner + + # NOTE: Longhorn images are pulled separately. + pull_addon_images if ONE_SERVICE_AIRGAPPED + + msg :info, 'Installation completed successfully' + + when :configure + prepare_dedicated_storage unless ONEAPP_STORAGE_DEVICE.nil? 
+ + configure_vnf + + if ONE_SERVICE_AIRGAPPED + include_images 'rke2-images-core' + include_images 'rke2-images-multus' if ONEAPP_K8S_MULTUS_ENABLED + include_images 'rke2-images-cilium' if ONEAPP_K8S_CNI_PLUGIN == 'cilium' + + include_images 'one-longhorn' if ONEAPP_K8S_LONGHORN_ENABLED + include_images 'one-metallb' if ONEAPP_K8S_METALLB_ENABLED + include_images 'one-traefik' if ONEAPP_K8S_TRAEFIK_ENABLED + include_images 'one-cleaner' + end + + node = configure_kubernetes( + configure_cni: ->{ + configure_multus if ONEAPP_K8S_MULTUS_ENABLED + configure_calico if ONEAPP_K8S_CNI_PLUGIN == 'calico' + configure_canal if ONEAPP_K8S_CNI_PLUGIN == 'canal' + configure_cilium if ONEAPP_K8S_CNI_PLUGIN == 'cilium' + }, + configure_addons: ->{ + configure_metallb if ONEAPP_K8S_METALLB_ENABLED + + include_manifests 'one-longhorn' if ONEAPP_K8S_LONGHORN_ENABLED + include_manifests 'one-metallb' if ONEAPP_K8S_METALLB_ENABLED + include_manifests 'one-traefik' if ONEAPP_K8S_TRAEFIK_ENABLED + include_manifests 'one-cleaner' + } + ) + + if node[:join_worker] + vnf_ingress_setup_https_backend + vnf_ingress_setup_http_backend + end + + msg :info, 'Configuration completed successfully' + + when :bootstrap + puts 'bootstrap_success' + end +end diff --git a/appliances/OneKE/appliance/calico.rb b/appliances/OneKE/appliance/calico.rb new file mode 100644 index 00000000..fccc8a14 --- /dev/null +++ b/appliances/OneKE/appliance/calico.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +require 'base64' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def configure_calico(manifest_dir = K8S_MANIFEST_DIR) + msg :info, 'Configure Calico' + + if ONEAPP_K8S_CNI_CONFIG.nil? + msg :info, 'Create Calico CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-calico + namespace: kube-system + spec: + valuesContent: |- + MANIFEST + else + msg :info, 'Use Calico user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_CNI_CONFIG + end + + msg :info, 'Generate Calico config manifest' + manifest = YAML.dump_stream *documents + file "#{manifest_dir}/rke2-calico-config.yaml", manifest, overwrite: true +end diff --git a/appliances/OneKE/appliance/canal.rb b/appliances/OneKE/appliance/canal.rb new file mode 100644 index 00000000..f0b3c397 --- /dev/null +++ b/appliances/OneKE/appliance/canal.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +require 'base64' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def configure_canal(manifest_dir = K8S_MANIFEST_DIR) + msg :info, 'Configure Canal' + + if ONEAPP_K8S_CNI_CONFIG.nil? 
+ msg :info, 'Create Canal CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-canal + namespace: kube-system + spec: + valuesContent: |- + MANIFEST + else + msg :info, 'Use Canal user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_CNI_CONFIG + end + + msg :info, 'Generate Canal config manifest' + manifest = YAML.dump_stream *documents + file "#{manifest_dir}/rke2-canal-config.yaml", manifest, overwrite: true +end diff --git a/appliances/OneKE/appliance/cilium.rb b/appliances/OneKE/appliance/cilium.rb new file mode 100644 index 00000000..84fd6e27 --- /dev/null +++ b/appliances/OneKE/appliance/cilium.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +require 'base64' +require 'uri' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def configure_cilium(manifest_dir = K8S_MANIFEST_DIR, endpoint = K8S_CONTROL_PLANE_EP) + msg :info, 'Configure Cilium' + + ep = URI.parse "https://#{endpoint}" + + if ONEAPP_K8S_CNI_CONFIG.nil? + msg :info, 'Create Cilium CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-cilium + namespace: kube-system + spec: + valuesContent: |- + kubeProxyReplacement: strict + k8sServiceHost: "#{ep.host}" + k8sServicePort: #{ep.port} + cni: + chainingMode: "none" + exclusive: false + bgpControlPlane: + enabled: true + --- + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: default + namespace: kube-system + spec: + cidrs: {} + MANIFEST + + unless ONEAPP_K8S_CILIUM_RANGES.empty? + ip_address_pool = documents.find do |doc| + doc['kind'] == 'CiliumLoadBalancerIPPool' && doc.dig('metadata', 'name') == 'default' + end + ip_address_pool['spec']['cidrs'] = extract_cilium_ranges.map do |item| + { 'cidr' => item.join('/') } + end + end + else + msg :info, 'Use Cilium user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_CNI_CONFIG + end + + msg :info, 'Generate Cilium config manifest' + manifest = YAML.dump_stream *documents + file "#{manifest_dir}/rke2-cilium-config.yaml", manifest, overwrite: true +end + +def extract_cilium_ranges(ranges = ONEAPP_K8S_CILIUM_RANGES) + ranges.compact + .map(&:strip) + .reject(&:empty?) + .map { |item| item.split('/').map(&:strip) } + .reject { |item| item.length > 2 } + .reject { |item| item.map(&:empty?).any? 
} + .reject { |item| !(ipv4?(item.first) && integer?(item.last)) } +end diff --git a/appliances/OneKE/appliance/cilium_spec.rb b/appliances/OneKE/appliance/cilium_spec.rb new file mode 100644 index 00000000..718f1141 --- /dev/null +++ b/appliances/OneKE/appliance/cilium_spec.rb @@ -0,0 +1,154 @@ +# frozen_string_literal: true + +require 'base64' +require 'rspec' +require 'tmpdir' +require 'yaml' + +require_relative 'cilium.rb' + +RSpec.describe 'extract_cilium_ranges' do + it 'should extract and return all ranges (positive)' do + input = [ + '10.11.12.0/24', + '10.11.0.0/16' + ] + output = [ + %w[10.11.12.0 24], + %w[10.11.0.0 16] + ] + expect(extract_cilium_ranges(input)).to eq output + end + + it 'should extract and return no ranges (negative)' do + input = [ + '', + '10.11.12.0', + '10.11.12.0/', + 'asd.11.12.0/24', + '10.11.12.0/asd' + ] + output = [] + expect(extract_cilium_ranges(input)).to eq output + end +end + +RSpec.describe 'configure_cilium' do + it 'should apply user-defined ranges (empty)' do + stub_const 'K8S_CONTROL_PLANE_EP', '192.168.150.86:6443' + stub_const 'ONEAPP_K8S_CNI_PLUGIN', 'cilium' + stub_const 'ONEAPP_K8S_CNI_CONFIG', nil + stub_const 'ONEAPP_K8S_CILIUM_RANGES', [] + output = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-cilium + namespace: kube-system + spec: + valuesContent: |- + kubeProxyReplacement: strict + k8sServiceHost: "192.168.150.86" + k8sServicePort: 6443 + cni: + chainingMode: "none" + exclusive: false + bgpControlPlane: + enabled: true + --- + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: default + namespace: kube-system + spec: + cidrs: {} + MANIFEST + Dir.mktmpdir do |temp_dir| + configure_cilium temp_dir + result = YAML.load_stream File.read "#{temp_dir}/rke2-cilium-config.yaml" + expect(result).to eq output + end + end + + it 'should apply user-defined ranges' do + stub_const 'K8S_CONTROL_PLANE_EP', '192.168.150.86:6443' + stub_const 'ONEAPP_K8S_CNI_PLUGIN', 'cilium' + stub_const 'ONEAPP_K8S_CILIUM_RANGES', ['192.168.150.128/25', '10.11.12.0/24'] + output = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-cilium + namespace: kube-system + spec: + valuesContent: |- + kubeProxyReplacement: strict + k8sServiceHost: "192.168.150.86" + k8sServicePort: 6443 + cni: + chainingMode: "none" + exclusive: false + bgpControlPlane: + enabled: true + --- + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: default + namespace: kube-system + spec: + cidrs: + - cidr: 192.168.150.128/25 + - cidr: 10.11.12.0/24 + MANIFEST + Dir.mktmpdir do |temp_dir| + configure_cilium temp_dir + result = YAML.load_stream File.read "#{temp_dir}/rke2-cilium-config.yaml" + expect(result).to eq output + end + end + + it 'should apply user-defined config manifest (and ignore user-defined ranges)' do + manifest = <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-cilium + namespace: kube-system + spec: + valuesContent: |- + kubeProxyReplacement: strict + k8sServiceHost: "192.168.150.86" + k8sServicePort: 6443 + cni: + chainingMode: "none" + exclusive: false + bgpControlPlane: + enabled: true + --- + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: default + namespace: kube-system + spec: + cidrs: + - cidr: 192.168.150.128/25 + - cidr: 10.11.12.0/24 + MANIFEST + stub_const 'ONEAPP_K8S_CNI_PLUGIN', 
'cilium' + stub_const 'ONEAPP_K8S_CNI_CONFIG', Base64.encode64(manifest) + stub_const 'ONEAPP_K8S_CILIUM_RANGES', ['1.2.3.4/5', '6.7.8.9/10'] + output = YAML.load_stream manifest + Dir.mktmpdir do |temp_dir| + configure_cilium temp_dir + result = YAML.load_stream File.read "#{temp_dir}/rke2-cilium-config.yaml" + expect(result).to eq output + end + end + +end diff --git a/appliances/OneKE/appliance/cleaner.rb b/appliances/OneKE/appliance/cleaner.rb new file mode 100644 index 00000000..91eb747b --- /dev/null +++ b/appliances/OneKE/appliance/cleaner.rb @@ -0,0 +1,125 @@ +# frozen_string_literal: true + +require_relative 'config.rb' +require_relative 'helpers.rb' +require_relative 'onegate.rb' + +def install_cleaner(addon_dir = ONE_ADDON_DIR) + msg :info, 'Install One-Cleaner' + fetch_cleaner addon_dir +end + +def fetch_cleaner(addon_dir = ONE_ADDON_DIR, cron = '*/2 * * * *', ttl = 180) + msg :info, 'Generate One-Cleaner manifest' + + file "#{addon_dir}/one-cleaner.yaml", <<~MANIFEST, overwrite: true + apiVersion: batch/v1 + kind: CronJob + metadata: + name: one-cleaner + namespace: kube-system + spec: + schedule: "#{cron}" + jobTemplate: + spec: + ttlSecondsAfterFinished: #{ttl} + template: + spec: + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Equal + value: "true" + effect: NoExecute + nodeSelector: + node-role.kubernetes.io/master: "true" + containers: + - name: one-cleaner + image: ruby:2.7-alpine3.16 + imagePullPolicy: IfNotPresent + command: + - /usr/local/bin/ruby + - /etc/one-appliance/service.d/appliance/cleaner.rb + volumeMounts: + - name: kubectl + mountPath: /var/lib/rancher/rke2/bin/kubectl + - name: kubeconfig + mountPath: /etc/rancher/rke2/rke2.yaml + - name: context + mountPath: /var/run/one-context/one_env + - name: onegate + mountPath: /usr/bin/onegate + - name: onegaterb + mountPath: /usr/bin/onegate.rb + - name: appliance + mountPath: /etc/one-appliance/service.d/appliance/ + volumes: + - name: kubectl + hostPath: + path: /var/lib/rancher/rke2/bin/kubectl + type: File + - name: kubeconfig + hostPath: + path: /etc/rancher/rke2/rke2.yaml + type: File + - name: context + hostPath: + path: /var/run/one-context/one_env + type: File + - name: onegate + hostPath: + path: /usr/bin/onegate + type: File + - name: onegaterb + hostPath: + path: /usr/bin/onegate.rb + type: File + - name: appliance + hostPath: + path: /etc/one-appliance/service.d/appliance/ + type: Directory + restartPolicy: Never + MANIFEST +end + +def detect_invalid_nodes + kubernetes_nodes = kubectl_get_nodes.dig 'items' + if kubernetes_nodes.nil? || kubernetes_nodes.empty? + msg :error, 'No Kubernetes nodes found' + exit 1 + end + + onegate_vms = all_vms_show + if onegate_vms.nil? || onegate_vms.empty? + msg :error, 'No Onegate VMs found' + exit 1 + end + + kubernetes_node_names = kubernetes_nodes + .map { |item| item.dig 'metadata', 'name' } + .reject(&:nil?) + .select { |item| item.start_with? 'oneke-ip-' } + + onegate_node_names = onegate_vms + .map { |item| item.dig 'VM', 'USER_TEMPLATE', 'ONEGATE_K8S_NODE_NAME' } + .reject(&:nil?) + .select { |item| item.start_with? 'oneke-ip-' } + + kubernetes_node_names - onegate_node_names +end + +if caller.empty? + # The ruby / alpine container does not have bash pre-installed, + # but busybox / ash seems to be somewhat compatible, at least usable.. + # It cannot be a simple symlink, because busybox is a multi-call binary.. 
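+ # The wrapper below simply re-executes /bin/ash under the /bin/bash name, which is enough for the bash/kubectl helpers this cleaner shells out through.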
+ file '/bin/bash', <<~SCRIPT, mode: 'u=rwx,go=rx', overwrite: false + #!/bin/ash + exec /bin/ash "$@" + SCRIPT + + detect_invalid_nodes.each do |name| + puts kubectl "delete node '#{name}'" + end +end diff --git a/appliances/OneKE/appliance/cleaner_spec.rb b/appliances/OneKE/appliance/cleaner_spec.rb new file mode 100644 index 00000000..bd61eb47 --- /dev/null +++ b/appliances/OneKE/appliance/cleaner_spec.rb @@ -0,0 +1,482 @@ +# frozen_string_literal: true + +require 'json' +require 'rspec' + +require_relative 'cleaner.rb' + +RSpec.describe 'detect_invalid_nodes' do + it 'should return list of invalid nodes (to be removed)' do + allow(self).to receive(:kubectl_get_nodes).and_return JSON.parse <<~'JSON' + { + "apiVersion": "v1", + "items": [ + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "flannel.alpha.coreos.com/backend-data": "{\"VtepMAC\":\"6e:c7:7a:19:fb:7f\"}", + "flannel.alpha.coreos.com/backend-type": "vxlan", + "flannel.alpha.coreos.com/kube-subnet-manager": "true", + "flannel.alpha.coreos.com/public-ip": "172.20.0.100", + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "projectcalico.org/IPv4Address": "172.20.0.100/24", + "projectcalico.org/IPv4IPIPTunnelAddr": "10.244.0.1", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-03-15T09:06:29Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "oneke-ip-172-20-0-100", + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/master": "", + "node.kubernetes.io/exclude-from-external-load-balancers": "" + }, + "name": "oneke-ip-172-20-0-100", + "resourceVersion": "17537", + "uid": "e198b625-8c3b-40c5-b41b-acd994a73be3" + }, + "spec": { + "podCIDR": "10.244.0.0/24", + "podCIDRs": [ + "10.244.0.0/24" + ], + "taints": [ + { + "effect": "NoSchedule", + "key": "node-role.kubernetes.io/master" + } + ] + }, + "status": { + "addresses": [ + { + "address": "172.20.0.100", + "type": "InternalIP" + }, + { + "address": "oneke-ip-172-20-0-100", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "2", + "ephemeral-storage": "18566299208", + "hugepages-2Mi": "0", + "memory": "1939544Ki", + "pods": "110" + }, + "capacity": { + "cpu": "2", + "ephemeral-storage": "20145724Ki", + "hugepages-2Mi": "0", + "memory": "2041944Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2022-03-15T09:07:04Z", + "lastTransitionTime": "2022-03-15T09:07:04Z", + "message": "Flannel is running on this node", + "reason": "FlannelIsUp", + "status": "False", + "type": "NetworkUnavailable" + }, + { + "lastHeartbeatTime": "2022-03-15T11:09:59Z", + "lastTransitionTime": "2022-03-15T09:06:22Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:09:59Z", + "lastTransitionTime": "2022-03-15T09:06:22Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:09:59Z", + "lastTransitionTime": "2022-03-15T09:06:22Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:09:59Z", + 
"lastTransitionTime": "2022-03-15T09:07:02Z", + "message": "kubelet is posting ready status. AppArmor enabled", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [], + "nodeInfo": { + "architecture": "amd64", + "bootID": "612377df-f413-43ae-91d9-b9ab75d2661a", + "containerRuntimeVersion": "docker://20.10.13", + "kernelVersion": "5.4.0-1058-kvm", + "kubeProxyVersion": "v1.21.10", + "kubeletVersion": "v1.21.10", + "machineID": "2f2741fd3cb14ef4b6560ae805e1756c", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "2f2741fd-3cb1-4ef4-b656-0ae805e1756c" + } + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "csi.volume.kubernetes.io/nodeid": "{\"driver.longhorn.io\":\"oneke-ip-172-20-0-101\"}", + "flannel.alpha.coreos.com/backend-data": "{\"VtepMAC\":\"fa:f6:f4:57:8f:2e\"}", + "flannel.alpha.coreos.com/backend-type": "vxlan", + "flannel.alpha.coreos.com/kube-subnet-manager": "true", + "flannel.alpha.coreos.com/public-ip": "172.20.0.101", + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "projectcalico.org/IPv4Address": "172.20.0.101/24", + "projectcalico.org/IPv4IPIPTunnelAddr": "10.244.1.1", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-03-15T09:08:14Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "oneke-ip-172-20-0-101", + "kubernetes.io/os": "linux" + }, + "name": "oneke-ip-172-20-0-101", + "resourceVersion": "17722", + "uid": "dc33eae6-73c2-4a91-90c7-990c2fa5cc11" + }, + "spec": { + "podCIDR": "10.244.1.0/24", + "podCIDRs": [ + "10.244.1.0/24" + ] + }, + "status": { + "addresses": [ + { + "address": "172.20.0.101", + "type": "InternalIP" + }, + { + "address": "oneke-ip-172-20-0-101", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "2", + "ephemeral-storage": "18566299208", + "hugepages-2Mi": "0", + "memory": "1939544Ki", + "pods": "110" + }, + "capacity": { + "cpu": "2", + "ephemeral-storage": "20145724Ki", + "hugepages-2Mi": "0", + "memory": "2041944Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2022-03-15T09:08:25Z", + "lastTransitionTime": "2022-03-15T09:08:25Z", + "message": "Flannel is running on this node", + "reason": "FlannelIsUp", + "status": "False", + "type": "NetworkUnavailable" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:22Z", + "lastTransitionTime": "2022-03-15T09:08:14Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:22Z", + "lastTransitionTime": "2022-03-15T09:08:14Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:22Z", + "lastTransitionTime": "2022-03-15T09:08:14Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:22Z", + "lastTransitionTime": "2022-03-15T09:08:25Z", + "message": "kubelet is posting ready status. 
AppArmor enabled", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [], + "nodeInfo": { + "architecture": "amd64", + "bootID": "b2b7b410-bc29-4a6d-b4a6-fdbf7328b6cb", + "containerRuntimeVersion": "docker://20.10.13", + "kernelVersion": "5.4.0-1058-kvm", + "kubeProxyVersion": "v1.21.10", + "kubeletVersion": "v1.21.10", + "machineID": "1f5851ae52914927a1cf4c86427e0a36", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "1f5851ae-5291-4927-a1cf-4c86427e0a36" + } + } + }, + { + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "csi.volume.kubernetes.io/nodeid": "{\"driver.longhorn.io\":\"oneke-ip-172-20-0-102\"}", + "flannel.alpha.coreos.com/backend-data": "{\"VtepMAC\":\"1a:f1:ed:df:19:cd\"}", + "flannel.alpha.coreos.com/backend-type": "vxlan", + "flannel.alpha.coreos.com/kube-subnet-manager": "true", + "flannel.alpha.coreos.com/public-ip": "172.20.0.102", + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "projectcalico.org/IPv4Address": "172.20.0.102/24", + "projectcalico.org/IPv4IPIPTunnelAddr": "10.244.2.1", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2022-03-15T09:08:28Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "oneke-ip-172-20-0-102", + "kubernetes.io/os": "linux", + "node.longhorn.io/create-default-disk": "true" + }, + "name": "oneke-ip-172-20-0-102", + "resourceVersion": "17746", + "uid": "cb5c7412-0ec8-47a6-9caa-5fd8bd720684" + }, + "spec": { + "podCIDR": "10.244.2.0/24", + "podCIDRs": [ + "10.244.2.0/24" + ], + "taints": [ + { + "effect": "NoSchedule", + "key": "node.longhorn.io/create-default-disk", + "value": "true" + } + ] + }, + "status": { + "addresses": [ + { + "address": "172.20.0.102", + "type": "InternalIP" + }, + { + "address": "oneke-ip-172-20-0-102", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "2", + "ephemeral-storage": "18566299208", + "hugepages-2Mi": "0", + "memory": "1939544Ki", + "pods": "110" + }, + "capacity": { + "cpu": "2", + "ephemeral-storage": "20145724Ki", + "hugepages-2Mi": "0", + "memory": "2041944Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2022-03-15T09:08:39Z", + "lastTransitionTime": "2022-03-15T09:08:39Z", + "message": "Flannel is running on this node", + "reason": "FlannelIsUp", + "status": "False", + "type": "NetworkUnavailable" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:32Z", + "lastTransitionTime": "2022-03-15T09:08:28Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:32Z", + "lastTransitionTime": "2022-03-15T09:08:28Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:32Z", + "lastTransitionTime": "2022-03-15T09:08:28Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2022-03-15T11:11:32Z", + "lastTransitionTime": "2022-03-15T09:08:38Z", + "message": "kubelet is posting ready status. 
AppArmor enabled", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [], + "nodeInfo": { + "architecture": "amd64", + "bootID": "0df98c4d-163e-4468-b299-7d8fdb34a172", + "containerRuntimeVersion": "docker://20.10.13", + "kernelVersion": "5.4.0-1058-kvm", + "kubeProxyVersion": "v1.21.10", + "kubeletVersion": "v1.21.10", + "machineID": "69820ee32d094fdbbb065b80643a06dc", + "operatingSystem": "linux", + "osImage": "Ubuntu 20.04.4 LTS", + "systemUUID": "69820ee3-2d09-4fdb-bb06-5b80643a06dc" + } + } + } + ], + "kind": "List", + "metadata": { + "resourceVersion": "", + "selfLink": "" + } + } + JSON + allow(self).to receive(:all_vms_show).and_return JSON.parse <<~JSON + [ + { + "VM": { + "NAME": "master_0_(service_21)", + "ID": "49", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-172-20-0-100", + "ONEGATE_K8S_HASH": "09a9ed140fec2fa1a2281a3125952d6f2951b67a67534647b0a606ae2d478f60", + "ONEGATE_K8S_MASTER": "172.20.0.100", + "ONEGATE_K8S_TOKEN": "sg7711.p19vy0eqxefc0lqz", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.100", + "MAC": "02:00:ac:14:00:64", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + }, + { + "VM": { + "NAME": "storage_0_(service_21)", + "ID": "51", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-172-20-0-102", + "READY": "YES", + "ROLE_NAME": "storage", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.102", + "MAC": "02:00:ac:14:00:66", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + ] + JSON + expect(detect_invalid_nodes).to eq ['oneke-ip-172-20-0-101'] + end +end diff --git a/appliances/OneKE/appliance/config.rb 
b/appliances/OneKE/appliance/config.rb new file mode 100644 index 00000000..74ccc499 --- /dev/null +++ b/appliances/OneKE/appliance/config.rb @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +def env(name, default) + value = ENV.fetch name.to_s, '' + value = value.empty? ? default : value + value = %w[YES 1].include?(value.upcase) if default.instance_of?(String) && %w[YES NO].include?(default.upcase) + value +end + +ONE_SERVICE_VERSION = env :ONE_SERVICE_VERSION, '1.27' +ONE_SERVICE_AIRGAPPED = env :ONE_SERVICE_AIRGAPPED, 'YES' +ONE_SERVICE_SETUP_DIR = env :ONE_SERVICE_SETUP_DIR, '/opt/one-appliance' + +ONE_SERVICE_RKE2_RELEASE = env :ONE_SERVICE_RKE2_RELEASE, "#{ONE_SERVICE_VERSION}.2" +ONE_SERVICE_RKE2_VERSION = env :ONE_SERVICE_RKE2_VERSION, "v#{ONE_SERVICE_RKE2_RELEASE}+rke2r1" +ONE_SERVICE_HELM_VERSION = env :ONE_SERVICE_HELM_VERSION, '3.11.3' + +ONEAPP_K8S_MULTUS_ENABLED = env :ONEAPP_K8S_MULTUS_ENABLED, 'NO' +ONEAPP_K8S_MULTUS_CONFIG = env :ONEAPP_K8S_MULTUS_CONFIG, nil + +ONEAPP_K8S_CNI_PLUGIN = env :ONEAPP_K8S_CNI_PLUGIN, 'cilium' +ONEAPP_K8S_CNI_CONFIG = env :ONEAPP_K8S_CNI_CONFIG, nil +ONEAPP_K8S_CILIUM_RANGES = ENV.select { |key, _| key.start_with? 'ONEAPP_K8S_CILIUM_RANGE' }.values.freeze + +ONEAPP_K8S_LONGHORN_CHART_VERSION = env :ONEAPP_K8S_LONGHORN_CHART_VERSION, '1.4.1' +ONEAPP_K8S_LONGHORN_ENABLED = env :ONEAPP_K8S_LONGHORN_ENABLED, 'NO' + +ONEAPP_K8S_METALLB_CHART_VERSION = env :ONEAPP_K8S_METALLB_CHART_VERSION, '0.13.9' +ONEAPP_K8S_METALLB_ENABLED = env :ONEAPP_K8S_METALLB_ENABLED, 'NO' +ONEAPP_K8S_METALLB_CONFIG = env :ONEAPP_K8S_METALLB_CONFIG, nil +ONEAPP_K8S_METALLB_RANGES = ENV.select { |key, _| key.start_with? 'ONEAPP_K8S_METALLB_RANGE' }.values.freeze + +ONEAPP_K8S_TRAEFIK_CHART_VERSION = env :ONEAPP_K8S_TRAEFIK_CHART_VERSION, '23.0.0' +ONEAPP_K8S_TRAEFIK_ENABLED = env :ONEAPP_K8S_TRAEFIK_ENABLED, 'NO' + +ONEAPP_VROUTER_ETH0_VIP0 = env :ONEAPP_VROUTER_ETH0_VIP0, nil +ONEAPP_VROUTER_ETH1_VIP0 = env :ONEAPP_VROUTER_ETH1_VIP0, nil +ONEAPP_VNF_HAPROXY_LB2_PORT = env :ONEAPP_VNF_HAPROXY_LB2_PORT, '443' +ONEAPP_VNF_HAPROXY_LB3_PORT = env :ONEAPP_VNF_HAPROXY_LB3_PORT, '80' + +ONEAPP_K8S_EXTRA_SANS = env :ONEAPP_K8S_EXTRA_SANS, 'localhost,127.0.0.1' + +ONEAPP_STORAGE_DEVICE = env :ONEAPP_STORAGE_DEVICE, nil # for example '/dev/vdb' +ONEAPP_STORAGE_FILESYSTEM = env :ONEAPP_STORAGE_FILESYSTEM, 'xfs' +ONEAPP_STORAGE_MOUNTPOINT = env :ONEAPP_STORAGE_MOUNTPOINT, '/var/lib/longhorn' + +ONE_ADDON_DIR = env :ONE_ADDON_DIR, "#{ONE_SERVICE_SETUP_DIR}/addons" +ONE_AIRGAP_DIR = env :ONE_AIRGAP_DIR, "#{ONE_SERVICE_SETUP_DIR}/airgap" + +K8S_MANIFEST_DIR = env :K8S_MANIFEST_DIR, '/var/lib/rancher/rke2/server/manifests' +K8S_IMAGE_DIR = env :K8S_IMAGE_DIR, '/var/lib/rancher/rke2/agent/images' + +K8S_SUPERVISOR_EP = "#{ONEAPP_VROUTER_ETH0_VIP0}:9345" +K8S_CONTROL_PLANE_EP = "#{ONEAPP_VROUTER_ETH0_VIP0}:6443" + +RETRIES = 86 +SECONDS = 5 + +PACKAGES = %w[ + curl + gawk + gnupg + lsb-release + openssl + skopeo + zstd +].freeze + +KUBECONFIG = %w[/etc/rancher/rke2/rke2.yaml].freeze diff --git a/appliances/OneKE/appliance/helpers.rb b/appliances/OneKE/appliance/helpers.rb new file mode 100644 index 00000000..c263bcf7 --- /dev/null +++ b/appliances/OneKE/appliance/helpers.rb @@ -0,0 +1,242 @@ +# frozen_string_literal: true + +require 'base64' +require 'date' +require 'fileutils' +require 'json' +require 'ipaddr' +require 'logger' +require 'net/http' +require 'open3' +require 'socket' +require 'tempfile' +require 'uri' +require 'yaml' + +LOGGER_STDOUT = Logger.new(STDOUT) +LOGGER_STDERR = 
Logger.new(STDERR) + +LOGGERS = { + info: LOGGER_STDOUT.method(:info), + debug: LOGGER_STDERR.method(:debug), + warn: LOGGER_STDERR.method(:warn), + error: LOGGER_STDERR.method(:error) +}.freeze + +def msg(level, string) + LOGGERS[level].call string +end + +def slurp(path) + Base64.encode64(File.read(path)).lines.map(&:strip).join +end + +def file(path, content, mode: 'u=rw,go=r', overwrite: false) + return if !overwrite && File.exist?(path) + + FileUtils.mkdir_p File.dirname path + + File.write path, content + + FileUtils.chmod mode, path +end + +def bash(script, chomp: false, terminate: true) + command = 'exec /bin/bash --login -s' + + stdin_data = <<~SCRIPT + export DEBIAN_FRONTEND=noninteractive + set -o errexit -o nounset -o pipefail + set -x + #{script} + SCRIPT + + stdout, stderr, status = Open3.capture3 command, stdin_data: stdin_data + unless status.exitstatus.zero? + error_message = "#{status.exitstatus}: #{stderr}" + msg :error, error_message + + raise error_message unless terminate + + exit status.exitstatus + end + + chomp ? stdout.chomp : stdout +end + +def kubectl(arguments, namespace: nil, kubeconfig: KUBECONFIG) + kubeconfig = [kubeconfig].flatten.find { |path| !path.nil? && File.exist?(path) } + command = ['/var/lib/rancher/rke2/bin/kubectl'] + command << "--kubeconfig #{kubeconfig}" unless kubeconfig.nil? + command << "--namespace #{namespace}" unless namespace.nil? + command << arguments + bash command.flatten.join(' ') +end + +def kubectl_get_nodes + JSON.parse kubectl 'get nodes -o json' +end + +def kubectl_get_configmap(name, namespace: 'kube-system', kubeconfig: KUBECONFIG) + YAML.safe_load kubectl <<~COMMAND, namespace: namespace, kubeconfig: kubeconfig + get configmap/#{name} -o yaml + COMMAND +end + +def kubectl_apply_f(path, kubeconfig: KUBECONFIG) + kubectl "apply -f #{path}", kubeconfig: kubeconfig +end + +def kubectl_apply(manifest, kubeconfig: KUBECONFIG) + Tempfile.create do |temp_file| + temp_file.write manifest + temp_file.close + return kubectl_apply_f temp_file.path, kubeconfig: kubeconfig + end +end + +def pull_docker_images(images, dest_dir) + images.each do |image| + name, tag = image.split ':' + + path = "#{dest_dir}/#{name.gsub '/', '_'}.tar.zst" + + next if File.exist? path + + msg :info, "Pull #{name}:#{tag} -> #{path}" + + FileUtils.mkdir_p dest_dir + + bash <<~SCRIPT + skopeo copy 'docker://#{name}:#{tag}' 'docker-archive:/dev/fd/2:#{name}:#{tag}' 3>&1 1>&2 2>&3 \ + | zstd --ultra -o '#{path}' + SCRIPT + end +end + +def extract_images(manifest) + images = [] + + YAML.load_stream manifest do |document| + next if document.nil? + + if document.dig('kind') == 'HelmChart' + # NOTE: Aassuming all one-*.yaml manifests contain chartContent: and valuesContent: fields. 
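+            # Illustrative shape of such a document (all values hypothetical):
+            #   apiVersion: helm.cattle.io/v1
+            #   kind: HelmChart
+            #   spec:
+            #     chartContent: <base64-encoded chart .tgz>
+            #     valuesContent: |
+            #       someKey: someValue
+            # The embedded chart is rendered with `helm template` below and the output is scanned recursively for images.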
+ chart_tgz = Base64.decode64 document.dig('spec', 'chartContent') + values_yml = document.dig('spec', 'valuesContent') + + Dir.mktmpdir do |temp_dir| + file "#{temp_dir}/chart.tgz", chart_tgz, overwrite: true + file "#{temp_dir}/values.yml", values_yml, overwrite: true + images += extract_images bash("helm template '#{temp_dir}/chart.tgz' -f '#{temp_dir}/values.yml'") + end + + next + end + + containers = [] + containers += document.dig('spec', 'template', 'spec', 'containers') || [] + containers += document.dig('spec', 'template', 'spec', 'initContainers') || [] + containers += document.dig('spec', 'jobTemplate', 'spec', 'template', 'spec', 'containers') || [] + containers += document.dig('spec', 'jobTemplate', 'spec', 'template', 'spec', 'initContainers') || [] + + images += containers.map { |container| container.dig 'image' } + end + + images.uniq +end + +def pull_addon_images(addon_dir = ONE_ADDON_DIR, airgap_dir = ONE_AIRGAP_DIR) + Dir["#{addon_dir}/one-*.yaml"].each do |path| + manifest = File.read path + pull_docker_images extract_images(manifest), "#{airgap_dir}/#{File.basename(path, '.yaml')}/" + end +end + +# NOTE: This must be executed *before* starting rke2-server/agent services, +# otherwise images will not be loaded into containerd. +def include_images(name, airgap_dir = ONE_AIRGAP_DIR, image_dir = K8S_IMAGE_DIR) + FileUtils.mkdir_p image_dir + Dir["#{airgap_dir}/#{name}/*.tar.zst"].each do |path| + msg :info, "Include airgapped image: #{File.basename(path)}" + symlink = "#{image_dir}/#{File.basename(path)}" + File.symlink path, symlink unless File.exist? symlink + end +end + +# NOTE: This must be executed *after* starting rke2-server/agent services. +def include_manifests(name, addon_dir = ONE_ADDON_DIR, manifest_dir = K8S_MANIFEST_DIR) + FileUtils.mkdir_p manifest_dir + Dir["#{addon_dir}/#{name}*.yaml"].each do |path| + msg :info, "Include addon: #{File.basename(path)}" + symlink = "#{manifest_dir}/#{File.basename(path)}" + File.symlink path, symlink unless File.exist? symlink + end +end + +def with_policy_rc_d_disabled + file '/usr/sbin/policy-rc.d', 'exit 101', mode: 'a+x', overwrite: true + yield +ensure + file '/usr/sbin/policy-rc.d', 'exit 0', mode: 'a+x', overwrite: true +end + +def install_packages(packages, hold: false) + msg :info, "Install APT packages: #{packages.join(',')}" + + puts bash <<~SCRIPT + apt-get install -y #{packages.join(' ')} + SCRIPT + + bash <<~SCRIPT if hold + apt-mark hold #{packages.join(' ')} + SCRIPT +end + +def ipv4?(string) + string.is_a?(String) && IPAddr.new(string) ? true : false +rescue IPAddr::InvalidAddressError + false +end + +def integer?(string) + Integer(string) ? true : false +rescue ArgumentError + false +end + +alias port? integer? + +def tcp_port_open?(ipv4, port, seconds = 5) + # > If a block is given, the block is called with the socket. + # > The value of the block is returned. + # > The socket is closed when this method returns. 
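+  # The empty block means only the TCP connect itself is exercised; the socket is closed
+  # immediately and `true` is returned, while connection-level errors map to `false` below.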
+ Socket.tcp(ipv4, port, connect_timeout: seconds) {} + true +rescue Errno::ECONNREFUSED, Errno::ECONNRESET, Errno::EHOSTUNREACH, Errno::ETIMEDOUT + false +end + +def http_status_200?(url, + cacert = '/var/lib/rancher/rke2/server/tls/server-ca.crt', + cert = '/var/lib/rancher/rke2/server/tls/client-admin.crt', + key = '/var/lib/rancher/rke2/server/tls/client-admin.key', + seconds = 5) + + url = URI.parse url + http = Net::HTTP.new url.host, url.port + + if url.scheme == 'https' + http.use_ssl = true + http.verify_mode = OpenSSL::SSL::VERIFY_PEER + http.ca_file = cacert + http.cert = OpenSSL::X509::Certificate.new File.read cert + http.key = OpenSSL::PKey::EC.new File.read key + end + + http.open_timeout = seconds + + http.get(url.path).code == '200' +rescue Errno::ECONNREFUSED, Errno::ECONNRESET, Errno::EHOSTUNREACH, Errno::ETIMEDOUT, Net::OpenTimeout + false +end diff --git a/appliances/OneKE/appliance/helpers_spec.rb b/appliances/OneKE/appliance/helpers_spec.rb new file mode 100644 index 00000000..51e1f22f --- /dev/null +++ b/appliances/OneKE/appliance/helpers_spec.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +require 'rspec' + +require_relative 'helpers.rb' + +RSpec.describe 'bash' do + it 'should raise' do + allow(self).to receive(:exit).and_return nil + expect { bash 'false', terminate: false }.to raise_error(RuntimeError) + end + it 'should not raise' do + allow(self).to receive(:exit).and_return nil + expect { bash 'false' }.not_to raise_error + end +end + +RSpec.describe 'ipv4?' do + it 'should evaluate to true' do + ipv4s = %w[ + 10.11.12.13 + 10.11.12.13/24 + 10.11.12.13/32 + 192.168.144.120 + ] + ipv4s.each do |item| + expect(ipv4?(item)).to be true + end + end + it 'should evaluate to false' do + ipv4s = %w[ + 10.11.12 + 10.11.12. 
+ 10.11.12.256 + asd.168.144.120 + 192.168.144.96-192.168.144.120 + ] + ipv4s.each do |item| + expect(ipv4?(item)).to be false + end + end +end diff --git a/appliances/OneKE/appliance/kubernetes.rb b/appliances/OneKE/appliance/kubernetes.rb new file mode 100644 index 00000000..93fc0e7d --- /dev/null +++ b/appliances/OneKE/appliance/kubernetes.rb @@ -0,0 +1,312 @@ +# frozen_string_literal: true + +require 'securerandom' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' +require_relative 'onegate.rb' +require_relative 'vnf.rb' + +def install_kubernetes(airgap_dir = ONE_AIRGAP_DIR) + rke2_release_url = "https://github.com/rancher/rke2/releases/download/#{ONE_SERVICE_RKE2_VERSION}" + + msg :info, "Install RKE2 runtime: #{ONE_SERVICE_RKE2_VERSION}" + bash <<~SCRIPT + curl -fsSL '#{rke2_release_url}/rke2.linux-amd64.tar.gz' | tar -xz -f- -C /usr/local/ + SCRIPT + + msg :info, "Download RKE2 airgapped image archives: #{ONE_SERVICE_RKE2_VERSION}" + bash <<~SCRIPT + curl -fsSL '#{rke2_release_url}/rke2-images-core.linux-amd64.tar.zst' \ + | install -o 0 -g 0 -m u=rw,go=r -D /dev/fd/0 '#{airgap_dir}/rke2-images-core/rke2-images-core.linux-amd64.tar.zst' + SCRIPT + bash <<~SCRIPT + curl -fsSL '#{rke2_release_url}/rke2-images-multus.linux-amd64.tar.zst' \ + | install -o 0 -g 0 -m u=rw,go=r -D /dev/fd/0 '#{airgap_dir}/rke2-images-multus/rke2-images-multus.linux-amd64.tar.zst' + SCRIPT + bash <<~SCRIPT + curl -fsSL '#{rke2_release_url}/rke2-images-cilium.linux-amd64.tar.zst' \ + | install -o 0 -g 0 -m u=rw,go=r -D /dev/fd/0 '#{airgap_dir}/rke2-images-cilium/rke2-images-cilium.linux-amd64.tar.zst' + SCRIPT + + msg :info, "Install Helm binary: #{ONE_SERVICE_HELM_VERSION}" + bash <<~SCRIPT + curl -fsSL 'https://get.helm.sh/helm-v#{ONE_SERVICE_HELM_VERSION}-linux-amd64.tar.gz' \ + | tar -xOz -f- linux-amd64/helm \ + | install -o 0 -g 0 -m u=rwx,go=rx -D /dev/fd/0 /usr/local/bin/helm + SCRIPT + + msg :info, 'Link kubectl binary' + File.symlink '/var/lib/rancher/rke2/bin/kubectl', '/usr/local/bin/kubectl' + + msg :info, 'Link crictl binary' + File.symlink '/var/lib/rancher/rke2/bin/crictl', '/usr/local/bin/crictl' + + msg :info, 'Set BASH profile defaults' + file '/etc/profile.d/98-oneke.sh', <<~PROFILE, mode: 'u=rw,go=r' + export KUBECONFIG=/etc/rancher/rke2/rke2.yaml + export CRI_CONFIG_FILE=/var/lib/rancher/rke2/agent/etc/crictl.yaml + PROFILE +end + +def configure_kubernetes(configure_cni: ->{}, configure_addons: ->{}) + node = detect_node + + if node[:init_master] + configure_cni.() + init_master + configure_addons.() + elsif node[:join_master] + configure_cni.() + join_master node[:token] + configure_addons.() + elsif node[:join_worker] + join_worker node[:token] + elsif node[:join_storage] + join_storage node[:token] + end + + node +end + +def wait_for_any_master(retries = RETRIES, seconds = SECONDS) + msg :info, 'Wait for any master to be available' + + retries.times.to_a.reverse.each do |retry_num| + msg :debug, "wait_for_any_master / #{retry_num}" + + master_vms_show.each do |master_vm| + ready = master_vm.dig 'VM', 'USER_TEMPLATE', 'READY' + next unless ready == 'YES' + + # Not using the CP/EP here, only a direct validation without going through VNF/LB. + # The first responding master wins. + + k8s_master = master_vm.dig 'VM', 'USER_TEMPLATE', 'ONEGATE_K8S_MASTER' + next if k8s_master.nil? + + return master_vm if tcp_port_open? k8s_master, 6443 + end + + if retry_num.zero? 
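+      # All retries exhausted: no READY master answered on TCP port 6443.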
+ msg :error, 'No usable master found' + exit 1 + end + + sleep seconds + end +end + +def wait_for_control_plane(endpoint = K8S_CONTROL_PLANE_EP, retries = RETRIES, seconds = SECONDS) + msg :info, 'Wait for Control-Plane to be ready' + + retries.times.to_a.reverse.each do |retry_num| + msg :debug, "wait_for_control_plane / #{retry_num}" + + break if http_status_200? "https://#{endpoint}/readyz" + + if retry_num.zero? + msg :error, 'Control-Plane not ready' + exit 1 + end + + sleep seconds + end +end + +def wait_for_kubelets(retries = RETRIES, seconds = SECONDS) + msg :info, 'Wait for available Kubelets to be ready' + + retries.times.to_a.reverse.each do |retry_num| + msg :debug, "wait_for_kubelets / #{retry_num}" + + conditions = kubectl_get_nodes['items'].map do |node| + node.dig('status', 'conditions').find do |item| + item['reason'] == 'KubeletReady' && item['type'] == 'Ready' && item['status'] == 'True' + end + end + + break if conditions.all? + + if retry_num.zero? + msg :error, 'Kubelets not ready' + exit 1 + end + + sleep seconds + end +end + +def init_master + ipv4 = external_ipv4s.first + name = "oneke-ip-#{ipv4.gsub '.', '-'}" + + msg :info, "Set local hostname: #{name}" + bash "hostnamectl set-hostname #{name}" + + onegate_vm_update ["ONEGATE_K8S_NODE_NAME=#{name}"] + + msg :info, 'Set this master to be the first VNF backend' + vnf_supervisor_setup_backend + vnf_control_plane_setup_backend + + cni = [] + cni << 'multus' if ONEAPP_K8S_MULTUS_ENABLED + cni << ONEAPP_K8S_CNI_PLUGIN + + server_config = { + 'node-name' => name, + 'token' => SecureRandom.uuid, + 'tls-san' => ONEAPP_K8S_EXTRA_SANS.split(',').map(&:strip).append(ONEAPP_VROUTER_ETH0_VIP0), + 'node-taint' => ['CriticalAddonsOnly=true:NoExecute'], + 'disable' => ['rke2-ingress-nginx'], + 'cni' => cni, + 'disable-kube-proxy' => ONEAPP_K8S_CNI_PLUGIN == 'cilium' + } + + msg :info, 'Prepare initial rke2-server config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(server_config), overwrite: false + + msg :info, "Initialize first master: #{name}" + bash 'systemctl enable rke2-server.service --now' + + server_config.merge!({ + 'server' => "https://#{K8S_SUPERVISOR_EP}", + 'token' => File.read('/var/lib/rancher/rke2/server/node-token', encoding: 'utf-8').strip + }) + + msg :info, 'Normalize rke2-server config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(server_config), overwrite: true + + onegate_vm_update ["ONEGATE_K8S_MASTER=#{ipv4}", "ONEGATE_K8S_TOKEN=#{server_config['token']}"] + + wait_for_control_plane + wait_for_kubelets +end + +def join_master(token, retries = RETRIES, seconds = SECONDS) + ipv4 = external_ipv4s.first + name = "oneke-ip-#{ipv4.gsub '.', '-'}" + + msg :info, "Set local hostname: #{name}" + bash "hostnamectl set-hostname #{name}" + + onegate_vm_update ["ONEGATE_K8S_NODE_NAME=#{name}"] + + cni = [] + cni << 'multus' if ONEAPP_K8S_MULTUS_ENABLED + cni << ONEAPP_K8S_CNI_PLUGIN + + server_config = { + 'node-name' => name, + 'server' => "https://#{K8S_SUPERVISOR_EP}", + 'token' => token, + 'tls-san' => ONEAPP_K8S_EXTRA_SANS.split(',').map(&:strip).append(ONEAPP_VROUTER_ETH0_VIP0), + 'node-taint' => ['CriticalAddonsOnly=true:NoExecute'], + 'disable' => ['rke2-ingress-nginx'], + 'cni' => cni, + 'disable-kube-proxy' => ONEAPP_K8S_CNI_PLUGIN == 'cilium' + } + + msg :info, 'Prepare rke2-server config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(server_config), overwrite: true + + # The rke2-server systemd service restarts automatically and eventually joins. 
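+    # The loop below therefore keeps re-issuing `systemctl enable rke2-server.service --now`
+    # (with terminate: false, so a failed attempt raises instead of exiting) and sleeps
+    # between attempts, giving up only after RETRIES tries.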
+ # If it really cannot join we want to reflect this in OneFlow. + retries.times.to_a.reverse.each do |retry_num| + if retry_num.zero? + msg :error, 'Unable to join Control-Plane' + exit 1 + end + begin + msg :info, "Join master: #{name} / #{retry_num}" + bash 'systemctl enable rke2-server.service --now', terminate: false + rescue RuntimeError + sleep seconds + next + end + break + end + + onegate_vm_update ["ONEGATE_K8S_MASTER=#{ipv4}", "ONEGATE_K8S_TOKEN=#{server_config['token']}"] + + msg :info, 'Set this master to be a VNF backend' + vnf_supervisor_setup_backend + vnf_control_plane_setup_backend + + wait_for_control_plane + wait_for_kubelets +end + +def join_worker(token) + ipv4 = external_ipv4s.first + name = "oneke-ip-#{ipv4.gsub '.', '-'}" + + msg :info, "Set local hostname: #{name}" + bash "hostnamectl set-hostname #{name}" + + onegate_vm_update ["ONEGATE_K8S_NODE_NAME=#{name}"] + + agent_config = { + 'node-name' => name, + 'server' => "https://#{K8S_SUPERVISOR_EP}", + 'token' => token + } + + msg :info, 'Prepare rke2-agent config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(agent_config), overwrite: true + + msg :info, "Join worker: #{name}" + bash 'systemctl enable rke2-agent.service --now' +end + +def join_storage(token) + ipv4 = external_ipv4s.first + name = "oneke-ip-#{ipv4.gsub '.', '-'}" + + msg :info, "Set local hostname: #{name}" + bash "hostnamectl set-hostname #{name}" + + onegate_vm_update ["ONEGATE_K8S_NODE_NAME=#{name}"] + + agent_config = { + 'node-name' => name, + 'server ' => "https://#{K8S_SUPERVISOR_EP}", + 'token' => token, + 'node-taint' => ['node.longhorn.io/create-default-disk=true:NoSchedule'], + 'node-label' => ['node.longhorn.io/create-default-disk=true'] + } + + msg :info, 'Prepare rke2-agent config' + file '/etc/rancher/rke2/config.yaml', YAML.dump(agent_config), overwrite: true + + msg :info, "Join storage: #{name}" + bash 'systemctl enable rke2-agent.service --now' +end + +def detect_node + current_vm = onegate_vm_show + current_vmid = current_vm.dig 'VM', 'ID' + current_role = current_vm.dig 'VM', 'USER_TEMPLATE', 'ROLE_NAME' + + master_vm = master_vm_show + master_vmid = master_vm.dig 'VM', 'ID' + + master_vm = wait_for_any_master if current_vmid != master_vmid + + token = master_vm.dig 'VM', 'USER_TEMPLATE', 'ONEGATE_K8S_TOKEN' + + ready_to_join = !token.nil? + + results = { + init_master: current_role == 'master' && current_vmid == master_vmid && !ready_to_join, + join_master: current_role == 'master' && current_vmid != master_vmid && ready_to_join, + join_worker: current_role == 'worker' && current_vmid != master_vmid && ready_to_join, + join_storage: current_role == 'storage' && current_vmid != master_vmid && ready_to_join, + token: token + } + + msg :debug, "detect_node / #{results}" + results +end diff --git a/appliances/OneKE/appliance/longhorn.rb b/appliances/OneKE/appliance/longhorn.rb new file mode 100644 index 00000000..68d26dbf --- /dev/null +++ b/appliances/OneKE/appliance/longhorn.rb @@ -0,0 +1,143 @@ +# frozen_string_literal: true + +require 'base64' +require 'tmpdir' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def install_longhorn(addon_dir = ONE_ADDON_DIR) + msg :info, 'Install Longhorn' + fetch_longhorn addon_dir + pull_longhorn_images if ONE_SERVICE_AIRGAPPED +end + +def prepare_dedicated_storage + msg :info, 'Setup dedicated storage and populate /etc/fstab' + + # Previously executed in a start script, moved here because the start script was causing race condition issues. 
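+  # The script below is intended to be idempotent: it exits quietly when no extra disk is
+  # attached or when the mountpoint is already in use, creates a filesystem only when the
+  # device has none, and finally records the mount in /etc/fstab keyed by the filesystem
+  # UUID (with the defaults that is an 'xfs' filesystem mounted at '/var/lib/longhorn').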
+ puts bash <<~SCRIPT + # Silently abort when there is no disk attached. + if ! lsblk -n -o name '#{ONEAPP_STORAGE_DEVICE}'; then exit 0; fi + + # Make sure mountpoint exists. + install -o 0 -g 0 -m u=rwx,go=rx -d '#{ONEAPP_STORAGE_MOUNTPOINT}' + + # Silently abort when mountpoint is taken. + if mountpoint '#{ONEAPP_STORAGE_MOUNTPOINT}'; then exit 0; fi + + # Create new filesystem if the device does not contain any. + if ! blkid -s TYPE -o value '#{ONEAPP_STORAGE_DEVICE}'; then + 'mkfs.#{ONEAPP_STORAGE_FILESYSTEM}' '#{ONEAPP_STORAGE_DEVICE}' + fi + + export STORAGE_UUID=$(blkid -s UUID -o value '#{ONEAPP_STORAGE_DEVICE}') + # Assert that the detected UUID is not empty. + if [[ -z "$STORAGE_UUID" ]]; then exit 1; fi + + # Update fstab if necessary. + gawk -i inplace -f- /etc/fstab <s" + valuesContent: | + defaultSettings: + createDefaultDiskLabeledNodes: true + taintToleration: "node.longhorn.io/create-default-disk=true:NoSchedule" + longhornManager: + tolerations: + - key: node.longhorn.io/create-default-disk + value: "true" + operator: Equal + effect: NoSchedule + longhornDriver: + tolerations: + - key: node.longhorn.io/create-default-disk + value: "true" + operator: Equal + effect: NoSchedule + nodeSelector: + node.longhorn.io/create-default-disk: "true" + longhornUI: + tolerations: + - key: node.longhorn.io/create-default-disk + value: "true" + operator: Equal + effect: NoSchedule + nodeSelector: + node.longhorn.io/create-default-disk: "true" + --- + # Please note, changing default storage class is discouraged: https://longhorn.io/docs/1.3.0/best-practices/#storageclass + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: longhorn-retain + provisioner: driver.longhorn.io + allowVolumeExpansion: true + reclaimPolicy: Retain + volumeBindingMode: Immediate + parameters: + fsType: "ext4" + numberOfReplicas: "3" + staleReplicaTimeout: "2880" + fromBackup: "" + MANIFEST + + msg :info, "Generate Longhorn addon manifest: #{ONEAPP_K8S_LONGHORN_CHART_VERSION}" + Dir.mktmpdir do |temp_dir| + bash <<~SCRIPT + cd #{temp_dir}/ + helm pull longhorn/longhorn --version '#{ONEAPP_K8S_LONGHORN_CHART_VERSION}' + SCRIPT + + manifest %= { chart_b64: slurp("#{temp_dir}/longhorn-#{ONEAPP_K8S_LONGHORN_CHART_VERSION}.tgz") } + + file "#{addon_dir}/one-longhorn.yaml", manifest, overwrite: true + end +end + +def pull_longhorn_images(airgap_dir = ONE_AIRGAP_DIR) + # https://longhorn.io/docs/1.3.0/advanced-resources/deploy/airgap/ + + msg :info, "Pull Longhorn images: #{ONEAPP_K8S_LONGHORN_CHART_VERSION}" + + images = bash <<~SCRIPT, chomp: true + curl -fsSL 'https://raw.githubusercontent.com/longhorn/longhorn/v#{ONEAPP_K8S_LONGHORN_CHART_VERSION}/deploy/longhorn-images.txt' + SCRIPT + + images = images.lines + .map(&:strip) + .reject(&:empty?) + + pull_docker_images images, "#{airgap_dir}/one-longhorn/" +end diff --git a/appliances/OneKE/appliance/metallb.rb b/appliances/OneKE/appliance/metallb.rb new file mode 100644 index 00000000..d290d40b --- /dev/null +++ b/appliances/OneKE/appliance/metallb.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +require 'base64' +require 'tmpdir' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def install_metallb(addon_dir = ONE_ADDON_DIR) + msg :info, 'Install MetalLB' + fetch_metallb addon_dir +end + +def configure_metallb(addon_dir = ONE_ADDON_DIR) + msg :info, 'Configure MetalLB' + + if ONEAPP_K8S_METALLB_CONFIG.nil? 
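+        # No explicit config was supplied, so build a default IPAddressPool/L2Advertisement
+        # pair and fill spec.addresses from the ONEAPP_K8S_METALLB_RANGE* context variables,
+        # e.g. ONEAPP_K8S_METALLB_RANGE='192.168.150.87-192.168.150.88' (illustrative value).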
+ msg :info, 'Create MetalLB CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: default + namespace: metallb-system + spec: + addresses: [] + --- + apiVersion: metallb.io/v1beta1 + kind: L2Advertisement + metadata: + name: default + namespace: metallb-system + spec: + ipAddressPools: [default] + MANIFEST + + unless ONEAPP_K8S_METALLB_RANGES.empty? + ip_address_pool = documents.find do |doc| + doc['kind'] == 'IPAddressPool' && doc.dig('metadata', 'name') == 'default' + end + ip_address_pool['spec']['addresses'] = extract_metallb_ranges.map { |item| item.join('-') } + end + else + msg :info, 'Use MetalLB user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_METALLB_CONFIG + end + + msg :info, 'Generate MetalLB config manifest' + manifest = YAML.dump_stream *documents + file "#{addon_dir}/one-metallb-config.yaml", manifest, overwrite: true +end + +def fetch_metallb(addon_dir = ONE_ADDON_DIR) + bash <<~SCRIPT + helm repo add metallb https://metallb.github.io/metallb + helm repo update + SCRIPT + + manifest = <<~MANIFEST + --- + apiVersion: v1 + kind: Namespace + metadata: + name: metallb-system + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: one-metallb + namespace: kube-system + spec: + bootstrap: false + targetNamespace: metallb-system + chartContent: "%s" + valuesContent: | + controller: + image: + pullPolicy: IfNotPresent + speaker: + image: + pullPolicy: IfNotPresent + MANIFEST + + msg :info, "Generate MetalLB addon manifest: #{ONEAPP_K8S_METALLB_CHART_VERSION}" + Dir.mktmpdir do |temp_dir| + bash <<~SCRIPT + cd #{temp_dir}/ + helm pull metallb/metallb --version '#{ONEAPP_K8S_METALLB_CHART_VERSION}' + SCRIPT + + manifest %= { chart_b64: slurp("#{temp_dir}/metallb-#{ONEAPP_K8S_METALLB_CHART_VERSION}.tgz") } + + file "#{addon_dir}/one-metallb.yaml", manifest, overwrite: true + end +end + +def extract_metallb_ranges(ranges = ONEAPP_K8S_METALLB_RANGES) + ranges.compact + .map(&:strip) + .reject(&:empty?) + .map { |item| item.split('-').map(&:strip) } + .reject { |item| item.length > 2 } + .map { |item| item.length == 1 ? [item.first, item.first] : item } + .reject { |item| item.map(&:empty?).any? 
} + .reject { |item| !(ipv4?(item.first) && ipv4?(item.last)) } +end diff --git a/appliances/OneKE/appliance/metallb_spec.rb b/appliances/OneKE/appliance/metallb_spec.rb new file mode 100644 index 00000000..89e4b353 --- /dev/null +++ b/appliances/OneKE/appliance/metallb_spec.rb @@ -0,0 +1,136 @@ +# frozen_string_literal: true + +require 'base64' +require 'rspec' +require 'tmpdir' +require 'yaml' + +require_relative 'metallb.rb' + +RSpec.describe 'extract_metallb_ranges' do + it 'should extract and return all ranges (positive)' do + input = [ + '10.11.12.13', + '10.11.12.13-', + '10.11.12.13-10.11.12.31', + ' 10.11.12.13-10.11.12.31', + '10.11.12.13-10.11.12.31 ', + '10.11.12.13 -10.11.12.31', + '10.11.12.13- 10.11.12.31' + ] + output = [ + %w[10.11.12.13 10.11.12.13], + %w[10.11.12.13 10.11.12.13], + %w[10.11.12.13 10.11.12.31], + %w[10.11.12.13 10.11.12.31], + %w[10.11.12.13 10.11.12.31], + %w[10.11.12.13 10.11.12.31], + %w[10.11.12.13 10.11.12.31] + ] + expect(extract_metallb_ranges(input)).to eq output + end + + it 'should extract and return no ranges (negative)' do + input = [ + '', + '-10.11.12.13', + 'asd.11.12.13-10.11.12.31', + '10.11.12.13-10.11.12.31-10.11.12.123' + ] + output = [] + expect(extract_metallb_ranges(input)).to eq output + end +end + +RSpec.describe 'configure_metallb' do + it 'should apply user-defined ranges (empty)' do + stub_const 'ONEAPP_K8S_METALLB_CONFIG', nil + stub_const 'ONEAPP_K8S_METALLB_RANGES', [] + output = YAML.load_stream <<~MANIFEST + --- + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: default + namespace: metallb-system + spec: + addresses: [] + --- + apiVersion: metallb.io/v1beta1 + kind: L2Advertisement + metadata: + name: default + namespace: metallb-system + spec: + ipAddressPools: + - default + MANIFEST + Dir.mktmpdir do |temp_dir| + configure_metallb temp_dir + result = YAML.load_stream File.read "#{temp_dir}/one-metallb-config.yaml" + expect(result).to eq output + end + end + + it 'should apply user-defined ranges' do + stub_const 'ONEAPP_K8S_METALLB_CONFIG', nil + stub_const 'ONEAPP_K8S_METALLB_RANGES', ['192.168.150.87-192.168.150.88'] + output = YAML.load_stream <<~MANIFEST + --- + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: default + namespace: metallb-system + spec: + addresses: + - 192.168.150.87-192.168.150.88 + --- + apiVersion: metallb.io/v1beta1 + kind: L2Advertisement + metadata: + name: default + namespace: metallb-system + spec: + ipAddressPools: + - default + MANIFEST + Dir.mktmpdir do |temp_dir| + configure_metallb temp_dir + result = YAML.load_stream File.read "#{temp_dir}/one-metallb-config.yaml" + expect(result).to eq output + end + end + + it 'should apply user-defined config manifest (and ignore user-defined ranges)' do + manifest = <<~MANIFEST + --- + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: default + namespace: metallb-system + spec: + addresses: + - 192.168.150.87-192.168.150.88 + --- + apiVersion: metallb.io/v1beta1 + kind: L2Advertisement + metadata: + name: default + namespace: metallb-system + spec: + ipAddressPools: + - default + MANIFEST + stub_const 'ONEAPP_K8S_METALLB_CONFIG', Base64.encode64(manifest) + stub_const 'ONEAPP_K8S_METALLB_RANGES', ['1.2.3.4-1.2.3.4'] + output = YAML.load_stream manifest + Dir.mktmpdir do |temp_dir| + configure_metallb temp_dir + result = YAML.load_stream File.read "#{temp_dir}/one-metallb-config.yaml" + expect(result).to eq output + end + end + +end diff --git 
a/appliances/OneKE/appliance/multus.rb b/appliances/OneKE/appliance/multus.rb new file mode 100644 index 00000000..ee040e1b --- /dev/null +++ b/appliances/OneKE/appliance/multus.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +require 'base64' +require 'yaml' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def configure_multus(manifest_dir = K8S_MANIFEST_DIR) + msg :info, 'Configure Multus' + + if ONEAPP_K8S_MULTUS_CONFIG.nil? + msg :info, 'Create Multus CRD config from user-provided ranges' + + documents = YAML.load_stream <<~MANIFEST + --- + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-multus + namespace: kube-system + spec: + valuesContent: |- + rke2-whereabouts: + enabled: true + MANIFEST + else + msg :info, 'Use Multus user-provided config' + documents = YAML.load_stream Base64.decode64 ONEAPP_K8S_MULTUS_CONFIG + end + + msg :info, 'Generate Multus config manifest' + manifest = YAML.dump_stream *documents + file "#{manifest_dir}/rke2-multus-config.yaml", manifest, overwrite: true +end diff --git a/appliances/OneKE/appliance/onegate.rb b/appliances/OneKE/appliance/onegate.rb new file mode 100644 index 00000000..f1c7e511 --- /dev/null +++ b/appliances/OneKE/appliance/onegate.rb @@ -0,0 +1,133 @@ +# frozen_string_literal: true + +require 'json' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def onegate_service_show + JSON.parse bash 'onegate --json service show' +end + +def onegate_vm_show(vmid = '') + JSON.parse bash "onegate --json vm show #{vmid}" +end + +def onegate_vm_update(data, vmid = '') + bash "onegate vm update #{vmid} --data \"#{data.join('\n')}\"" +end + +def ip_addr_show(ifname = '') + JSON.parse bash "ip --json addr show #{ifname}" +end + +def all_vms_show + onegate_service = onegate_service_show + + roles = onegate_service.dig 'SERVICE', 'roles' + if roles.empty? + msg :error, 'No roles found in Onegate' + exit 1 + end + + vmids = roles.each_with_object [] do |role, acc| + nodes = role.dig 'nodes' + next if nodes.nil? + + nodes.each do |node| + acc << node.dig('vm_info', 'VM', 'ID') + end + end + + vmids.each_with_object [] do |vmid, acc| + acc << onegate_vm_show(vmid) + end +end + +def master_vms_show + onegate_service = onegate_service_show + + roles = onegate_service.dig 'SERVICE', 'roles' + if roles.empty? + msg :error, 'No roles found in Onegate' + exit 1 + end + + role = roles.find { |item| item['name'] == 'master' } + if role.nil? + msg :error, 'No master role found in Onegate' + exit 1 + end + + nodes = role.dig 'nodes' + if nodes.empty? + msg :error, 'No master nodes found in Onegate' + exit 1 + end + + vmids = nodes.map { |node| node.dig 'vm_info', 'VM', 'ID' } + + vmids.each_with_object [] do |vmid, acc| + acc << onegate_vm_show(vmid) + end +end + +def master_vm_show + onegate_service = onegate_service_show + + roles = onegate_service.dig 'SERVICE', 'roles' + if roles.empty? + msg :error, 'No roles found in Onegate' + exit 1 + end + + role = roles.find { |item| item['name'] == 'master' } + if role.nil? + msg :error, 'No master role found in Onegate' + exit 1 + end + + nodes = role.dig 'nodes' + if nodes.empty? + msg :error, 'No nodes found in Onegate' + exit 1 + end + + vmid = nodes.first.dig 'vm_info', 'VM', 'ID' + + onegate_vm_show vmid +end + +def external_ipv4s + onegate_vm = onegate_vm_show + + nics = onegate_vm.dig 'VM', 'TEMPLATE', 'NIC' + if nics.empty? + msg :error, 'No nics found in Onegate' + exit 1 + end + + ip_addr = ip_addr_show + if ip_addr.empty? 
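+        # `ip --json addr show` returned no interfaces, so the NIC MACs reported by OneGate
+        # cannot be matched against local addresses.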
+ msg :error, 'No local addresses found' + exit 1 + end + + ipv4s = nics.each_with_object [] do |nic, acc| + addr = ip_addr.find do |item| + next unless item['address'].downcase == nic['MAC'].downcase + + item['addr_info'].find do |info| + info['family'] == 'inet' && info['local'] == nic['IP'] + end + end + acc << nic['IP'] unless addr.nil? + end + + if ipv4s.empty? + msg :error, 'No IPv4 addresses found' + exit 1 + end + + ipv4s +end diff --git a/appliances/OneKE/appliance/onegate_spec.rb b/appliances/OneKE/appliance/onegate_spec.rb new file mode 100644 index 00000000..67aa0e7e --- /dev/null +++ b/appliances/OneKE/appliance/onegate_spec.rb @@ -0,0 +1,559 @@ +# frozen_string_literal: true + +require 'json' +require 'rspec' + +require_relative 'onegate.rb' + +RSpec.describe 'all_vms_show' do + before do + @svc = JSON.parse(<<~JSON) + { + "SERVICE": { + "name": "asd", + "id": "21", + "state": 2, + "roles": [ + { + "name": "master", + "cardinality": 1, + "state": 2, + "nodes": [ + { + "deploy_id": 49, + "running": null, + "vm_info": { + "VM": { + "ID": "49", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "master_0_(service_21)" + } + } + } + ] + }, + { + "name": "worker", + "cardinality": 1, + "state": 2, + "nodes": [ + { + "deploy_id": 50, + "running": null, + "vm_info": { + "VM": { + "ID": "50", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "worker_0_(service_21)" + } + } + } + ] + }, + { + "name": "storage", + "cardinality": 1, + "state": 2, + "nodes": [ + { + "deploy_id": 51, + "running": null, + "vm_info": { + "VM": { + "ID": "51", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "storage_0_(service_21)" + } + } + } + ] + } + ] + } + } + JSON + @vms = [] + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "master_0_(service_21)", + "ID": "49", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_HASH": "09a9ed140fec2fa1a2281a3125952d6f2951b67a67534647b0a606ae2d478f60", + "ONEGATE_K8S_MASTER": "172.20.0.100", + "ONEGATE_K8S_TOKEN": "sg7711.p19vy0eqxefc0lqz", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.100", + "MAC": "02:00:ac:14:00:64", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "worker_0_(service_21)", + "ID": "50", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": 
"ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "READY": "YES", + "ROLE_NAME": "worker", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.101", + "MAC": "02:00:ac:14:00:65", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "storage_0_(service_21)", + "ID": "51", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_ADDRESS,ONEAPP_K8S_TOKEN,ONEAPP_K8S_HASH,ONEAPP_K8S_NODENAME,ONEAPP_K8S_PORT,ONEAPP_K8S_TAINTED_MASTER,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_ADMIN_USERNAME,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "READY": "YES", + "ROLE_NAME": "storage", + "SERVICE_ID": "21", + "USER_INPUTS": { + "ONEAPP_K8S_ADDRESS": "O|text|Master node address", + "ONEAPP_K8S_HASH": "O|text|Secret hash (to join node into the cluster)", + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_NODENAME": "O|text|Master node name", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)", + "ONEAPP_K8S_TOKEN": "O|password|Secret token (to join node into the cluster)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.102", + "MAC": "02:00:ac:14:00:66", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + end + it 'should return all vms belonging to svc' do + allow(self).to receive(:onegate_service_show).and_return(@svc) + allow(self).to receive(:onegate_vm_show).and_return(*@vms) + expect(all_vms_show.map { |item| item['VM']['TEMPLATE']['NIC'][0]['IP'] }).to eq ['172.20.0.100', '172.20.0.101', '172.20.0.102'] + end +end + +RSpec.describe 'master_vms_show' do + before do + @svc = JSON.parse(<<~JSON) + { + "SERVICE": { + "name": "asd", + "id": "4", + "state": 10, + "roles": [ + { + "name": "vnf", + "cardinality": 1, + "state": 2, + "nodes": [ + { + "deploy_id": 12, + "running": null, + "vm_info": { + "VM": { + "ID": "12", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "vnf_0_(service_4)" + } + } + } + ] + }, + { + "name": "master", + "cardinality": 3, + "state": 10, + "nodes": [ + { + "deploy_id": 13, + "running": null, + "vm_info": { + "VM": { + "ID": "13", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "master_0_(service_4)" + } + } + }, + { + "deploy_id": 14, + "running": null, + "vm_info": { + "VM": { + "ID": "14", + "UID": "0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "master_1_(service_4)" + } + } + }, + { + "deploy_id": 15, + "running": null, + "vm_info": { + "VM": { + "ID": "15", + "UID": 
"0", + "GID": "0", + "UNAME": "oneadmin", + "GNAME": "oneadmin", + "NAME": "master_2_(service_4)" + } + } + } + ] + }, + { + "name": "worker", + "cardinality": 0, + "state": 2, + "nodes": [] + }, + { + "name": "storage", + "cardinality": 0, + "state": 2, + "nodes": [] + } + ] + } + } + JSON + @vms = [] + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "master_0_(service_4)", + "ID": "13", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_PORT,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_HASH": "c74201821cb4878b6896d3284f825be738cb11dbc2c5153e88c84da0b3d3ab04", + "ONEGATE_K8S_KEY": "146ecb3e9d8bce9f584f55b234bd2700d2a7747177fb8fd60f42a161a48e7c07", + "ONEGATE_K8S_MASTER": "10.2.11.201", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-10-2-11-201", + "ONEGATE_K8S_TOKEN": "ifv2c4.h8d88lzjlyl5mkod", + "ONEGATE_LB0_IP": "10.2.11.86", + "ONEGATE_LB0_PORT": "6443", + "ONEGATE_LB0_PROTOCOL": "TCP", + "ONEGATE_LB0_SERVER_HOST": "10.2.11.201", + "ONEGATE_LB0_SERVER_PORT": "6443", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "4", + "USER_INPUTS": { + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "10.2.11.201", + "MAC": "02:00:0a:02:0b:c9", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "master_1_(service_4)", + "ID": "14", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_PORT,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-10-2-11-202", + "ONEGATE_LB0_IP": "10.2.11.86", + "ONEGATE_LB0_PORT": "6443", + "ONEGATE_LB0_PROTOCOL": "TCP", + "ONEGATE_LB0_SERVER_HOST": "10.2.11.202", + "ONEGATE_LB0_SERVER_PORT": "6443", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "4", + "USER_INPUTS": { + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)" + } + }, + "TEMPLATE": { + "NIC": [ + { + "IP": "10.2.11.202", + "MAC": "02:00:0a:02:0b:ca", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + @vms << JSON.parse(<<~JSON) + { + "VM": { + "NAME": "master_2_(service_4)", + "ID": "15", + "STATE": "3", + "LCM_STATE": "3", + "USER_TEMPLATE": { + "INPUTS_ORDER": "ONEAPP_K8S_PORT,ONEAPP_K8S_PODS_NETWORK,ONEAPP_K8S_METALLB_RANGE,ONEAPP_K8S_METALLB_CONFIG", + "ONEGATE_K8S_NODE_NAME": "oneke-ip-10-2-11-203", + "ONEGATE_LB0_IP": "10.2.11.86", + "ONEGATE_LB0_PORT": "6443", + "ONEGATE_LB0_PROTOCOL": "TCP", + "ONEGATE_LB0_SERVER_HOST": "10.2.11.203", + "ONEGATE_LB0_SERVER_PORT": "6443", + "READY": "YES", + "ROLE_NAME": "master", + "SERVICE_ID": "4", + "USER_INPUTS": { + "ONEAPP_K8S_METALLB_CONFIG": "O|text64|Custom MetalLB config", + "ONEAPP_K8S_METALLB_RANGE": "O|text|MetalLB IP range (default none)", + "ONEAPP_K8S_PODS_NETWORK": "O|text|Pods network in CIDR (default 10.244.0.0/16)", + "ONEAPP_K8S_PORT": "O|text|Kubernetes API port (default 6443)" + } + }, + "TEMPLATE": { + "NIC": [ + { + 
"IP": "10.2.11.203", + "MAC": "02:00:0a:02:0b:cb", + "NAME": "_NIC0", + "NETWORK": "service" + } + ], + "NIC_ALIAS": [] + } + } + } + JSON + end + it 'should return all vms belonging to the master role' do + allow(self).to receive(:onegate_service_show).and_return(@svc) + allow(self).to receive(:onegate_vm_show).and_return(*@vms) + expect(master_vms_show.map { |item| item['VM']['TEMPLATE']['NIC'][0]['IP'] }).to eq ['10.2.11.201', '10.2.11.202', '10.2.11.203'] + end +end + +RSpec.describe 'external_ipv4s' do + it 'should return list of ipv4 addresses' do + allow(self).to receive(:onegate_vm_show).and_return JSON.parse <<~JSON + { + "VM": { + "TEMPLATE": { + "NIC": [ + { + "IP": "172.20.0.100", + "MAC": "02:00:ac:14:00:64", + "NAME": "_NIC0", + "NETWORK": "service" + } + ] + } + } + } + JSON + allow(self).to receive(:ip_addr_show).and_return JSON.parse <<~JSON + [ + { + "ifindex": 1, + "ifname": "lo", + "flags": [ + "LOOPBACK", + "UP", + "LOWER_UP" + ], + "mtu": 65536, + "qdisc": "noqueue", + "operstate": "UNKNOWN", + "group": "default", + "txqlen": 1000, + "link_type": "loopback", + "address": "00:00:00:00:00:00", + "broadcast": "00:00:00:00:00:00", + "addr_info": [ + { + "family": "inet", + "local": "127.0.0.1", + "prefixlen": 8, + "scope": "host", + "label": "lo", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + }, + { + "family": "inet6", + "local": "::1", + "prefixlen": 128, + "scope": "host", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + } + ] + }, + { + "ifindex": 2, + "ifname": "eth0", + "flags": [ + "BROADCAST", + "MULTICAST", + "UP", + "LOWER_UP" + ], + "mtu": 1500, + "qdisc": "pfifo_fast", + "operstate": "UP", + "group": "default", + "txqlen": 1000, + "link_type": "ether", + "address": "02:00:ac:14:00:64", + "broadcast": "ff:ff:ff:ff:ff:ff", + "addr_info": [ + { + "family": "inet", + "local": "172.20.0.100", + "prefixlen": 24, + "broadcast": "172.20.0.255", + "scope": "global", + "label": "eth0", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + }, + { + "family": "inet6", + "local": "fe80::acff:fe14:64", + "prefixlen": 64, + "scope": "link", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + } + ] + }, + { + "ifindex": 3, + "ifname": "docker0", + "flags": [ + "NO-CARRIER", + "BROADCAST", + "MULTICAST", + "UP" + ], + "mtu": 1500, + "qdisc": "noqueue", + "operstate": "DOWN", + "group": "default", + "link_type": "ether", + "address": "02:42:04:21:6f:5d", + "broadcast": "ff:ff:ff:ff:ff:ff", + "addr_info": [ + { + "family": "inet", + "local": "172.17.0.1", + "prefixlen": 16, + "broadcast": "172.17.255.255", + "scope": "global", + "label": "docker0", + "valid_life_time": 4294967295, + "preferred_life_time": 4294967295 + } + ] + } + ] + JSON + expect(external_ipv4s).to eq ['172.20.0.100'] + end +end diff --git a/appliances/OneKE/appliance/traefik.rb b/appliances/OneKE/appliance/traefik.rb new file mode 100644 index 00000000..7b52c373 --- /dev/null +++ b/appliances/OneKE/appliance/traefik.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +require 'base64' +require 'tmpdir' + +require_relative 'config.rb' +require_relative 'helpers.rb' + +def install_traefik(addon_dir = ONE_ADDON_DIR) + msg :info, 'Install Traefik' + fetch_traefik addon_dir +end + +def fetch_traefik(addon_dir = ONE_ADDON_DIR) + bash <<~SCRIPT + helm repo add traefik https://helm.traefik.io/traefik + helm repo update + SCRIPT + + manifest = <<~MANIFEST + --- + apiVersion: v1 + kind: Namespace + metadata: + name: traefik-system + --- 
+ apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: one-traefik + namespace: kube-system + spec: + bootstrap: false + targetNamespace: traefik-system + chartContent: "%s" + valuesContent: | + deployment: + replicas: 2 + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: traefik + service: + type: NodePort + ports: + web: + nodePort: 32080 + websecure: + nodePort: 32443 + MANIFEST + + msg :info, "Generate Traefik addon manifest: #{ONEAPP_K8S_TRAEFIK_CHART_VERSION}" + Dir.mktmpdir do |temp_dir| + bash <<~SCRIPT + cd #{temp_dir}/ + helm pull traefik/traefik --version '#{ONEAPP_K8S_TRAEFIK_CHART_VERSION}' + SCRIPT + + manifest %= { chart_b64: slurp("#{temp_dir}/traefik-#{ONEAPP_K8S_TRAEFIK_CHART_VERSION}.tgz") } + + file "#{addon_dir}/one-traefik.yaml", manifest, overwrite: true + end +end diff --git a/appliances/OneKE/appliance/vnf.rb b/appliances/OneKE/appliance/vnf.rb new file mode 100644 index 00000000..37af622a --- /dev/null +++ b/appliances/OneKE/appliance/vnf.rb @@ -0,0 +1,144 @@ +# frozen_string_literal: true + +require_relative 'config.rb' +require_relative 'helpers.rb' +require_relative 'onegate.rb' + +def configure_vnf(gw_ipv4 = ONEAPP_VROUTER_ETH1_VIP0) + gw_ok = !gw_ipv4.nil? && ipv4?(gw_ipv4) + + if gw_ok + msg :debug, 'Configure default gateway (temporarily)' + bash "ip route replace default via #{gw_ipv4} dev eth0" + end + + msg :info, 'Install the vnf-restore service' + + file '/etc/systemd/system/vnf-restore.service', <<~SERVICE + [Unit] + After=network.target + + [Service] + Type=oneshot + ExecStart=/bin/sh -ec '#{gw_ok ? "ip route replace default via #{gw_ipv4} dev eth0" : ':'}' + + [Install] + WantedBy=multi-user.target + SERVICE + + # Make sure vnf-restore is triggered everytime one-context-reconfigure.service runs + file '/etc/systemd/system/one-context-reconfigure.service.d/vnf-restore.conf', <<~SERVICE + [Service] + ExecStartPost=/usr/bin/systemctl restart vnf-restore.service + SERVICE + + msg :info, 'Enable and start the vnf-restore service' + bash <<~SCRIPT + systemctl daemon-reload + systemctl enable vnf-restore.service --now + SCRIPT +end + +def vnf_supervisor_setup_backend(index = 0, + lb_ipv4 = ONEAPP_VROUTER_ETH0_VIP0, + lb_port = 9345) + + lb_ok = !lb_ipv4.nil? && ipv4?(lb_ipv4) && port?(lb_port) + + unless lb_ok + msg :error, "Invalid IPv4/port for VNF/HAPROXY/#{index}, aborting.." + exit 1 + end + + ipv4 = external_ipv4s + .reject { |item| item == lb_ipv4 } + .first + + msg :info, "Register VNF/HAPROXY/#{index} backend in OneGate" + + onegate_vm_update [ + "ONEGATE_HAPROXY_LB#{index}_IP=#{lb_ipv4}", + "ONEGATE_HAPROXY_LB#{index}_PORT=#{lb_port}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_HOST=#{ipv4}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_PORT=#{lb_port}" + ] +end + +def vnf_control_plane_setup_backend(index = 1, + lb_ipv4 = ONEAPP_VROUTER_ETH0_VIP0, + lb_port = 6443) + + lb_ok = !lb_ipv4.nil? && ipv4?(lb_ipv4) && port?(lb_port) + + unless lb_ok + msg :error, "Invalid IPv4/port for VNF/HAPROXY/#{index}, aborting.." 
+ exit 1 + end + + ipv4 = external_ipv4s + .reject { |item| item == lb_ipv4 } + .first + + msg :info, "Register VNF/HAPROXY/#{index} backend in OneGate" + + onegate_vm_update [ + "ONEGATE_HAPROXY_LB#{index}_IP=#{lb_ipv4}", + "ONEGATE_HAPROXY_LB#{index}_PORT=#{lb_port}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_HOST=#{ipv4}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_PORT=#{lb_port}" + ] +end + +def vnf_ingress_setup_https_backend(index = 2, + lb_ipv4 = ONEAPP_VROUTER_ETH0_VIP0, + lb_port = ONEAPP_VNF_HAPROXY_LB2_PORT) + + lb_ok = !lb_ipv4.nil? && ipv4?(lb_ipv4) && port?(lb_port) + + unless lb_ok + msg :error, "Invalid IPv4/port for VNF/HAPROXY/#{index}, aborting.." + exit 1 + end + + ipv4 = external_ipv4s + .reject { |item| item == lb_ipv4 } + .first + + msg :info, "Register VNF/HAPROXY/#{index} backend in OneGate" + + server_port = lb_port.to_i + 32_000 + + onegate_vm_update [ + "ONEGATE_HAPROXY_LB#{index}_IP=#{lb_ipv4}", + "ONEGATE_HAPROXY_LB#{index}_PORT=#{lb_port}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_HOST=#{ipv4}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_PORT=#{server_port}" + ] +end + +def vnf_ingress_setup_http_backend(index = 3, + lb_ipv4 = ONEAPP_VROUTER_ETH0_VIP0, + lb_port = ONEAPP_VNF_HAPROXY_LB3_PORT) + + lb_ok = !lb_ipv4.nil? && ipv4?(lb_ipv4) && port?(lb_port) + + unless lb_ok + msg :error, "Invalid IPv4/port for VNF/HAPROXY/#{index}, aborting.." + exit 1 + end + + ipv4 = external_ipv4s + .reject { |item| item == lb_ipv4 } + .first + + msg :info, "Register VNF/HAPROXY/#{index} backend in OneGate" + + server_port = lb_port.to_i + 32_000 + + onegate_vm_update [ + "ONEGATE_HAPROXY_LB#{index}_IP=#{lb_ipv4}", + "ONEGATE_HAPROXY_LB#{index}_PORT=#{lb_port}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_HOST=#{ipv4}", + "ONEGATE_HAPROXY_LB#{index}_SERVER_PORT=#{server_port}" + ] +end diff --git a/appliances/lib/artifacts/vnf/ha-check-status.sh b/appliances/lib/artifacts/vnf/ha-check-status.sh new file mode 100644 index 00000000..ae892c9d --- /dev/null +++ b/appliances/lib/artifacts/vnf/ha-check-status.sh @@ -0,0 +1,100 @@ +#!/bin/sh + +# ---------------------------------------------------------------------------- # +# Copyright 2018-2020, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +# ---------------------------------------------------------------------------- # + +set -e + +WAITOUT=5 # seconds for status file to emerge + +if [ -f /run/keepalived.pid ] ; then + # delete old file if exists + rm -f /tmp/keepalived.data + + # prompt keepalived to create status file + pid=$(cat /run/keepalived.pid) + if [ -n "$pid" ] && kill -0 "$pid" ; then + kill -USR1 "$pid" + else + echo "KEEPALIVED: NOT RUNNING" + exit 1 + fi + + while [ "$WAITOUT" -gt 0 ] ; do + if [ -f /tmp/keepalived.data ] ; then + break + fi + + WAITOUT=$(( WAITOUT - 1 )) + sleep 1s + done + + if [ -f /tmp/keepalived.data ] ; then + instances=$(awk ' + { + if ($0 ~ /^[[:space:]]*VRRP Instance/) { + instance = $(NF); + state = "instance"; + } else if ($0 ~ /^[[:space:]]*VRRP Sync Group/) { + group_name = $(NF-1); + group_state = $(NF); + gsub(/,/, "", group_name); + gsub(/,/, "", state_state); + vgroup[group_name] = group_state; + } else if (state == "instance") { + if ($1 == "State") { + if ($(NF) == "MASTER") + vrrp[instance] = "MASTER"; + else + vrrp[instance] = "BACKUP"; + state = ""; + } + } + } + END { + for (i in vrrp) + printf("VRRP-INSTANCE(%s): %s\n", i, vrrp[i]); + for (i in vgroup) + printf("SYNC-GROUP(%s): %s\n", i, vgroup[i]); + + # workaround for changed behavior of Keepalived regarding the sync + # groups which are ignored and removed if they contain only one + # interface... + if ((length(vrrp) == 1) && (length(vgroup) == 0)) { + for (i in vrrp) + printf("SYNC-GROUP(vrouter): %s\n", vrrp[i]); + } + } + ' < /tmp/keepalived.data) + + if [ -n "$instances" ] ; then + # this means there is some vrrp instance + echo "KEEPALIVED: RUNNING" + echo "$instances" + else + # no vrrp instance - keepalived does nothing + echo "KEEPALIVED: RUNNING IDLE (NO INSTANCE)" + fi + + exit 0 + else + # no data - timeouted... + echo "KEEPALIVED: UNKNOWN (NO DATA)" + exit 1 + fi +fi + +exit 1 diff --git a/appliances/lib/artifacts/vnf/ha-failover.sh b/appliances/lib/artifacts/vnf/ha-failover.sh new file mode 100644 index 00000000..05ba16f8 --- /dev/null +++ b/appliances/lib/artifacts/vnf/ha-failover.sh @@ -0,0 +1,261 @@ +#!/usr/bin/env bash + +# ---------------------------------------------------------------------------- # +# Copyright 2018-2020, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +# ---------------------------------------------------------------------------- # + + +# shellcheck disable=SC1090 +# shellcheck disable=SC2034 +true + +# +# keepalived transition +# + +TYPE="$1" +NAME="$2" +TARGET_STATE="$3" +PRIORITY="$4" + +# +# globals +# + +# wait max two minutes for previous instance of this script to finish or abort +WAIT_ON_LOCK=120 + +# wait max five minutes for the service appliance script to finish or abort +WAIT_ON_SERVICE_SCRIPT=300 + +# this is our status file where we signal our state +HA_FAILOVER_STATUSFILE='/run/keepalived/ha-failover.status' + +# this is our extra logfile (along the syslog) to log our messages +HA_FAILOVER_LOGFILE='/var/log/ha-failover.log' + + +# +# functions +# + +# TODO: change service script so it can be sourced and we don't duplicate stuff +# here... + +ONE_SERVICE_DIR=/etc/one-appliance +ONE_SERVICE_SETUP_DIR="/opt/one-appliance" +ONE_SERVICE_CONTEXTFILE="${ONE_SERVICE_DIR}/context.json" +ONE_SERVICE_COMMON="${ONE_SERVICE_DIR}/service.d/common.sh" +ONE_SERVICE_APPLIANCE="${ONE_SERVICE_DIR}/service.d/appliance.sh" + +# source service appliance scripts +. "$ONE_SERVICE_COMMON" +. "$ONE_SERVICE_APPLIANCE" + +# args: +logmsg() +{ + _type="$1" + shift + + msg "$_type" "${CMD}[$$]:" "$@" 2>&1 | \ + logger --stderr -t HA_KEEPALIVED 2>> "$HA_FAILOVER_LOGFILE" +} + +started_transition_text() +{ + echo "${TARGET_STATE}: IN PROGRESS" +} + +completed_transition_text() +{ + echo "${TARGET_STATE}: DONE" +} + +aborted_transition_text() +{ + echo "${TARGET_STATE}: ABORTED" +} + +failed_transition_text() +{ + echo "${TARGET_STATE}: FAILED" +} + +on_exit() +{ + _status=$(cat "$HA_FAILOVER_STATUSFILE") + + if [ "$_status" != "$(completed_transition_text)" ] \ + && [ "$_status" != "$(aborted_transition_text)" ] \ + ; + then + failed_transition_text > "$HA_FAILOVER_STATUSFILE" + fi +} + + +# +# locking +# + +CMD=$(realpath "$0") + +if [ "${_KEEPALIVED_HA_NOTIFY_SCRIPT}" != "$CMD" ] ; then + logmsg info "Lock (args: $*) or wait (max ${WAIT_ON_LOCK} sec.)..." + exec env _KEEPALIVED_HA_NOTIFY_SCRIPT="$CMD" \ + flock -x -w "$WAIT_ON_LOCK" "$CMD" "$CMD" "$@" +fi + + +# +# main +# + +trap on_exit INT QUIT TERM EXIT + +# TODO: find a better solution to avoid the dead-lock with the appliance script +logmsg info "Firstly wait for the service appliance to finish (max ${WAIT_ON_SERVICE_SCRIPT} sec.)..." + +# check status file +_timeout="$WAIT_ON_SERVICE_SCRIPT" +while [ "$_timeout" -gt 0 ] ; do + _status=$(cat '/etc/one-appliance/status' 2>/dev/null) + + case "$_status" in + bootstrap_success) + # we can continue + break + ;; + *_failure) + logmsg error "Service appliance failed - ABORT" + exit 1 + ;; + *) + # we wait + sleep 1 + _timeout=$((_timeout - 1)) + ;; + esac +done + +if [ "$_timeout" -eq 0 ] ; then + logmsg error "Reached timeout waiting for service appliance - ABORT" + exit 1 +fi + +# save the current keepalived status +CURRENT_KEEPALIVED_STATUS=$("${VNF_KEEPALIVED_HA_STATUS_SCRIPT}" | \ + awk '/^SYNC-GROUP[(]vrouter[)]/ {print $NF}') + + +# TODO: +# verify that our transition is not stale otherwise abort... +# +# sometimes keepalived triggers notify script with a stale state and we need to +# abort the processing in such a case due to this strange and undesirable +# situation - so the result in the status file of this script won't be DONE but +# FAILED...FYI +# +# Example: +# Keepalived is in MASTER state but for some reason this script is started +# with argument set as BACKUP... 
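+# In that situation the case statement further below compares the requested TARGET_STATE with
+# CURRENT_KEEPALIVED_STATUS (taken from the ha-check-status output) and, if they differ, records
+# the stale transition in the status file (via aborted_transition_text) instead of acting on it.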
+# +# This script is not a proper transition script - it is a notify script. That +# means it will be started as a notification that keepalived's state has +# changed and to signal which state it has now. +# +# So it has no effect on the keepalived cluster per se - in contrast with +# a proper transition script (not provided by keepalived AFAIK) which would +# be responsible for transfer of the cluster from one state to another (e.g.: +# BACKUP -> MASTER). +# +# From this we can see that if keepalived is MASTER but this script receive +# argument BACKUP then something shady is happenning... +# +# the reason for this is unknown as of this moment (to me) - it can be that +# keepalived does not guarantee the proper event queue - for that reason maybe +# the keepalived configuration should use 'notify_fifo' instead of 'notify'? +# +# Or simply...keepalived has a bug... +# +case "$CURRENT_KEEPALIVED_STATUS" in + MASTER|BACKUP|FAULT|STOP) + if [ "$TARGET_STATE" != "$CURRENT_KEEPALIVED_STATUS" ] ; then + logmsg warning "Keepalived cluster is ahead (${CURRENT_KEEPALIVED_STATUS}) - this transition is stale (${TARGET_STATE}) - ABORT" + aborted_transition_text > "$HA_FAILOVER_STATUSFILE" + exit 0 + fi + ;; +esac + +logmsg info "Started VNF transition to the state: ${TARGET_STATE}" + +started_transition_text > "$HA_FAILOVER_STATUSFILE" + +# we build ENABLED/DISABLED lists of VNFs +load_context "$ONE_SERVICE_CONTEXTFILE" +sortout_vnfs + +case "$TARGET_STATE" in + MASTER) + # (re)start requested VNFs/services except keepalived itself... + _vnfs=$(for _vnf in $ENABLED_VNF_LIST ; do echo "$_vnf" ; done \ + | sed '/^KEEPALIVED$/d' | tr '\n' ' ') + + # what if keepalived was reconfigured and stopped? + if is_running 'KEEPALIVED' ; then + # everything seems to be ok - we continue as intended + logmsg info "Restarting: ${_vnfs}" + + # reload/start services where it makes sense + for _vnf in ${_vnfs} ; do + if is_running "${_vnf}" ; then + reload_vnfs "$_vnf" + else + start_vnfs "$_vnf" + fi + done + else + # keepalived is not running... + _vnfs=$(for _vnf in $ALL_SUPPORTED_VNF_NAMES ; do echo "$_vnf" ; done \ + | sed '/^KEEPALIVED$/d' | tr '\n' ' ') + + logmsg warning "Keepalived process is not running anymore - ABORT" + logmsg info " No keepalived process - stop and disable all VNFs: ${_vnfs}" + stop_and_disable_vnfs "$_vnfs" + exit 1 + fi + ;; + BACKUP|FAULT|STOP) + # stop and disable all VNFs except keepalived itself... + _vnfs=$(for _vnf in $ALL_SUPPORTED_VNF_NAMES ; do echo "$_vnf" ; done \ + | sed '/^KEEPALIVED$/d' | tr '\n' ' ') + + logmsg info "Requested BACKUP state - stop and disable all VNFs: ${_vnfs}" + stop_and_disable_vnfs "$_vnfs" + ;; + *) + logmsg error "Unknown keepalived state: ${TARGET_STATE}" + exit 1 + ;; +esac + +# signal the end of the transition +logmsg info "VNF transition completed: ${TARGET_STATE}" + +completed_transition_text > "$HA_FAILOVER_STATUSFILE" + +exit 0 diff --git a/appliances/lib/artifacts/vnf/kea-config-generator b/appliances/lib/artifacts/vnf/kea-config-generator new file mode 100755 index 00000000..b5d4a2c2 --- /dev/null +++ b/appliances/lib/artifacts/vnf/kea-config-generator @@ -0,0 +1,902 @@ +#!/usr/bin/env python3 + +# --------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +# --------------------------------------------------------------------------- # + +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", +] + +__title__ = "kea-config-generator" +__summary__ = "ISC Kea configuration generator" +__uri__ = "https://github.com/OpenNebula/addon-kea-hooks" +__version__ = "1.1.3" +__author__ = "Petr Ospalý" +__email__ = "pospaly@opennebula.io" +__license__ = "Apache License, Version 2.0" +__copyright__ = "2019-2021 %s" % __author__ + + +import sys +import argparse +import re +import json +import base64 +import psutil +import ipaddress +import textwrap +import functools + + +# +# global defaults +# + +JSON_INDENT = 4 +DEFAULT_LOGFILE = "/var/log/kea-dhcp4.log" +DEFAULT_LEASE_TIME = 3600 +SUBNET_ID_MAX = 4294967294 # greater than zero and less than 4294967295 + + +# +# functions +# + +def msg(*args, columns=80): + """ Prints a message on stderr. + + It is a wrapper for print() function, it adds an error category to the + message and wraps the line to not exceed a certain column width. + + Examples: + > msg('ERROR', "Some string", "and another...") + ERROR: Some string and another... + + > msg('If no category is specified:', + "then the message is automatically prefixed with the 'UNKNOWN'", + columns=40) + UNKNOWN: If no category is specified: + then the message is + automatically prefixed with the + 'UNKNOWN' + + Attributes: + *args (tuple of strings): + It concatenates (join with a space) all the strings but with the + exception of the first which can be used as an error category: + ERROR, WARNING, OK, UNKNOWN + If such string is not used: 'UNKNOWN' will be prefixed implicitly. + + columns (int): + Column width after which the text is wrapped. + """ + + if len(args) and args[0].upper() in ('ERROR', 'WARNING', 'OK', 'UNKNOWN'): + msg_type = args[0].upper() + ':' + text = list(args[1:]) + else: + msg_type = 'UNKNOWN:' + text = list(args) + + text.insert(0, msg_type) + + indent = ' ' * (len(msg_type) + 1) + text = ' '.join(text) + text = textwrap.fill(text, width=columns, subsequent_indent=indent) + print(text, file=sys.stderr) + + +def get_networks(): + """ Returns all interfaces and all their addresses, like this: + [ + { + "iface": "lo", + "addrs": [ + { + "addr": "127.0.0.1", + "network": "127.0.0.0", + "prefix": 8, + "broadcast": null + } + ] + }, + ... 
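+        (Only AF_INET addresses are collected into "addrs"; each interface's
+        link-layer address, when available, is stored separately under the
+        "hwaddr" key.)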
+ ] + """ + all_netinfs = [] + + for iface_name, addrs in psutil.net_if_addrs().items(): + iface = { + "iface": iface_name + } + ips = [] + + for addr in addrs: + ip = {} + if addr.family.name == "AF_INET": + ip["addr"] = addr.address + ip["network"] = str(ipaddress.IPv4Interface(addr.address + + '/' + addr.netmask).network.network_address) + ip["prefix"] = ipaddress.IPv4Interface(addr.address + + '/' + addr.netmask).network.prefixlen + ip["broadcast"] = addr.broadcast + ips.append(ip) + elif addr.family.name in ("AF_LINK", "AF_PACKET"): + iface["hwaddr"] = addr.address + + if ips != []: + iface["addrs"] = ips + all_netinfs.append(iface) + + return all_netinfs + + +def gen_subnets(params): + """ Returns subnets with a pool. + + - Subnet is deducted from network address and prefix. + - Pool is created as the whole subnet range minus: + 1. network address + 2. one above network address - the lowest address + 3. broadcast address/highest address + Broadcast address and the highest address are mostly the same. + """ + + def get_interface_map(interfaces): + """ Returns a map of interfaces. """ + + interface_map = {} + for iface in interfaces: + if iface == '*': + continue + + iface = iface.split("/") + if not interface_map.get(iface[0], []): + interface_map[iface[0]] = [] + if len(iface) > 1: + interface_map[iface[0]].append(iface[1]) + + return interface_map + + subnets = [] + subnet_list = [] # to simply lookup if subnet is already created + interfaces_map = get_interface_map(params.get("interfaces", [])) + + for netinf in params.get("my-network", []): + if interfaces_map: + # if explicit interfaces used then skip the others... + if netinf["iface"] not in interfaces_map: + continue + elif netinf["iface"] == "lo": + # if no explicit interfaces then skip loopback at least + continue + + for addr in netinf["addrs"]: + # if explicit interfaces used then verify used address... + if interfaces_map and interfaces_map[netinf["iface"]]: + if addr["addr"] not in interfaces_map[netinf["iface"]]: + continue + + subnet = {} + pools = [] + + new_subnet = addr["network"] + '/' + str(addr["prefix"]) + + # skip this if relevant subnet was already configured + if new_subnet in subnet_list: + continue + + # create list of all ips + subnet_range = ipaddress.ip_network(new_subnet) + + if subnet_range.num_addresses >= 4: + pool_start = subnet_range[2] # skip 0 and first ip + pool_end = subnet_range[-2] # exempt the broadcast ip + else: + msg('WARNING', "This subnet '%s' has no valid pool..." + "SKIPPED" % new_subnet) + continue + + if pool_start <= pool_end: + pool = {"pool": str(pool_start) + '-' + str(pool_end)} + pools.append(pool) + else: + msg('WARNING', "This subnet '%s' has no valid pool..." + "SKIPPED" % new_subnet) + continue + + # add the interface to be on the safe side - this is undocumented + # and I have found only one reference in official documentation: + # https://kea.readthedocs.io/en/v1_6_0/arm/dhcp4-srv.html#host-reservation-in-dhcpv4 + # + # this has a dubious usage when more than one interface share the + # same network... + # subnet["interface"] = netinf["iface"] + + # everything should be ok + subnet_list.append(new_subnet) + subnet["subnet"] = new_subnet + subnet["pools"] = pools + subnets.append(subnet) + + return subnets + + +def return_globaloptions(params): + + def fix_nameservers(options=[], nameservers=[]): + """ Returns options with unified nameserver option-data. + + It leaves all option-data items intact except 'domain-name-servers'. 
+ Those will be filtered and appended to the 'nameservers' argument. + Found nameservers are deduplicated (except the argument 'nameservers'). + """ + + # filter-out domain-name-servers option-data + # and append new nameservers to nameservers from argument + filtered_options = [] + for option in options: + if option.get("name") == "domain-name-servers": + option_nameservers = re.sub(r'\s+', + '', + option["data"]).split(",") + for nameserver in option_nameservers: + if nameserver not in nameservers: + nameservers.append(nameserver) + else: + filtered_options.append(option) + + option = {} + option["name"] = "domain-name-servers" + option["data"] = ", ".join(nameservers) + + # put all together + options = filtered_options + options.append(option) + + return options + + def fix_routers(options=[], routers=[]): + """ Returns options with global routers option-data (if used). """ + + option = {} + option["name"] = "routers" + option["data"] = ", ".join(routers) + + options.append(option) + + return options + + # explicitly configured option-data or empty list + options = params.get("option-data", []) + + # explicitly configured nameservers - merge and extend + if params.get("domain-name-servers"): + options = fix_nameservers(options, params["domain-name-servers"]) + + # explicitly configured routers + if params.get("routers"): + options = fix_routers(options, params["routers"]) + + return options + + +def return_subnets(params): + """ Returns list of subnets. + + It also optionally supports subnet-id and workaround problem with Kea + leasing its own addresses - this is done via flex-id reservation + (in this case more of a blacklist). Flex-id trick is a strange hack and + I am not sure how future-proof it is - but other solution is to break the + one pool into many fragmented... + + Output example: + [ + { + "id": , + "interface": "eth1", + "subnet": "192.0.2.0/24", + "pools": [ { "pool": "192.0.2.1 - 192.0.2.200" } ], + "reservations": [ + { + "flex-id": "'DO-NOT-LEASE-192.0.2.202'", + "ip-address": "192.0.2.202" + } + ] + },... + ] + """ + + def fix_subnets(subnets=[], subnet_id=None): + """ Returns sorted subnets so subnet IDs can grow consistently. """ + + # list all subnets and sort them + subnet_list = [] + for subnet in subnets: + if subnet["subnet"] not in subnet_list: + subnet_list.append(subnet["subnet"]) + else: + msg('ERROR', "Duplicated subnet: '%s'" % subnet["subnet"]) + sys.exit(1) + + subnet_list.sort() + + # put subnet objects in the right order + sorted_subnets = [] + for subnet in subnet_list: + sorted_subnets.append(next(x for x in subnets + if x["subnet"] == subnet)) + + # add subnet-id if requested in args + if isinstance(subnet_id, int): + sorted_subnets[-1]["id"] = subnet_id + subnet_id += 1 + + return sorted_subnets + + def get_reservation_map(netinfs, vips): + """ Returns a map of reservations. 
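+
+        Illustrative shape (hypothetical addresses):
+            { "192.0.2.0/24": ["192.0.2.1", "192.0.2.86"] }
+        i.e. subnet in CIDR notation -> list of local and VIP addresses that
+        must not be handed out as leases.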
""" + + reservations_map = {} + + for netinf in netinfs: + for addr in netinf["addrs"]: + new_subnet = addr["network"] + '/' + str(addr["prefix"]) + + # add reservation + if reservations_map.get(new_subnet): + reservations_map[new_subnet].append(addr["addr"]) + else: + reservations_map[new_subnet] = [addr["addr"]] + + for vip in vips: + vip_object = ipaddress.IPv4Address(vip) + for subnet in reservations_map: + subnet_object = ipaddress.ip_network(subnet) + if vip_object in subnet_object: + if vip not in reservations_map[subnet]: + reservations_map[subnet].append(vip) + + return reservations_map + + def fix_reservations(subnets, netinfs, vips): + """ Returns subnets with added reservations. """ + + # we gather all addresses and map them to subnets + reservations_map = get_reservation_map(netinfs, vips) + + # and we add flex-id reservations + for subnet in subnets: + addrs = reservations_map.get(subnet["subnet"], []) + for found_subnet in reservations_map: + subnet_range = ipaddress.ip_network(subnet["subnet"]) + for addr in reservations_map.get(found_subnet, []): + ip_addr = ipaddress.ip_address(addr) + if ip_addr in subnet_range and addr not in addrs: + addrs.append(addr) + + addrs.sort() + reservations = subnet.get("reservations", []) + for addr in addrs: + reservation = { + "flex-id": "'DO-NOT-LEASE-%s'" % addr, + "ip-address": addr + } + reservations.append(reservation) + + subnet["reservations"] = reservations + subnet["reservation-mode"] = "all" + + return subnets + + # explicitly configured subnets or generated + subnets = params.get("subnet4", []) + if not len(subnets): + subnets = gen_subnets(params) + + # sort subnets + subnets = fix_subnets(subnets, params.get("subnet-id")) + + # Here follows a hack to forbid Kea to lease its own addresses... + subnets = fix_reservations(subnets, params.get("my-network", []), + params.get("vips", [])) + + return subnets + + +def return_hooks(params): + return params.get("hooks-libraries", []) + + +def return_interfaces(params): + """ Returns list of interfaces. + + Interface name: e.g. "eth0" or "eth0/192.0.2.1" + + Output example: + [ "eth0", "eth1" ] + or + ["*"] for all (and all ips...) + """ + + # explicitly configured interfaces + if params.get("interfaces") is not None: + interfaces = [] + for iface in params["interfaces"]: + if iface not in interfaces: + interfaces.append(iface) + return interfaces + + # failsafe with listen-on-all interfaces (and IPs)... + if not len(params.get("my-network", [])): + return ["*"] + + # parse networks and listen only on one address per interface (!) + # we expect that all listed interfaces are those with at least one ip... 
+ interfaces = [] + for netinf in params["my-network"]: + interfaces.append(netinf["iface"] + "/" + netinf["addrs"][0]["addr"]) + + # extend the interface list with VIPs + for vip in params.get("vips", []): + vip_object = ipaddress.IPv4Address(vip) + found = False + for netinf in params.get("my-network", []): + for addr in netinf["addrs"]: + subnet = addr["network"] + '/' + str(addr["prefix"]) + subnet_object = ipaddress.ip_network(subnet) + if vip_object in subnet_object: + new_iface = netinf["iface"] + "/" + vip + if new_iface not in interfaces: + interfaces.append(new_iface) + found = True + break + if found: + break + + interfaces.sort() + + return interfaces + + +def return_database(params): + # explicitly configured lease database + if params.get("lease-database") is not None: + return params["lease-database"] + + lfc_interval = 2 * return_leasetime(params) + database = { + "type": "memfile", + "persist": True, + "lfc-interval": lfc_interval + } + + return database + + +def return_leasetime(params): + return params.get("lease-time", DEFAULT_LEASE_TIME) + + +def return_authoritative(params): + return params.get("authoritative", False) + + +def return_loggers(params): + # explicitly configured loggers + if params.get("loggers") is not None: + return params["loggers"] + + logfile = params.get("logfile", DEFAULT_LOGFILE) + loggers = [ + { + "name": "kea-dhcp4", + "output_options": [ + { + "output": logfile + } + ], + "severity": "INFO", + "debuglevel": 0 + } + ] + + return loggers + + +def return_dhcp4(params): + # lease_time = return_leasetime(params) + # rebind_timer = lease_time // 2 # rebind is T2 + # renew_timer = rebind_timer // 2 # renew is T1 + + dhcp4 = { + "interfaces-config": { + "interfaces": return_interfaces(params) + }, + "authoritative": return_authoritative(params), + "option-data": return_globaloptions(params), + "subnet4": return_subnets(params), + "lease-database": return_database(params), + "sanity-checks": { + "lease-checks": "fix-del" + }, + "valid-lifetime": return_leasetime(params), + "calculate-tee-times": True, + "loggers": return_loggers(params), + "hooks-libraries": return_hooks(params) + } + + if params.get("unix-socket"): + dhcp4["control-socket"] = { + "socket-type": "unix", + "socket-name": params["unix-socket"] + } + + return dhcp4 + + +def generate_config(params): + config = { + "Dhcp4": return_dhcp4(params) + } + + return config + + +def get_params(): + """ Returns params object if CLI arguments are valid or None. + + It will do simple sanity checks for a few things: + 1. that we have valid JSON values + 2. that those JSON values have correct type + 3. that the content of the value has at least some mandatory things + + The authoritative validation is done by Kea itself when it will try to run + with our created config. But this way we can at least hint the user if + there are obvious errors in parameters. + """ + + def validate_ipv4(arg_str): + try: + ipaddress.IPv4Address(arg_str) + except ValueError: + raise argparse.ArgumentTypeError( + "'%s' is not a valid IPv4 address!" % arg_str) + + return arg_str + + def validate_interface(arg_str): + # / + iface = arg_str.split("/") + + # just interface name + if len(iface) == 1: + return arg_str + # interface name and ip + elif len(iface) == 2: + pass + # something else + else: + raise argparse.ArgumentTypeError( + "'%s' is not a valid interface designation!" % arg_str) + + # is interface an empty string? + if not len(iface[0]): + raise argparse.ArgumentTypeError( + "'%s' has empty interface name!" 
% arg_str) + + try: + ipaddress.IPv4Address(iface[1]) + except ValueError: + raise argparse.ArgumentTypeError( + "'%s' is not a valid IPv4 address!" % iface[1]) + + return arg_str + + def validate_subnet_id(arg_str): + try: + subnet_id = int(arg_str) + except ValueError: + raise argparse.ArgumentTypeError( + "'%s' is not a valid integer number!" % arg_str) + + if (subnet_id < 1) or (subnet_id > SUBNET_ID_MAX): + raise argparse.ArgumentTypeError( + "Subnet ID must be in the range: 1-%d!" % SUBNET_ID_MAX) + + return subnet_id + + def validate_json_decorator(fu): + @functools.wraps(fu) + def wrap_validate_json(json_str, *args, **kwargs): + # first we try to decode base64 + try: + json_base64 = base64.b64decode(json_str, validate=True) + json_object = json.loads(json_base64) + json_str = json_base64 + except Exception: + # now we try plain json + try: + json_object = json.loads(json_str) + except json.JSONDecodeError: + raise argparse.ArgumentTypeError( + "'%s' is not a valid JSON (or base64 encoded JSON)!" + % json_str) + + return fu(json_str, json_object, *args, **kwargs) + + return wrap_validate_json + + @validate_json_decorator + def validate_json(arg_str, json_object=None): + return json_object + + @validate_json_decorator + def validate_lease_database(arg_str, database=None): + if not isinstance(database.get("type"), str): + raise argparse.ArgumentTypeError( + "Lease database object has to have at least the 'type' field" + " with a database backend!") + + return database + + @validate_json_decorator + def validate_option_data(arg_str, option_data=None): + if (not (((isinstance(option_data.get("name"), str) + and len(option_data["name"])) + or isinstance(option_data.get("code"), int)) + and (isinstance(option_data.get("data"), str)))): + # ugly python syntax + raise argparse.ArgumentTypeError( + "Option-data object has to have 'name' or 'code' field and" + " 'data'!") + + if (option_data.get("name") == "domain-name-servers" or + option_data.get("code") == 6): + # ugly python syntax + ips = [ip.strip() for ip in option_data["data"].split(",")] + for ip in ips: + validate_ipv4(ip) + + if (option_data.get("name") == "routers" or + option_data.get("code") == 3): + # ugly python syntax + ips = [ip.strip() for ip in option_data["data"].split(",")] + for ip in ips: + validate_ipv4(ip) + + return option_data + + @validate_json_decorator + def validate_subnet4(arg_str, subnet=None): + if (not ((isinstance(subnet.get("subnet"), str) + and len(subnet["subnet"])) + and (isinstance(subnet.get("pools"), list) + and len(subnet["pools"])))): + # ugly python syntax + raise argparse.ArgumentTypeError( + "Subnet4 object has to have at least 'subnet' and 'pools'" + " (non-empty list) fields!") + + try: + ipaddress.ip_network(subnet["subnet"]) + except ValueError: + raise argparse.ArgumentTypeError( + "Subnet '%s' is not a valid network designation." + % subnet["subnet"]) + + for pool in subnet["pools"]: + if (not (isinstance(pool, dict) and + isinstance(pool.get("pool"), str))): + # ugly python syntax + raise argparse.ArgumentTypeError( + "Pools must contain a list of dicts with a value 'pool'!") + + pool = pool["pool"] + + ips = [pool.strip() for pool in pool.split("-")] + if len(ips) != 2: + raise argparse.ArgumentTypeError( + "Pool '%s' is not in ' - ' format!" 
+ % pool) + + validate_ipv4(ips[0]) + validate_ipv4(ips[1]) + + start_ip = ipaddress.IPv4Address(ips[0]) + end_ip = ipaddress.IPv4Address(ips[1]) + + if start_ip > end_ip: + raise argparse.ArgumentTypeError( + "Start IP in the pool is greater than the end IP!") + + option_data = subnet.get("option-data", []) + for option in option_data: + validate_option_data(json.dumps(option)) + + return subnet + + @validate_json_decorator + def validate_hook(arg_str, hook=None): + if (not (isinstance(hook.get("library"), str) + and len(hook["library"]))): + # ugly python syntax + raise argparse.ArgumentTypeError( + "Hook object has to have at least the 'library' field" + " with a filename!") + + return hook + + # parse CLI arguments + parser = argparse.ArgumentParser( + description="ISC Kea dhcp4 config generator - it generates dhcp4" + " config file and its content can be modified by varies options." + " The result is then simply printed into stdout.") + + parser.add_argument('-v', '--version', action='version', + version='%(prog)s ' + __version__) + + parser.add_argument("-t", "--lease-time", + required=False, + metavar="", + type=int, + help="Life time of a lease in seconds" + " (Default: %d)" % DEFAULT_LEASE_TIME) + parser.add_argument("-a", "--authoritative", + required=False, + action='store_const', + const=True, + help="Start Kea server as authoritative for all" + " networks (Global, Default: false)") + parser.add_argument("-l", "--logfile", + required=False, + metavar="", + help="Name of the file for log messages" + " (Default: %s)" % DEFAULT_LOGFILE) + parser.add_argument("-d", "--lease-database", + required=False, + metavar="", + type=validate_lease_database, + help="JSON value representing lease database object" + " (Default: type=memfile). The whole JSON can be" + " encoded in base64.") + parser.add_argument("-i", "--interface", + dest="interfaces", + required=False, + metavar="]>", + action='append', + type=validate_interface, + help="Name of the interface on which to listen" + " (Global, Default: '*'; e.g.: 'eth0' or better yet:" + " 'eth0/'). This argument can be used multiple" + " of times.") + parser.add_argument("-n", "--domain-name-server", + dest="domain_name_servers", + required=False, + metavar="", + action='append', + type=validate_ipv4, + help="Domain name server IPv4 address (Global," + " Default: None). This argument can be used multiple" + " of times. These nameservers take precedence over" + " others found in option-data (if used).") + parser.add_argument("-r", "--router", + dest="routers", + required=False, + metavar="", + action='append', + type=validate_ipv4, + help="Router's IPv4 address (Global, Default: None)." + " This argument can be used multiple of times.") + parser.add_argument("-o", "--option-data", + required=False, + metavar="", + action='append', + type=validate_option_data, + help="JSON value representing option-data object" + " (Global, Default: None). This argument can be used" + " multiple of times. The whole JSON can be encoded in" + " base64.") + parser.add_argument("-s", "--subnet4", + required=False, + metavar="", + action='append', + type=validate_subnet4, + help="JSON value representing subnet4 object" + " (Default: Auto-Generated). This argument can be" + " used multiple of times. The whole JSON can be" + " encoded in base64.") + parser.add_argument("-x", "--hook", + dest="hooks_libraries", + required=False, + metavar="", + action='append', + type=validate_hook, + help="JSON value representing hook object" + " (Default: None). 
This argument can be used multiple" + " of times. The whole JSON can be encoded in base64.") + parser.add_argument("-L", "--logger", + dest="loggers", + required=False, + metavar="", + action='append', + type=validate_json, + help="JSON value representing logger object" + " (Default: file='%s'). This argument can be used" + " multiple of times. If used then --logfile is" + " ignored. The whole JSON can be encoded in base64." + % DEFAULT_LOGFILE) + parser.add_argument("-I", "--subnet-id", + required=False, + metavar="", + type=validate_subnet_id, + help="Starting ID number for subnets (all configured" + " subnets are incremented by one from this value -" + " when the highest id is reached then it will reset" + " and start from one again)." + " (Default: None/Auto, Min-Max: 1-%d)" + % SUBNET_ID_MAX) + parser.add_argument("-F", "--floating-ip", + dest="vips", + required=False, + metavar="", + action='append', + type=validate_ipv4, + help="Virtual floating IPv4 address (Default: None)." + " This argument can be used multiple of times.") + parser.add_argument("-u", "--unix-socket", + required=False, + metavar="", + help="Filename for the unix control socket" + " (Default: None)") + + # validate arguments and feed them to the params object + args = parser.parse_args() + + params = {} + + # we are replacing underscore created by python with '-' to align with + # the actual config names + for key, value in vars(args).items(): + if value: + params[key.replace('_', '-')] = value + + return params + + +def main(): + # parse arguments, quasi-validate content and feed it to params object + params = get_params() + + # examine this machine networks and store it to params too + params["my-network"] = get_networks() + + # generate and print config + # print(json.dumps(params, indent=JSON_INDENT), file = sys.stderr) + print(json.dumps(generate_config(params), indent=JSON_INDENT)) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/appliances/lib/artifacts/vnf/one-vnf/lib/appliance.rb b/appliances/lib/artifacts/vnf/one-vnf/lib/appliance.rb new file mode 100644 index 00000000..0425b9b8 --- /dev/null +++ b/appliances/lib/artifacts/vnf/one-vnf/lib/appliance.rb @@ -0,0 +1,153 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +# Class to load plugins and trigger their actions +class Appliance + + attr_reader :logger, :config_file, :config, :plugins, :plugins_dir + + def initialize(config_file, action = :run) + @config_file = config_file + @plugins_dir = '/opt/one-appliance/lib/one-vnf/lib/appliance/plugin' + @logger = Syslog::Logger.new(File.basename($PROGRAM_NAME)) + @plugins = [] + + load_config + load_plugins if action == :run + end + + def load_config + @logger.debug "Loading configuration from #{@config_file}" + + f = File.read(@config_file) + @config = JSON.parse(f) + rescue StandardError => e + @logger.fatal e.to_s + raise + end + + def save_config + @logger.debug "Saving configuration to #{@config_file}" + + File.open(@config_file, "w") do |f| + f.puts JSON.pretty_generate(@config) + end + rescue StandardError => e + @logger.fatal e.to_s + raise + end + + def get_plugin(name) + if @config.key?(name) + return @config[name]['enabled'] + end + return false + end + + def set_plugin(name, state) + if @config.key?(name) + @config[name]['enabled'] = state + return true + end + return false + end + + def run + @logger.info('Entering plugins execution loop') + + while sleep 1 + @plugins.each do |plugin| + next unless plugin.ready? + + begin + plugin.run + rescue StandardError => e + msg = "Plugin #{plugin.name} run error - #{e.message}" + @logger.error msg + STDERR.puts msg + e.backtrace.each do |line| + @logger.error line + STDERR.puts line + end + end + end + end + end + + def reconfigure + @logger.info('Reconfiguring plugins') + + load_config + + @plugins.each do |plugin| + was_enabled = plugin.enabled + + # reconfigure and cleanup if disabled + plugin.configure(@config) + plugin.cleanup if was_enabled && !plugin.enabled + end + end + + def cleanup + @logger.info('Cleaning up plugins') + + @plugins.each do |plugin| + next unless plugin.enabled + + plugin.cleanup + end + end + + private + + def load_plugins + # check state before loading plugins + base_constants = Object.constants + + Dir["#{@plugins_dir}/*.rb"].sort.each do |f| + @logger.debug("Loading code from #{f}") + require f + end + + # shamelessly copied from: + # https://joshrendek.com/2013/07/a-simple-ruby-plugin-system/ + # Iterate over each symbol in the object space + Object.constants.each do |klass| + next if base_constants.include?(klass) + + # Get the constant from the Kernel using the symbol + const = Kernel.const_get(klass) + if const.respond_to?(:superclass) && (const.superclass == Appliance::Plugin) + # @class_plugins << const + @plugins << const.new(@config, @logger) + end + end + + raise StandardError, 'No plugins loaded' if @plugins.empty? 
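+        # at this point @plugins holds one instance of every discovered
+        # Appliance::Plugin subclass, each already configured through its
+        # constructor with the shared @config and @logger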
+ + @logger.info("Available plugins - #{plugins.map {|i| i.class }.join(', ')}") + rescue StandardError => e + msg = "Plugins load error - #{e.message}" + @logger.fatal msg + STDERR.puts msg + e.backtrace.each do |line| + @logger.fatal line + STDERR.puts line + end + raise + end + +end diff --git a/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin.rb b/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin.rb new file mode 100644 index 00000000..b175b7e7 --- /dev/null +++ b/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin.rb @@ -0,0 +1,59 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +# Generic plugin interface +class Appliance::Plugin + + attr_reader :name, :enabled, :logger + + def initialize(name, app_config, logger) + @name = name + @logger = logger + @timer = 0 + + configure(app_config) + end + + def configure(app_config) + # store only configuration for plugin + @config = app_config[@name] + @config ||= {} + + @enabled = @config['enabled'] + @enabled ||= false + + @refresh_rate = 60 + @refresh_rate = Integer(@config['refresh-rate']) if @config.key?('refresh-rate') + end + + def run; end + + def cleanup; end + + def ready? + return false unless @enabled + + @timer += 1 + + if @timer >= @refresh_rate + @timer = 0 + return true + end + + false + end + +end diff --git a/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/dummy.rb b/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/dummy.rb new file mode 100644 index 00000000..40ccdb1e --- /dev/null +++ b/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/dummy.rb @@ -0,0 +1,39 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +# example of a plugin implementation +class Dummy < Appliance::Plugin + + # + # plugin interface + # + + def initialize(app_config, logger) + super('dummy', app_config, logger) + end + + def run + # here is the place for the main logic of a plugin + logger.info "Dummy VNF is executing 'run' action now..." 
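+        # note: the Appliance loop only calls 'run' once ready? returns true,
+        # i.e. roughly every 'refresh-rate' seconds (60 by default) and only
+        # while the plugin is enabled in the JSON configuration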
+ end + + def cleanup + # this is executed on the one-vnf service termination or when the VNF + # plugin is disabled/stopped + logger.info 'Dummy VNF is doing cleanup now...' + end + +end diff --git a/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/haproxy.rb b/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/haproxy.rb new file mode 100644 index 00000000..3c62f439 --- /dev/null +++ b/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/haproxy.rb @@ -0,0 +1,795 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2022, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +# rubocop:disable Style/Next +# rubocop:disable Style/RedundantReturn + +HAPROXY_YML = '/etc/haproxy/haproxy.yml' +HAPROXY_CFG = '/etc/haproxy/haproxy.cfg' + +# Haproxy VNF plugin +class Haproxy < Appliance::Plugin + + # + # plugin interface + # + + def initialize(app_config, logger) + super('haproxy', app_config, logger) + end + + def configure(app_config) + super + + # TODO: how to treat interfaces? Filter out LB addresses? + + # list of LB interfaces (and by extension their vnets via NIC ids) + @ifaces = [] + if @config.key?('interfaces') + @ifaces = @config['interfaces'] + end + + # TODO: is this naming scheme always valid: == ETH ? + # This will create dict such as this: + # { 0: "eth0", 3: "eth1" } + @nic_ids = {} + @ifaces.each do |nic| + @nic_ids[nic['one-name'].delete('^0-9')] = nic['real-name'] + end + + # + # prepare loadbalancer variables + # + + # TODO: do sanity checks + + # TODO: this will erase the old on reconfigure + # the following uses lb hash (ip:port:proto;) as keys + @lbs = {} + @static_backend_servers = {} + @dynamic_backend_servers = {} + + # each lb must have unique index + @lb_indices = {} + + @lb_configs = nil + if @config.key?('lbs') + @lb_configs = @config['lbs'] + unless @lb_configs.is_a?(Array) + logger.error "VNF HAPROXY: List of LBs must be an array - ABORT..." + return -1 + end + end + + @haproxy_onegate_enabled = nil + if @config.key?('onegate') && (!!@config['onegate'] == @config['onegate']) + @haproxy_onegate_enabled = @config['onegate'] + end + + # + # check that this plugin is actually enabled... + # + + # possibly skip rest of the configure section + if @config.key?('enabled') && (!!@config['enabled'] == @config['enabled']) + unless @config['enabled'] + logger.debug 'VNF HAPROXY: HAProxy plugin is disabled - no LB will be configured...' + return 0 + end + else + logger.debug "VNF HAPROXY: HAProxy plugin is not enabled or value is not boolean (#{@config['enabled']}) - no LB will be configured..." 
+ return 0 + end + + # + # loop through config and create all validated lbs + # + + # TODO: improve sanity checks + + @lb_configs.each do |lb_config| + lb_hash, lb = configure_loadbalancer(@lb_indices, lb_config) + next unless lb + + lb = deploy_loadbalancer(lb, :add) + unless lb && lb['status'] == :deploy_success + logger.debug "VNF HAPROXY: Failed to setup LoadBalancer: #{lb_hash} - skipping..." + next + end + + # + # prepare global variables + # + + # store validated lb + + if @lbs.key?(lb_hash) + logger.debug "VNF HAPROXY: Duplicit LoadBalancer (#{lb_hash}) - skipping..." + next + else + @lbs[lb_hash] = lb + end + + unless @static_backend_servers.key?(lb_hash) + @static_backend_servers[lb_hash] = {} + end + + unless @dynamic_backend_servers.key?(lb_hash) + @dynamic_backend_servers[lb_hash] = {} + end + + # + # add static backend servers + # + + if lb_config.key?('backend-servers') + backend_servers = lb_config['backend-servers'] + else + next + end + + unless backend_servers.is_a?(Array) && (backend_servers.count > 0) + logger.debug 'VNF HAPROXY: No static backend servers to configure for this LB' + next + end + + backend_servers.each do |server_config| + server_hash, backend_server = create_backend_server(server_config, lb) + + unless backend_server + logger.debug "VNF HAPROXY: Backend server config is incomplete - skipping..." + next + end + + backend_server = deploy_backend_server(backend_server, :add) + unless backend_server && backend_server['status'] == :deploy_success + logger.debug "VNF HAPROXY: Failed to setup backend server: #{server_hash} - skipping..." + next + end + + # save the static backend server to track changes for refresh + @static_backend_servers[lb_hash][server_hash] = backend_server + + # TODO: signal that backend server was successfully deployed + end + end + end + + def run + # + # Dynamic backend servers (OneGate) + # + + # no need to poll OneGate or monitor if we don't have any LB + return 0 unless @lbs.count > 0 + + # + # search for dynamic backend servers if OneGate is enabled + # + + if @haproxy_onegate_enabled + @dynamic_backend_servers, rc = refresh_dynamic_backend_servers( + @lbs, + @static_backend_servers, + @dynamic_backend_servers) + + unless rc == 0 + logger.debug 'VNF HAPROXY: Failed to refresh dynamic backend servers - check OneGate setup...' 
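+                # the previously known dynamic backend servers stay in place
+                # and another refresh is attempted on the next run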
+ end + end + + # + # Refresh / monitoring of backend servers section + # + + # parse current LVS config + @active_backend_servers = get_active_backend_servers(@lbs) + + # walk through all LBs and re-add backend servers or remove dead ones + refresh_active_backend_servers( + @lbs, + @active_backend_servers, + @static_backend_servers, + @dynamic_backend_servers) + end + + private + + # + # other internal methods + # + + def execute_cmd(cmd_str, logme = true) + stdout, stderr, rc = Open3.capture3(cmd_str) + if (rc.exitstatus != 0) && logme + logger.error "VNF HAPROXY ERROR: #{stdout + stderr}" + end + return stdout, rc + end + + def read_haproxy_yml + if File.exist?(HAPROXY_YML) + return YAML.safe_load File.read(HAPROXY_YML) + else + # default "empty" config + return { + 'global' => [ + 'log /dev/log local0', + 'log /dev/log local1 notice', + 'stats socket /var/run/haproxy.sock mode 666 level admin', + 'stats timeout 120s', + 'user haproxy', + 'group haproxy', + 'daemon' + ], + 'defaults' => [ + 'log global', + 'retries 3', + 'maxconn 2000', + 'timeout connect 5s', + 'timeout client 120s', + 'timeout server 120s' + ], + 'frontend' => {}, + 'backend' => {} + } + end + end + + def write_haproxy_yml(config) + File.write HAPROXY_YML, YAML.dump(config) + end + + def write_haproxy_cfg(config = nil, indent = 4) + indent, output = ' ' * indent, '' + + if config.nil? or config.empty? + config = YAML.safe_load File.read(HAPROXY_YML) + end + + config + .reject {|section| %w[frontend backend].include? section} + .each do |section, options| + output << section << "\n" + options.each {|option| output << indent << option << "\n"} + end + config + .select {|section| %w[frontend].include? section} + .each do |section, names| + names.each do |name, value| + output << "#{section} #{name}" << "\n" + value['options'].each {|option| output << indent << option << "\n"} + end + end + config + .select {|section| %w[backend].include? section} + .each do |section, names| + names.each do |name, value| + output << "#{section} #{name}" << "\n" + value['options'].each {|option| output << indent << option << "\n"} + value['server'].each do |server, command| + output << indent << "server #{server} #{command}" << "\n" + end + end + end + + File.write HAPROXY_CFG, output + end + + def reload_haproxy + write_haproxy_cfg + _, rc = execute_cmd('rc-service haproxy start && rc-service haproxy reload') + return rc.exitstatus + end + + # https://www.haproxy.com/documentation/hapee/latest/onepage/management/#9.3 + def haproxy_show_servers_state + sock = UNIXSocket.new '/var/run/haproxy.sock' + sock.puts 'show servers state' + + version = sock.readline.rstrip! + raise 'haproxy runtime api :show servers state: unsupported version' unless version == '1' + + headers = sock.readline.rstrip!.split[1..] + + backends = {} + while row = sock.readline.rstrip! + next if row.empty? 
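+        # each remaining row is whitespace-separated; zipping it with the
+        # header names yields a per-server field map, stored under its backend
+        # name ('be_name') and server name ('srv_name')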
+ map = headers.zip(row.split).to_h + (backends[map['be_name']] ||= {})[map['srv_name']] = map + end + rescue EOFError # Haproxy closes the connection unless 'prompt' is sent + backends + ensure + sock.close + end + + def gen_lb_hash(lb) + return "#{lb['address']}:#{lb['port']}".unpack('H*')[0] + end + + def gen_bs_hash(server) + return "#{server['server-host']}:#{server['server-port']}".unpack('H*')[0] + end + + def configure_loadbalancer(lb_indices, lb_config) + # gather lb info + lb = {} + lb['address'] = nil + lb['port'] = nil + + # unique index + if lb_config.key?('index') && lb_config['index'].is_a?(Integer) + if lb_indices.key?(lb_config['index'].to_i) + logger.debug "VNF HAPROXY: Duplicit LoadBalancer index (#{lb_config['index']}) - skipping..." + return nil, nil + else + lb_indices[lb_config['index'].to_i] = true + end + else + logger.debug "VNF HAPROXY: LoadBalancer is missing integer index - skipping..." + return nil, nil + end + + if lb_config.key?('lb-address') && !lb_config['lb-address'].to_s.strip.empty? + lb['address'] = lb_config['lb-address'].to_s.strip + else + logger.debug "VNF HAPROXY: LoadBalancer is missing address - skipping..." + return nil, nil + end + + if lb_config.key?('lb-port') && !lb_config['lb-port'].to_s.strip.empty? + lb['port'] = lb_config['lb-port'].to_i + end + + # port sanity check + unless lb['port'] + logger.debug "VNF HAPROXY: Port ('#{lb['port']}') must be set - skipping..." + return nil, nil + end + + # + # return loadbalancer with hash/index + # + + lb_hash = gen_lb_hash(lb) + + return lb_hash, lb + end + + # creates just a stub for internal usage + def create_loadbalancer(lb_config) + # gather lb info + lb = {} + lb['address'] = nil + lb['port'] = nil + + if lb_config.key?('lb-address') && !lb_config['lb-address'].to_s.strip.empty? + lb['address'] = lb_config['lb-address'].to_s.strip + else + return nil, nil + end + + if lb_config.key?('lb-port') && !lb_config['lb-port'].to_s.strip.empty? + lb['port'] = lb_config['lb-port'].to_i + end + + # port v protocol sanity check + unless lb['port'] + return nil, nil + end + + # + # return loadbalancer with hash/index + # + + lb_hash = gen_lb_hash(lb) + + return lb_hash, lb + end + + # sanitize a server config and return validated backend server + def create_backend_server(server_config, lb) + server = {} + server['lb'] = lb + server['server-host'] = nil + server['server-port'] = nil + + # TODO: sanity checks - eg. port must be integer ('x'.to_i makes zero...) + if server_config.key?('server-host') && !server_config['server-host'].to_s.strip.empty? + server['server-host'] = server_config['server-host'].to_s.strip + else + logger.debug 'VNF HAPROXY: Missing mandatory backend server host - skipping...' + return nil, nil + end + + if server_config.key?('server-port') && !server_config['server-port'].to_s.strip.empty? + unless server_config['server-port'].to_i >= 0 + logger.debug 'VNF HAPROXY: Backend server port must be an integer - skipping...' + return nil, nil + end + server['server-port'] = server_config['server-port'].to_i + end + + # + # return backend server with hash/index + # + + bs_hash = gen_bs_hash(server) + + return bs_hash, server + end + + def deploy_loadbalancer(lb, status = :add) + lb['status'] = status + + unless [:add, :update].include? lb['status'] + logger.error "VNF HAPROXY: This is a bug: wrong internal state for deploy of LoadBalancer (status: #{lb['status']})..." 
+ return nil + end + + lb_hash = gen_lb_hash(lb) + + config = read_haproxy_yml + config['frontend'][lb_hash] = { + 'options' => [ + 'mode tcp', + "bind 0.0.0.0:#{lb['port']}", + "default_backend #{lb_hash}" + ] + } + config['backend'][lb_hash] = { + 'options' => [ + 'mode tcp', + 'balance roundrobin', + 'option tcp-check' + ], + 'server' => {} + } + + write_haproxy_yml config + write_haproxy_cfg config + + rc = reload_haproxy + if rc == 0 + lb['status'] = :deploy_success + else + lb['status'] = :deploy_fail + end + + return lb + end + + def deploy_backend_server(server, status = :add) + server['status'] = status + + unless [:add, :update].include? server['status'] + logger.error "VNF HAPROXY: This is a bug: wrong internal state for deploy of backend server (status: #{server['status']})..." + return nil + end + + lb_hash, bs_hash = gen_lb_hash(server['lb']), gen_bs_hash(server) + + config = read_haproxy_yml + + current = config.dig 'backend', lb_hash, 'server', bs_hash + update = "#{server['server-host']}:#{server['server-port']} check observe layer4 error-limit 50 on-error mark-down" + + if current != update + config['backend'][lb_hash]['server'][bs_hash] = update + + write_haproxy_yml config + write_haproxy_cfg config + + rc = reload_haproxy + if rc == 0 + server['status'] = :deploy_success + else + server['status'] = :deploy_fail + end + else + server['status'] = :deploy_success + end + + return server + end + + def remove_backend_server(server) + server['status'] = :delete + + lb_hash, bs_hash = gen_lb_hash(server['lb']), gen_bs_hash(server) + + config = read_haproxy_yml + config['backend'][lb_hash]['server'].delete(bs_hash) + + write_haproxy_yml config + write_haproxy_cfg config + + rc = reload_haproxy + if rc == 0 + server['status'] = :undeploy_success + else + server['status'] = :undeploy_fail + end + + return server + end + + def refresh_dynamic_backend_servers(lbs, static_backend_servers, dynamic_backend_servers) + # query OneGate + output, rc = execute_cmd('onegate service show --json') + if rc.exitstatus != 0 + return dynamic_backend_servers, -1 + end + + oneflow_service = JSON.parse(output) + + # collect all VM IDs inside this OneGate/OneFlow service + # TODO: verify that those keys are really there + found_vms = [] + found_roles = oneflow_service['SERVICE']['roles'] + found_roles.each do |role| + role['nodes'].each do |node| + _vmid = node['vm_info']['VM']['ID'] + found_vms.append(_vmid.to_i) if !_vmid.to_s.strip.empty? 
+ end + end + + # find all relevant context variables from user template + onegate_lbs = {} + found_vms.each do |vmid| + # query OneGate + output, rc = execute_cmd("onegate vm show --json #{vmid}") + if rc.exitstatus != 0 + next + end + + onegate_lbs[vmid] = {} + + vm_info = JSON.parse(output) + vm_info['VM']['USER_TEMPLATE'].each do |context_var, context_value| + if m = /^ONEGATE_HAPROXY_LB(?[0-9]+)_(?.*)$/.match(context_var) + lb_index = m['lbindex'].to_i + lb_key = m['lbkey'].to_s.downcase + + unless onegate_lbs[vmid].key?(lb_index) + onegate_lbs[vmid][lb_index] = {} + end + + onegate_lbs[vmid][lb_index][lb_key] = context_value + end + end + end + + # create an empty copy of dynamic backend servers + active_backend_servers = {} # to track active setup + dynamic_backend_servers.each do |lb_hash, _| + active_backend_servers[lb_hash] = {} + end + + # walk through all found dynamic lb configs and add backend servers in the + # case that such lb was configured otherwise skip it + onegate_lbs.each do |vmid, dyn_lbs| + dyn_lbs.each do |_, dyn_lb| + lb_config = {} + lb_config['lb-address'] = (dyn_lb['ip'] if dyn_lb.key?('ip')) || "" + lb_config['lb-port'] = (dyn_lb['port'] if dyn_lb.key?('port')) || "" + + # if lb is incomplete then hash is incomplete and no such lb will + # be found + lb_hash, lb = create_loadbalancer(lb_config) + + unless lb + logger.debug "VNF HAPROXY: Dynamic backend servers - LoadBalancer designation is incomplete: #{dyn_lb} - skipping..." + next + end + + # skip lb which is not configured + unless lbs.key?(lb_hash) + logger.debug "VNF HAPROXY: Dynamic backend servers - LoadBalancer does not exist: #{lb_hash} - skipping..." + next + end + + server_config = {} + server_config['server-host'] = (dyn_lb['server_host'] if dyn_lb.key?('server_host')) || "" + server_config['server-port'] = (dyn_lb['server_port'] if dyn_lb.key?('server_port')) || "" + + # skip server which does not have at least a host + if server_config['server-host'].to_s.strip.empty? + logger.debug "VNF HAPROXY: Dynamic backend servers - missing host part: #{dyn_lb} - skipping..." + next + end + + # + # configure dynamic backend servers + # + + server_hash, backend_server = create_backend_server(server_config, lbs[lb_hash]) + + unless backend_server + logger.debug "VNF HAPROXY: Dynamic backend servers - config is incomplete: #{dyn_lb} - skipping..." + next + end + + if static_backend_servers[lb_hash].key?(server_hash) + # TODO: skip or overwrite... + logger.debug "VNF HAPROXY: Dynamic backend servers - conflict with existing static backend server (#{server_hash}) - skipping..." + next + end + + if dynamic_backend_servers[lb_hash].key?(server_hash) + # update old one but do not deploy - let refresh do that + #backend_server = deploy_backend_server(backend_server, :update) + true + else + backend_server = deploy_backend_server(backend_server, :add) + + unless backend_server && backend_server['status'] == :deploy_success + logger.debug "VNF HAPROXY: Dynamic backend servers - failed to setup: #{server_hash} - skipping..." 
+ next + end + end + + # save the dynamic backend server to track changes for refresh + dynamic_backend_servers[lb_hash][server_hash] = backend_server + active_backend_servers[lb_hash][server_hash] = backend_server + + # TODO: signal that backend server was successfully deployed + end + end + + # + # delete old backend servers configured via OneGate + # + + dynamic_backend_servers.each do |lb_hash, backend_servers| + backend_servers.each do |server_hash, backend_server| + if !active_backend_servers[lb_hash].key?(server_hash) + backend_server = remove_backend_server(backend_server) + + unless backend_server['status'] == :undeploy_success + logger.debug "VNF HAPROXY: Dynamic backend servers - failed to properly remove: #{server_hash}" + end + end + end + end + dynamic_backend_servers = active_backend_servers + + return dynamic_backend_servers, 0 + end + + def refresh_active_backend_servers(lbs, active_bs, static_bs, dynamic_bs) + servers_state = haproxy_show_servers_state + + # record and test all known backend servers + all_bs = {} + results = {} + lbs.each do |lb_hash, lb| + all_bs[lb_hash] = {} + results[lb_hash] = {} + + # add all static backend servers + static_bs.each do |_, backend_servers| + backend_servers.each do |server_hash, backend_server| + all_bs[lb_hash][server_hash] = backend_server + if servers_state.dig lb_hash, server_hash, 'srv_op_state' + results[lb_hash][server_hash] = servers_state[lb_hash][server_hash]['srv_op_state'].to_i + else + results[lb_hash][server_hash] = -1 + end + end + end + + # add all dynamic backend servers + dynamic_bs.each do |_, backend_servers| + backend_servers.each do |server_hash, backend_server| + all_bs[lb_hash][server_hash] = backend_server + if servers_state.dig lb_hash, server_hash, 'srv_op_state' + results[lb_hash][server_hash] = servers_state[lb_hash][server_hash]['srv_op_state'].to_i + else + results[lb_hash][server_hash] = -1 + end + end + end + end + + # now we can gather the results (one by one) + all_bs.each do |lb_hash, backend_servers| + backend_servers.each do |server_hash, backend_server| + test_result = results[lb_hash][server_hash] + + if test_result == 2 + # backend server is alive + + if active_bs[lb_hash].key?(server_hash) + # update it + backend_server = deploy_backend_server(backend_server, :update) + else + # re-add it + backend_server = deploy_backend_server(backend_server, :add) + end + + unless backend_server && backend_server['status'] == :deploy_success + logger.debug "VNF HAPROXY: Failed to refresh backend server: #{server_hash} - skipping..." + next + end + else + # backend server is dead + + # skip it if already is removed + next unless active_bs[lb_hash].key?(server_hash) + + srv_time_since_last_change = servers_state.dig lb_hash, server_hash, 'srv_time_since_last_change' + # prevent one-vnf from removing the server at once + if !srv_time_since_last_change.nil? 
and srv_time_since_last_change.to_i > 600 + backend_server = remove_backend_server(backend_server) + unless backend_server['status'] == :undeploy_success + logger.debug "VNF HAPROXY: Failed to remove dead backend server: #{server_hash}" + end + end + end + end + end + end + + def get_active_backend_servers(lbs) + # initialize active backend servers + active_backend_servers = {} + lbs.each do |lb_hash, _| + active_backend_servers[lb_hash] = {} + end + + bs_tuples = [] + haproxy_show_servers_state.each do |be_name, servers| + servers.each do |srv_name, state| + bs_tuples << { + 'host' => state['srv_addr'], + 'port' => state['srv_port'].to_i + } + end + end + + # for each tuple find lb and create backend server + bs_tuples.each do |bs_tuple| + lbs.each do |lb_hash, lb| + server_config = {} + server_config['server-host'] = bs_tuple['host'] + + # to be in sync with the usage in the rest of the plugin: + # no port ==> nil + # + # therefore ignore port 0 (zero) + if bs_tuple['port'] > 0 + server_config['server-port'] = bs_tuple['port'] + end + + server_hash, backend_server = create_backend_server(server_config, lb) + + unless backend_server + logger.debug "VNF HAPROXY: This is a bug in parsing active backend servers (#{bs_tuple}) - skipping..." + break + end + + active_backend_servers[lb_hash][server_hash] = backend_server + break + end + end + + return active_backend_servers + end +end +# rubocop:enable Style/Next +# rubocop:enable Style/RedundantReturn diff --git a/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/loadbalancer.rb b/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/loadbalancer.rb new file mode 100644 index 00000000..8ba3e076 --- /dev/null +++ b/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/loadbalancer.rb @@ -0,0 +1,1041 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2022, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +# rubocop:disable Style/Next +# rubocop:disable Style/RedundantReturn + +# LoadBalancer VNF plugin +class LoadBalancer < Appliance::Plugin + + # + # plugin interface + # + + def initialize(app_config, logger) + super('loadbalancer', app_config, logger) + end + + def configure(app_config) + super + + # TODO: how to treat interfaces? Filter out LB addresses? + + # list of LB interfaces (and by extension their vnets via NIC ids) + @ifaces = [] + if @config.key?('interfaces') + @ifaces = @config['interfaces'] + end + + # TODO: is this naming scheme always valid: == ETH ? 
+ # This will create dict such as this: + # { 0: "eth0", 3: "eth1" } + @nic_ids = {} + @ifaces.each do |nic| + @nic_ids[nic['one-name'].delete('^0-9')] = nic['real-name'] + end + + # TODO: improve reconfigure on SIGHUP or reload + # TODO: create and check new custom chain for LB + execute_cmd("ipvsadm --clear") + execute_cmd("iptables -t mangle -F PREROUTING") + + # + # prepare loadbalancer variables + # + + # TODO: do sanity checks + + # TODO: this will erase the old on reconfigure + # the following uses lb hash (ip:port:proto;) as keys + @lbs = {} + @static_real_servers = {} + @dynamic_real_servers = {} + + # each lb must have unique index + @lb_indices = {} + + @lb_configs = nil + if @config.key?('lbs') + @lb_configs = @config['lbs'] + unless @lb_configs.is_a?(Array) + logger.error "VNF LB: List of LBs must be an array - ABORT..." + return -1 + end + end + + if @config.key?('fwmark-offset') + @fwmark_offset = @config['fwmark-offset'] + unless @fwmark_offset.is_a?(Integer) && @fwmark_offset > 0 + @fwmark_offset = 10000 + logger.debug "VNF LB: fwmark must be an integer greater than zero - falling back to the default (#{fwmark_offset})..." + end + end + + @lb_onegate_enabled = nil + if @config.key?('onegate') && (!!@config['onegate'] == @config['onegate']) + @lb_onegate_enabled = @config['onegate'] + end + + # + # check that this plugin is actually enabled... + # + + # possibly skip rest of the configure section + if @config.key?('enabled') && (!!@config['enabled'] == @config['enabled']) + unless @config['enabled'] + logger.debug 'VNF LB: LoadBalancer plugin is disabled - no LB will be configured...' + return 0 + end + else + logger.debug "VNF LB: LoadBalancer plugin is not enabled or value is not boolean (#{@config['enabled']}) - no LB will be configured..." + return 0 + end + + # + # loop through config and create all validated lbs + # + + # TODO: improve sanity checks + + @lb_configs.each do |lb_config| + lb_hash, lb = configure_loadbalancer( + @lb_indices, + lb_config, + @fwmark_offset) + + unless lb + next + end + + lb = deploy_loadbalancer(lb, :add) + unless lb && lb['status'] == :deploy_success + logger.debug "VNF LB: Failed to setup LoadBalancer: #{lb_hash} - skipping..." + next + end + + # + # prepare global variables + # + + # store validated lb + + if @lbs.key?(lb_hash) + logger.debug "VNF LB: Duplicit LoadBalancer (#{lb_hash}) - skipping..." + next + else + @lbs[lb_hash] = lb + end + + unless @static_real_servers.key?(lb_hash) + @static_real_servers[lb_hash] = {} + end + + unless @dynamic_real_servers.key?(lb_hash) + @dynamic_real_servers[lb_hash] = {} + end + + # + # add static real servers + # + + if lb_config.key?('real-servers') + real_servers = lb_config['real-servers'] + else + next + end + + unless real_servers.is_a?(Array) && (real_servers.count > 0) + logger.debug 'VNF LB: No static real servers to configure for this LB' + next + end + + real_servers.each do |server_config| + server_hash, real_server = create_real_server(server_config, lb) + + unless real_server + logger.debug "VNF LB: Real server config is incomplete - skipping..." + next + end + + real_server = deploy_real_server(real_server, :add) + unless real_server && real_server['status'] == :deploy_success + logger.debug "VNF LB: Failed to setup real server: #{server_hash} - skipping..." 
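+                    # (note: reaching this branch means run_cmds() could not apply
+                    #  the ipvsadm rule produced by assemble_rs_cmds() for this
+                    #  static real server and has already rolled back any partial
+                    #  changes via the recorded undo commands)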
+ next + end + + # save the static real server to track changes for refresh + @static_real_servers[lb_hash][server_hash] = real_server + + # TODO: signal that real server was successfully deployed + end + end + end + + def run + # + # Dynamic real servers (OneGate) + # + + # no need to poll OneGate or monitor if we don't have any LB + return 0 unless @lbs.count > 0 + + # + # search for dynamic real servers if OneGate is enabled + # + + if @lb_onegate_enabled + @dynamic_real_servers, rc = refresh_dynamic_real_servers( + @lbs, + @static_real_servers, + @dynamic_real_servers) + + unless rc == 0 + logger.debug 'VNF LB: Failed to refresh dynamic real servers - check OneGate setup...' + end + end + + # + # Refresh / monitoring of real servers section + # + + # parse current LVS config + @active_real_servers = get_active_real_servers(@lbs) + + # walk through all LBs and re-add real servers or remove dead ones + refresh_active_real_servers( + @lbs, + @active_real_servers, + @static_real_servers, + @dynamic_real_servers) + end + + def cleanup + # this is executed on the one-vnf service termination or when the VNF + # plugin is disabled/stopped + logger.info 'Cleaning up Loadbalancer (removing all LVS rules)...' + execute_cmd("ipvsadm --clear") + execute_cmd("iptables -t mangle -F PREROUTING") + end + + private + + # + # other internal methods + # + + def execute_cmd(cmd_str, logme = true) + stdout, stderr, rc = Open3.capture3(cmd_str) + if (rc.exitstatus != 0) && logme + logger.error "VNF LB ERROR: #{stdout + stderr}" + end + + return stdout, rc + end + + def configure_loadbalancer(lb_indices, lb_config, fwmark_offset) + # gather lb info + lb = {} + lb['fwmark'] = nil + lb['address'] = nil + lb['protocol'] = nil + lb['tcp'] = nil + lb['udp'] = nil + lb['port'] = nil + lb['scheduler'] = nil + lb['method'] = nil + lb['timeout'] = nil + + # unique index + if lb_config.key?('index') && lb_config['index'].is_a?(Integer) + if lb_indices.key?(lb_config['index'].to_i) + logger.debug "VNF LB: Duplicit LoadBalancer index (#{lb_config['index']}) - skipping..." + return nil, nil + else + lb_indices[lb_config['index'].to_i] = true + end + else + logger.debug "VNF LB: LoadBalancer is missing integer index - skipping..." + return nil, nil + end + + # calculate fwmark + if lb_config.key?('lb-fwmark') && !lb_config['lb-fwmark'].to_s.strip.empty? + unless lb_config['lb-fwmark'].to_i > 0 + logger.debug "VNF LB: fwmark must be an integer > 0 ('#{lb_config['lb-fwmark']}') - skipping..." + return nil, nil + end + lb['fwmark'] = lb_config['lb-fwmark'].to_i + else + # use offset and index instead (safer) + lb['fwmark'] = fwmark_offset.to_i + lb_config['index'].to_i + end + + if lb_config.key?('lb-address') && !lb_config['lb-address'].to_s.strip.empty? + lb['address'] = lb_config['lb-address'].to_s.strip + else + logger.debug "VNF LB: LoadBalancer is missing address - skipping..." + return nil, nil + end + + if lb_config.key?('lb-port') && !lb_config['lb-port'].to_s.strip.empty? + lb['port'] = lb_config['lb-port'].to_i + end + + if lb_config.key?('lb-protocol') && !lb_config['lb-protocol'].to_s.strip.empty? + lb['protocol'] = lb_config['lb-protocol'].to_s.strip.downcase + case lb['protocol'] + when 'tcp' + lb['tcp'] = true + when 'udp' + lb['udp'] = true + when 'both' + lb['tcp'] = true + lb['udp'] = true + else + logger.debug "VNF LB: Unsupported protocol: '#{lb_config['lb-protocol']}' - skipping..." 
+ return nil, nil + end + end + + # port v protocol sanity check + if lb['port'] || lb['tcp'] || lb['udp'] + unless lb['port'] && ( lb['tcp'] || lb['udp'] ) + logger.debug "VNF LB: Both port ('#{lb['port']}') and protocol must be set or none - skipping..." + return nil, nil + end + end + + if lb_config.key?('lb-scheduler') && !lb_config['lb-scheduler'].to_s.strip.empty? + lb['scheduler'] = lb_config['lb-scheduler'].to_s.strip.downcase + # TODO: should I validate the value? Currently leaving to ipvsadm + end + + if lb_config.key?('lb-method') && !lb_config['lb-method'].to_s.strip.empty? + lb['method'] = lb_config['lb-method'].to_s.strip.downcase + unless lb_method(lb) + logger.debug "VNF LB: Unsupported method: '#{lb_config['lb-method']}' - skipping..." + return nil, nil + end + else + # default is masquerade + lb['method'] = 'nat' + end + + if lb_config.key?('lb-timeout') && !lb_config['lb-timeout'].to_s.strip.empty? + unless lb_config['lb-timeout'].to_i > 0 + logger.debug "VNF LB: Timeout must be an integer > 0 ('#{lb_config['lb-timeout']}') - skipping..." + return nil, nil + end + lb['timeout'] = lb_config['lb-timeout'].to_i + else + # default is 10s + lb['timeout'] = 10 + end + + # + # return loadbalancer with hash/index + # + + lb_hash = gen_lb_hash(lb) + + return lb_hash, lb + end + + # creates just a stub for internal usage + def create_loadbalancer(lb_config) + # gather lb info + lb = {} + lb['address'] = nil + lb['protocol'] = nil + lb['port'] = nil + lb['tcp'] = nil + lb['udp'] = nil + + if lb_config.key?('lb-address') && !lb_config['lb-address'].to_s.strip.empty? + lb['address'] = lb_config['lb-address'].to_s.strip + else + return nil, nil + end + + if lb_config.key?('lb-port') && !lb_config['lb-port'].to_s.strip.empty? + lb['port'] = lb_config['lb-port'].to_i + end + + if lb_config.key?('lb-protocol') && !lb_config['lb-protocol'].to_s.strip.empty? + lb['protocol'] = lb_config['lb-protocol'].to_s.strip.downcase + case lb['protocol'] + when 'tcp' + lb['tcp'] = true + when 'udp' + lb['udp'] = true + when 'both' + lb['tcp'] = true + lb['udp'] = true + else + return nil, nil + end + end + + # port v protocol sanity check + if lb['port'] || lb['tcp'] || lb['udp'] + unless lb['port'] && ( lb['tcp'] || lb['udp'] ) + return nil, nil + end + end + + # + # return loadbalancer with hash/index + # + + lb_hash = gen_lb_hash(lb) + + return lb_hash, lb + end + + # sanitize a server config and return validated real server + def create_real_server(server_config, lb) + server = {} + server['lb'] = lb + server['server-host'] = nil + server['server-port'] = nil + server['server-weight'] = nil + server['server-ulimit'] = nil + server['server-llimit'] = nil + + # TODO: sanity checks - eg. port must be integer ('x'.to_i makes zero...) + if server_config.key?('server-host') && !server_config['server-host'].to_s.strip.empty? + server['server-host'] = server_config['server-host'].to_s.strip + else + logger.debug 'VNF LB: Missing mandatory real server host - skipping...' + return nil, nil + end + + if server_config.key?('server-port') && !server_config['server-port'].to_s.strip.empty? + unless server_config['server-port'].to_i >= 0 + logger.debug 'VNF LB: Real server port must be an integer - skipping...' + return nil, nil + end + server['server-port'] = server_config['server-port'].to_i + end + + if server_config.key?('server-weight') && !server_config['server-weight'].to_s.strip.empty? 
+ unless server_config['server-weight'].to_i >= 0 + logger.debug 'VNF LB: Server weight must be an integer - ignoring...' + end + server['server-weight'] = server_config['server-weight'].to_i + end + + if server_config.key?('server-ulimit') && !server_config['server-ulimit'].to_s.strip.empty? + unless server_config['server-ulimit'].to_i >= 0 + logger.debug 'VNF LB: Server upper limit must be an integer - ignoring...' + end + server['server-ulimit'] = server_config['server-ulimit'].to_i + end + + if server_config.key?('server-llimit') && !server_config['server-llimit'].to_s.strip.empty? + unless server_config['server-llimit'].to_i >= 0 + logger.debug 'VNF LB: Server lower limit must be an integer - ignoring...' + end + server['server-llimit'] = server_config['server-llimit'].to_i + end + + # + # return real server with hash/index + # + + rs_hash = "#{server['server-host']}:#{server['server-port']}" + + return rs_hash, server + end + + def gen_lb_hash(lb) + return "#{lb['address']}:#{lb['port']}:#{lb['protocol']};" + end + + def assemble_lb_cmds(lb, cmds = []) + + # + # assemble ipvsadm command for LB + # + + case lb['status'] + when :add + cmd = 'ipvsadm -A' + when :update + cmd = 'ipvsadm -E' + when :delete + cmd = 'ipvsadm -D' + end + undo_cmd = 'ipvsadm -D' + + cmd += " -f #{lb['fwmark']}" + undo_cmd += " -f #{lb['fwmark']}" + + cmd += " -s #{lb['scheduler']}" if lb['scheduler'] + + cmds.append({:cmd => cmd, :undo => undo_cmd}) + + # + # create iptable rule(s) with a firewall mark + # + + # TODO: improve this with ifaces and chains + + if lb['tcp'] && lb['udp'] + # tcp + cmd = "iptables -t mangle -A" + undo_cmd = "iptables -t mangle -D" + + arg = " PREROUTING -d #{lb['address']}" + arg += " -m tcp -p tcp --dport #{lb['port']}" + arg += " -j MARK --set-mark #{lb['fwmark']}" + + cmd += arg + undo_cmd += arg + + cmds.append({:cmd => cmd, :undo => undo_cmd}) + + # udp + cmd = "iptables -t mangle -A" + undo_cmd = "iptables -t mangle -D" + + arg = " PREROUTING -d #{lb['address']}" + arg += " -m udp -p udp --dport #{lb['port']}" + arg += " -j MARK --set-mark #{lb['fwmark']}" + + cmd += arg + undo_cmd += arg + + cmds.append({:cmd => cmd, :undo => undo_cmd}) + elsif lb['tcp'] || lb['udp'] + cmd = "iptables -t mangle -A" + undo_cmd = "iptables -t mangle -D" + + arg = " PREROUTING -d #{lb['address']}" + arg += " -m tcp -p tcp --dport #{lb['port']}" if lb['tcp'] + arg += " -m udp -p udp --dport #{lb['port']}" if lb['udp'] + arg += " -j MARK --set-mark #{lb['fwmark']}" + + cmd += arg + undo_cmd += arg + + cmds.append({:cmd => cmd, :undo => undo_cmd}) + else + cmd = "iptables -t mangle -A" + undo_cmd = "iptables -t mangle -D" + + arg = " PREROUTING -d #{lb['address']}" + arg += " -j MARK --set-mark #{lb['fwmark']}" + + cmd += arg + undo_cmd += arg + + cmds.append({:cmd => cmd, :undo => undo_cmd}) + end + + return cmds + end + + def deploy_loadbalancer(lb, status = :add) + lb['status'] = status + + unless [:add, :update].include? lb['status'] + logger.error "VNF LB: This is a bug: wrong internal state for deploy of LoadBalancer (status: #{lb['status']})..." 
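+            # (illustrative sketch, values assumed rather than taken from the code:
+            #  for a valid status, run_cmds() below executes the commands built by
+            #  assemble_lb_cmds(), e.g. for an lb with fwmark 10001, scheduler 'rr',
+            #  address 10.0.0.1 and tcp port 80 roughly:
+            #    ipvsadm -A -f 10001 -s rr
+            #    iptables -t mangle -A PREROUTING -d 10.0.0.1 -m tcp -p tcp --dport 80 -j MARK --set-mark 10001
+            #  each paired with a matching undo command for rollback)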
+ return nil + end + + rc = run_cmds(assemble_lb_cmds(lb)) + + if rc == 0 + lb['status'] = :deploy_success + else + lb['status'] = :deploy_fail + end + + return lb + end + + def assemble_rs_cmds(server, cmds = []) + lb = server['lb'] + + case server['status'] + when :add + cmd = 'ipvsadm -a' + when :update + cmd = 'ipvsadm -e' + when :delete + cmd = 'ipvsadm -d' + end + undo_cmd = 'ipvsadm -d' + + arg = " -f #{lb['fwmark']}" + + if server['server-port'] + arg += " -r #{server['server-host']}:#{server['server-port']}" + else + arg += " -r #{server['server-host']}" + end + cmd += arg + undo_cmd += arg + + case server['status'] + when :add,:update + cmd += " -w #{server['server-weight']}" if server['server-weight'] + cmd += " -x #{server['server-ulimit']}" if server['server-ulimit'] + cmd += " -y #{server['server-llimit']}" if server['server-llimit'] + cmd += " #{lb_method(lb)}" + end + + cmds.append({:cmd => cmd, :undo => undo_cmd}) + + return cmds + end + + def deploy_real_server(server, status = :add) + server['status'] = status + + unless [:add, :update].include? server['status'] + logger.error "VNF LB: This is a bug: wrong internal state for deploy of real server (status: #{server['status']})..." + return nil + end + + rc = run_cmds(assemble_rs_cmds(server)) + + if rc == 0 + server['status'] = :deploy_success + else + server['status'] = :deploy_fail + end + + return server + end + + def remove_real_server(server) + server['status'] = :delete + + rc = run_cmds(assemble_rs_cmds(server)) + + if rc == 0 + server['status'] = :undeploy_success + else + server['status'] = :undeploy_fail + end + + return server + end + + def run_cmds(cmds) + cmds_undo_stack = [] + cmds.each do |cmd_item| + _, rc = execute_cmd(cmd_item[:cmd]) + if rc.exitstatus != 0 + # revert all previous steps + cmds_undo_stack.each do |undo_cmd| + execute_cmd(undo_cmd) || true + end + return -1 + end + cmds_undo_stack.unshift(cmd_item[:undo]) + end + return 0 + end + + def lb_method(lb) + case lb['method'] + when 'nat' + return '-m' + when 'dr' + return '-g' + end + return nil + end + + def refresh_dynamic_real_servers(lbs, static_real_servers, dynamic_real_servers) + # query OneGate + output, rc = execute_cmd('onegate service show --json') + if rc.exitstatus != 0 + return dynamic_real_servers, -1 + end + + oneflow_service = JSON.parse(output) + + # collect all VM IDs inside this OneGate/OneFlow service + # TODO: verify that those keys are really there + found_vms = [] + found_roles = oneflow_service['SERVICE']['roles'] + found_roles.each do |role| + role['nodes'].each do |node| + _vmid = node['vm_info']['VM']['ID'] + found_vms.append(_vmid.to_i) if !_vmid.to_s.strip.empty? 
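+                # (illustrative note: every VM collected here is queried below with
+                #  "onegate vm show --json <vmid>" and its USER_TEMPLATE is scanned
+                #  for variables following the ONEGATE_LB<index>_<key> convention,
+                #  for example - values assumed, not taken from the code:
+                #    ONEGATE_LB0_IP          = "10.0.0.1"
+                #    ONEGATE_LB0_PORT        = "80"
+                #    ONEGATE_LB0_PROTOCOL    = "tcp"
+                #    ONEGATE_LB0_SERVER_HOST = "192.168.100.10"
+                #    ONEGATE_LB0_SERVER_PORT = "8080"
+                #  which map onto the lb-address/lb-port/lb-protocol and server-*
+                #  keys handled further below)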
+            end
+        end
+
+        # find all relevant context variables from user template
+        onegate_lbs = {}
+        found_vms.each do |vmid|
+            # query OneGate
+            output, rc = execute_cmd("onegate vm show --json #{vmid}")
+            if rc.exitstatus != 0
+                next
+            end
+
+            onegate_lbs[vmid] = {}
+
+            vm_info = JSON.parse(output)
+            vm_info['VM']['USER_TEMPLATE'].each do |context_var, context_value|
+                if m = /^ONEGATE_LB(?<lbindex>[0-9]+)_(?<lbkey>.*)$/.match(context_var)
+                    lb_index = m['lbindex'].to_i
+                    lb_key = m['lbkey'].to_s.downcase
+
+                    unless onegate_lbs[vmid].key?(lb_index)
+                        onegate_lbs[vmid][lb_index] = {}
+                    end
+
+                    onegate_lbs[vmid][lb_index][lb_key] = context_value
+                end
+            end
+        end
+
+        # create an empty copy of dynamic real servers
+        active_real_servers = {} # to track active setup
+        dynamic_real_servers.each do |lb_hash, _|
+            active_real_servers[lb_hash] = {}
+        end
+
+        # walk through all found dynamic lb configs and add real servers in
+        # case such an lb was configured, otherwise skip it
+        onegate_lbs.each do |vmid, dyn_lbs|
+            dyn_lbs.each do |_, dyn_lb|
+                lb_config = {}
+                lb_config['lb-address'] = (dyn_lb['ip'] if dyn_lb.key?('ip')) || ""
+                lb_config['lb-protocol'] = (dyn_lb['protocol'] if dyn_lb.key?('protocol')) || ""
+                lb_config['lb-port'] = (dyn_lb['port'] if dyn_lb.key?('port')) || ""
+
+                # if the lb is incomplete then its hash is incomplete and no
+                # such lb will be found
+                lb_hash, lb = create_loadbalancer(lb_config)
+
+                unless lb
+                    logger.debug "VNF LB: Dynamic real servers - LoadBalancer designation is incomplete: #{dyn_lb} - skipping..."
+                    next
+                end
+
+                # skip lb which is not configured
+                unless lbs.key?(lb_hash)
+                    logger.debug "VNF LB: Dynamic real servers - LoadBalancer does not exist: #{lb_hash} - skipping..."
+                    next
+                end
+
+                server_config = {}
+                server_config['server-host'] = (dyn_lb['server_host'] if dyn_lb.key?('server_host')) || ""
+                server_config['server-port'] = (dyn_lb['server_port'] if dyn_lb.key?('server_port')) || ""
+                server_config['server-weight'] = (dyn_lb['server_weight'] if dyn_lb.key?('server_weight')) || ""
+                server_config['server-ulimit'] = (dyn_lb['server_ulimit'] if dyn_lb.key?('server_ulimit')) || ""
+                server_config['server-llimit'] = (dyn_lb['server_llimit'] if dyn_lb.key?('server_llimit')) || ""
+
+                # skip server which does not have at least a host
+                if server_config['server-host'].to_s.strip.empty?
+                    logger.debug "VNF LB: Dynamic real servers - missing host part: #{dyn_lb} - skipping..."
+                    next
+                end
+
+                #
+                # configure dynamic real servers
+                #
+
+                server_hash, real_server = create_real_server(server_config, lbs[lb_hash])
+
+                unless real_server
+                    logger.debug "VNF LB: Dynamic real servers - config is incomplete: #{dyn_lb} - skipping..."
+                    next
+                end
+
+                if static_real_servers[lb_hash].key?(server_hash)
+                    # TODO: skip or overwrite...
+                    logger.debug "VNF LB: Dynamic real servers - conflict with existing static real server (#{server_hash}) - skipping..."
+                    next
+                end
+
+                if dynamic_real_servers[lb_hash].key?(server_hash)
+                    # update the old one but do not deploy - let refresh do that
+                    #real_server = deploy_real_server(real_server, :update)
+                    true
+                else
+                    real_server = deploy_real_server(real_server, :add)
+
+                    unless real_server && real_server['status'] == :deploy_success
+                        logger.debug "VNF LB: Dynamic real servers - failed to setup: #{server_hash} - skipping..."
+ next + end + end + + # save the dynamic real server to track changes for refresh + dynamic_real_servers[lb_hash][server_hash] = real_server + active_real_servers[lb_hash][server_hash] = real_server + + # TODO: signal that real server was successfully deployed + end + end + + # + # delete old real servers configured via OneGate + # + + dynamic_real_servers.each do |lb_hash, real_servers| + real_servers.each do |server_hash, real_server| + if !active_real_servers[lb_hash].key?(server_hash) + real_server = remove_real_server(real_server) + + unless real_server['status'] == :undeploy_success + logger.debug "VNF LB: Dynamic real servers - failed to properly remove: #{server_hash}" + end + end + end + end + dynamic_real_servers = active_real_servers + + return dynamic_real_servers, 0 + end + + def refresh_active_real_servers(lbs, active_rs, static_rs, dynamic_rs) + # we will utilize Healthcheck object running all tests concurrently + healthcheck = Healthcheck.new + + # record and test all known real servers + all_rs = {} + results = {} + lbs.each do |lb_hash, lb| + all_rs[lb_hash] = {} + results[lb_hash] = {} + + # add all static real servers + static_rs.each do |_, real_servers| + real_servers.each do |server_hash, real_server| + all_rs[lb_hash][server_hash] = real_server + results[lb_hash][server_hash] = healthcheck.async.test(real_server) + end + end + + # add all dynamic real servers + dynamic_rs.each do |_, real_servers| + real_servers.each do |server_hash, real_server| + all_rs[lb_hash][server_hash] = real_server + results[lb_hash][server_hash] = healthcheck.async.test(real_server) + end + end + end + + # now we can gather the results (one by one) + all_rs.each do |lb_hash, real_servers| + real_servers.each do |server_hash, real_server| + test_result = results[lb_hash][server_hash].value + + if test_result == 0 + # real server is alive + + if active_rs[lb_hash].key?(server_hash) + # update it + real_server = deploy_real_server(real_server, :update) + else + # re-add it + real_server = deploy_real_server(real_server, :add) + end + + unless real_server && real_server['status'] == :deploy_success + logger.debug "VNF LB: Failed to refresh real server: #{server_hash} - skipping..." 
+                        next
+                    end
+                else
+                    # real server is dead
+
+                    # skip it if it is already removed
+                    next unless active_rs[lb_hash].key?(server_hash)
+
+                    real_server = remove_real_server(real_server)
+
+                    unless real_server['status'] == :undeploy_success
+                        logger.debug "VNF LB: Failed to remove dead real server: #{server_hash}"
+                    end
+                end
+            end
+        end
+    end
+
+    def get_active_real_servers(lbs)
+        # initialize active real servers
+        active_real_servers = {}
+        lbs.each do |lb_hash, _|
+            active_real_servers[lb_hash] = {}
+        end
+
+        # query ipvsadm
+        output, rc = execute_cmd('ipvsadm --save')
+        if rc.exitstatus != 0
+            return nil
+        end
+
+        # parse ipvsadm output and create triplets (fwmark, host, port)
+        rs_triplets = []
+        output.each_line do |rs|
+            rs_triplet = {}
+
+            if m = / -f (?<fwmark>[0-9]+) /.match(rs)
+                rs_triplet['fwmark'] = m['fwmark'].to_i
+            end
+
+            if m = / -r (?<host>[^:]+):(?<port>[0-9]+) /.match(rs)
+                rs_triplet['host'] = m['host'].to_s
+                rs_triplet['port'] = m['port'].to_i
+            end
+
+            if rs_triplet.key?('fwmark') && rs_triplet.key?('host')
+                rs_triplets.append(rs_triplet)
+            end
+        end
+
+        # for each triplet find lb and create real server
+        rs_triplets.each do |rs_triplet|
+            lbs.each do |lb_hash, lb|
+                next unless lb['fwmark'] == rs_triplet['fwmark']
+
+                server_config = {}
+                server_config['server-host'] = rs_triplet['host']
+
+                # to be in sync with the usage in the rest of the plugin:
+                # no port ==> nil
+                #
+                # therefore ignore port 0 (zero)
+                if rs_triplet['port'] > 0
+                    server_config['server-port'] = rs_triplet['port']
+                end
+
+                server_hash, real_server = create_real_server(server_config, lb)
+
+                unless real_server
+                    logger.debug "VNF LB: This is a bug in parsing active real servers (#{rs_triplet}) - skipping..."
+                    break
+                end
+
+                active_real_servers[lb_hash][server_hash] = real_server
+                break
+            end
+        end
+
+        return active_real_servers
+    end
+end
+
+# TODO: this deserves more love
+class Healthcheck
+    include Concurrent::Async
+
+    def test(real_server)
+        tcp = real_server['lb']['tcp']
+        udp = real_server['lb']['udp']
+        timeout = real_server['lb']['timeout']
+        host = real_server['server-host']
+        port = real_server['server-port']
+
+        result = 0
+
+        if tcp || udp
+            result = tcp_check(host, port, timeout) if tcp
+
+            return result unless result == 0
+
+            result = udp_check(host, port, timeout) if udp
+        else
+            result = ping_check(host, timeout)
+        end
+
+        return result
+    end
+
+    # shamelessly copied from here:
+    # https://spin.atomicobject.com/2013/09/30/socket-connection-timeout-ruby/
+    def tcp_connect(host, port, timeout)
+        # Convert the passed host into structures the non-blocking calls
+        # can deal with
+        addr = Socket.getaddrinfo(host, nil)
+        sockaddr = Socket.pack_sockaddr_in(port, addr[0][3])
+
+        Socket.new(Socket.const_get(addr[0][0]), Socket::SOCK_STREAM, 0).tap do |socket|
+            socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)
+
+            begin
+                # Initiate the socket connection in the background. If it doesn't
+                # fail immediately it will raise an IO::WaitWritable
+                # (Errno::EINPROGRESS) indicating the connection is in progress.
+                socket.connect_nonblock(sockaddr)
+
+            rescue IO::WaitWritable
+                # IO.select will block until the socket is writable or the timeout
+                # is exceeded - whichever comes first.
+                if IO.select(nil, [socket], nil, timeout)
+                    begin
+                        # Verify there is now a good connection
+                        socket.connect_nonblock(sockaddr)
+                    rescue Errno::EISCONN
+                        # Good news everybody, the socket is connected!
+                    rescue
+                        # An unexpected exception was raised - the connection is no good.
+ socket.close + raise + end + else + # IO.select returns nil when the socket is not ready before timeout + # seconds have elapsed + socket.close + raise "Connection timeout" + end + end + end + end + + def cmd_check(cmd_str) + stdout, stderr, rc = Open3.capture3(cmd_str) + + return stdout, stderr, rc + end + + # trivial tcp check + def tcp_check(host, port, timeout) + result = -1 + begin + socket = tcp_connect(host, port, timeout) + result = 0 + socket.close + rescue + result = -1 + end + + return result + end + + # TODO: create a better version... + # trivial udp check (it will just ping...) + def udp_check(host, port, timeout) + return ping_check(host, timeout) + end + + # trivial ping check (using fping) + def ping_check(host, timeout) + _, _, rc = cmd_check("fping -c 1 -t #{timeout * 1000} #{host}") + return rc.exitstatus + end +end +# rubocop:enable Style/Next +# rubocop:enable Style/RedundantReturn diff --git a/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/sdnat4.rb b/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/sdnat4.rb new file mode 100644 index 00000000..8049ccd1 --- /dev/null +++ b/appliances/lib/artifacts/vnf/one-vnf/lib/appliance/plugin/sdnat4.rb @@ -0,0 +1,407 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +# rubocop:disable Style/Next +# rubocop:disable Style/RedundantReturn + +# SNAT/DNAT IPv4 VNF plugin +class SDNAT4 < Appliance::Plugin + + CHAINS = { + 'PREROUTING' => 'one-dnat4', + 'POSTROUTING' => 'one-snat4' + } + + # + # plugin interface + # + + def initialize(app_config, logger) + super('sdnat4', app_config, logger) + end + + def configure(app_config) + super + + # list of NATed interfaces (and by extension their vnets via NIC ids) + @ifaces = [] + if @config.key?('interfaces') + @ifaces = @config['interfaces'] + end + + # TODO: is this naming scheme always valid: == ETH ? + # This will create dict such as this: + # { 0: "eth0", 3: "eth1" } + @nic_ids = {} + @ifaces.each do |nic| + @nic_ids[nic['one-name'].delete('^0-9')] = nic['real-name'] + end + + # TODO: should not more logic from run to be moved here? + end + + def run + # TODO: one enabled interface makes little sense - only if VNF itself + # is doing SNAT/DNAT on itself...should this be ifaces.count > 1 ? 
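+        # (overview of the steps implemented below, for orientation:
+        #   1. ask OneGate which NICs/vnets are attached to this vrouter,
+        #   2. keep only the NICs that belong to the NATed interfaces above,
+        #   3. walk those vnets (parents and leases) looking for EXTERNAL aliases,
+        #   4. resolve every external alias IP to the IP of its parent NIC,
+        #   5. rebuild the one-dnat4/one-snat4 chains to DNAT/SNAT between them,
+        #   6. add/remove the alias IPs on the loopback device accordingly)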
+ unless @ifaces.is_a?(Array) && (@ifaces.count > 0) + logger.debug 'VNF SNAT/DNAT IPv4: no NATed interfaces provided' + return 0 + end + + # Query OneGate to discover which networks are attached to the vrouter + output, rc = execute_cmd('onegate vrouter show --json --extended') + if rc.exitstatus != 0 + return -1 + end + + vrouter = JSON.parse(output) + + # filter vnets based on the NATed interfaces *AND* store the real NIC + # name + nics = vrouter['VROUTER']['TEMPLATE']['NIC'] + allowed_nics = [] + nics.each do |nic| + if @nic_ids.key?(nic['NIC_ID']) + nic['REAL_NIC_NAME'] = @nic_ids[nic['NIC_ID']] + allowed_nics.append(nic) + end + end + + # inspect all vnets attached to our NICs in search for external aliases + network_ids = traverse_networks(allowed_nics) + mapping = [] + network_ids.each do |network_id| + output, rc = execute_cmd('onegate vnet show --json --extended'\ + " #{network_id}") + if rc.exitstatus != 0 + return -1 + end + + vnet = JSON.parse(output) + + ars = vnet['VNET']['AR_POOL']['AR'] + + ars.each do |ar| + leases = [] + if ar['LEASES'].key?('LEASE') + leases = ar['LEASES']['LEASE'] + end + + leases.each do |lease| + new_map = {} + if lease.key?('EXTERNAL') && lease['EXTERNAL'] + new_map['EXTERNAL_ALIAS_VM'] = lease['VM'] + new_map['EXTERNAL_ALIAS_IP'] = lease['IP'] + new_map['EXTERNAL_ALIAS_PARENT_NIC'] = lease['PARENT'] + new_map['EXTERNAL_ALIAS_PARENT_NETWORK'] = \ + lease['PARENT_NETWORK_ID'] + mapping.append(new_map) + end + end + end + end + + # the second part of the pair + filtering (its network_id must match + # vrouter's allowed interface) + nic_map = [] + mapping.each do |alias_map| + network_id = alias_map['EXTERNAL_ALIAS_PARENT_NETWORK'] + + # filter interface + unless (nic = contains_network?(allowed_nics, network_id)) + next + end + + new_map = alias_map.dup + new_map['REAL_NIC_NAME'] = nic['REAL_NIC_NAME'] + new_map['NETWORK_ID'] = nic['NETWORK_ID'] + new_map['NIC_ID'] = nic['NIC_ID'] + + output, rc = execute_cmd('onegate vnet show --json --extended'\ + " #{network_id}") + if rc.exitstatus != 0 + return -1 + end + + vnet = JSON.parse(output) + + ars = vnet['VNET']['AR_POOL']['AR'] + + ars.each do |ar| + leases = [] + if ar['LEASES'].key?('LEASE') + leases = ar['LEASES']['LEASE'] + end + + leases.each do |lease| + if (lease['NIC_NAME'] == \ + alias_map['EXTERNAL_ALIAS_PARENT_NIC']) && \ + (lease['VM'] == alias_map['EXTERNAL_ALIAS_VM']) + new_map['EXTERNAL_ALIAS_DEST_IP'] = lease['IP'] + nic_map.append(new_map) + end + end + end + end + + # refresh iptables rules + + # Get initial iptables rules required as if there no NIC_ALIAS/NIC + # mappings + rules_pre = iptables_tnat_apply_init + # Modify initial iptables rules with NIC_ALIAS/NIC mappings + rules_post = iptables_tnat_apply_merge(rules_pre, nic_map) + + # Apply the inferred iptables rules + rules_post.each_line do |rule| + _, rc = execute_cmd("iptables -tnat #{rule}") + if rc.exitstatus != 0 + return -1 + end + end + + # add/remove aliased IPs from the vrouter + + current_ips = assigned_loopback_ips + + # filter through current ips (mark stale and prepare new) + new_ips = [] + nic_map.each do |alias_map| + if current_ips.include?(alias_map['EXTERNAL_ALIAS_IP']) + current_ips.delete(alias_map['EXTERNAL_ALIAS_IP']) + else + new_ips.append(alias_map['EXTERNAL_ALIAS_IP']) + end + end + + # delete extraneous IPs + current_ips.each do |ip| + _, rc = execute_cmd("ip address del #{ip}/32 dev lo") + if rc.exitstatus != 0 + return -1 + end + end + + # add new IPs + new_ips.each do |ip| + _, rc = execute_cmd("ip 
address add #{ip}/32 dev lo") + if rc.exitstatus != 0 + return -1 + end + end + end + + def cleanup + # remove all our iptables rules + CHAINS.each do |nat_chain, custom_chain| + _, rc = execute_cmd("iptables -tnat -S #{custom_chain}", false) + + if rc.exitstatus == 0 + # chain exists + + # flush rules in the chain + execute_cmd("iptables -tnat -F #{custom_chain}") + + # remove reference from the parent chain + execute_cmd("iptables -tnat -D #{nat_chain}"\ + " -j #{custom_chain}") + + # delete the chain + execute_cmd("iptables -tnat -X #{custom_chain}") + end + end + + # remove all our ips from the loopback interface + current_ips = assigned_loopback_ips + current_ips.each do |ip| + execute_cmd("ip address del #{ip}/32 dev lo") + end + end + + private + + # + # other internal methods + # + + def execute_cmd(cmd_str, logme = true) + stdout, stderr, rc = Open3.capture3(cmd_str) + if (rc.exitstatus != 0) && logme + logger.error "VNF SNAT/DNAT IPv4 ERROR: #{stdout + stderr}" + end + + return stdout, rc + end + + def assigned_loopback_ips + current_ips = [] + addrs = Socket.getifaddrs + addrs.each do |addr| + if addr && (addr.name == 'lo') && addr.addr.ipv4? + ip = addr.addr.ip_address + if ip !~ /^127/ + current_ips.append(ip) + end + end + end + + return current_ips + end + + # get iptables rules to apply for NAT table if no NIC/NIC_ALIAS detected + def iptables_tnat_apply_init + rules = '' + + CHAINS.each do |nat_chain, custom_chain| + output, rc = execute_cmd("iptables -tnat -S #{custom_chain}", false) + + if rc.exitstatus != 0 + # The chain does not exist, add rules to create it + rules += "-N #{custom_chain}\n" + else + output.each_line do |r| + next if r.include?("-N #{custom_chain}") + + # The chain does exist, add all rules belonging to the + # chain and mark them to be deleted initially + rules += r.gsub(/-A (.*)/, '-D \1') + end + end + + # ensure that our chain is entered first + output, = execute_cmd("iptables -tnat -S #{nat_chain} 1") + if output.strip != "-A #{nat_chain} -j #{custom_chain}".strip + rules += "-I #{nat_chain} 1 -j #{custom_chain}\n" + end + + # TODO: wipe out redundant rules + end + + rules + end + + # merge intial iptables rules to apply for NAT with the ones needed by + # NIC/NIC_ALIAS mapping + def iptables_tnat_apply_merge(rules, nics_maps) + nics_maps.each do |nat| + # DNAT rule + # TODO: should we create some list of allowed interfaces? + # jdnat = "#{CHAINS['PREROUTING']} -i #{nat['REAL_NIC_NAME']}"\ + jdnat = "#{CHAINS['PREROUTING']}"\ + " -d #{nat['EXTERNAL_ALIAS_IP']}/32"\ + ' -j DNAT'\ + " --to-destination #{nat['EXTERNAL_ALIAS_DEST_IP']}" + # Try to delete -D DNAT rule which means previously NIC_ALIAS still + # attached + if !rules.gsub!(/-D #{jdnat}\n/, '') + # Add -A rule if not DNAT rule found which means new NIC_ALIAS + # has been attached + rules += "-A #{jdnat}\n" + end + + # SNAT rule + # TODO: should we create some list of allowed interfaces? 
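+            # (for illustration, with EXTERNAL_ALIAS_IP 203.0.113.10 and
+            #  EXTERNAL_ALIAS_DEST_IP 192.168.100.10 - both assumed values - the
+            #  pair of rules managed here ends up as:
+            #    -A one-dnat4 -d 203.0.113.10/32 -j DNAT --to-destination 192.168.100.10
+            #    -A one-snat4 -s 192.168.100.10/32 -j SNAT --to-source 203.0.113.10 )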
+ # jsnat = "#{CHAINS['POSTROUTING']} -o #{nat['REAL_NIC_NAME']}"\ + jsnat = "#{CHAINS['POSTROUTING']}"\ + " -s #{nat['EXTERNAL_ALIAS_DEST_IP']}/32"\ + ' -j SNAT'\ + " --to-source #{nat['EXTERNAL_ALIAS_IP']}" + # Try to delete -D SNAT rule which means previously NIC_ALIAS still + # attached + if !rules.gsub!(/-D #{jsnat}\n/, '') + # Add -A rule if not SNAT rule found which means new NIC_ALIAS + # has been attached + rules += "-A #{jsnat}\n" + end + end + + rules + end + + def contains_network?(nics, network_id) + nics.each do |nic| + if nic['NETWORK_ID'] == network_id + return nic + end + end + + return false + end + + def recursive_network_traversing(initial_network_ids, searched_id) + output, rc = execute_cmd('onegate vnet show'\ + " --json --extended #{searched_id}") + if rc.exitstatus != 0 + # TODO: maybe exception and handle by caller? + return initial_network_ids + end + + vnet = JSON.parse(output) + network_ids = initial_network_ids.dup + new_found_network_ids = [] + + # check if the current vnet has a parent + if (parent_network_id = Integer(vnet['VNET']['PARENT_NETWORK_ID']) \ + rescue false) && !network_ids.include?(parent_network_id) + network_ids.append(parent_network_id) + new_found_network_ids.append(parent_network_id) + end + + # check VNETs under LEASE section + ars = vnet['VNET']['AR_POOL']['AR'] + + ars.each do |ar| + leases = [] + if ar['LEASES'].key?('LEASE') + leases = ar['LEASES']['LEASE'] + end + + leases.each do |lease| + if lease.key?('VNET') && !network_ids.include?(lease['VNET']) + network_ids.append(lease['VNET']) + new_found_network_ids.append(lease['VNET']) + end + end + end + + # we recurse the new found ids and this also serves as a termination + # condition when there is no new found network id + new_found_network_ids.each do |network_id| + network_ids = recursive_network_traversing(network_ids, network_id) + end + + return network_ids + end + + def traverse_networks(nics) + network_ids = [] + + nics.each do |nic| + # TODO: error checking + network_ids.append(nic['NETWORK_ID']) + end + + new_found_network_ids = network_ids.dup + new_found_network_ids.each do |network_id| + network_ids = recursive_network_traversing(network_ids, network_id) + end + + return network_ids + end + +end +# rubocop:enable Style/Next +# rubocop:enable Style/RedundantReturn diff --git a/appliances/lib/artifacts/vnf/one-vnf/one-vnf.rb b/appliances/lib/artifacts/vnf/one-vnf/one-vnf.rb new file mode 100755 index 00000000..87eca4bb --- /dev/null +++ b/appliances/lib/artifacts/vnf/one-vnf/one-vnf.rb @@ -0,0 +1,146 @@ +#!/usr/bin/env ruby + +# -------------------------------------------------------------------------- # +# Copyright 2002-2022, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +require 'syslog/logger' +require 'json' +require 'yaml' +require 'getoptlong' +require 'ipaddr' +require 'open3' +require 'socket' +require 'set' +require 'concurrent' + +# load appliance plugin framework +require_relative 'lib/appliance' +require_relative 'lib/appliance/plugin' + +# defaults +config_file = '/opt/one-appliance/etc/one-vnf-config.js' + +begin + GetoptLong.new( + ['--config', '-c', GetoptLong::REQUIRED_ARGUMENT], + ['--help', '-h', GetoptLong::NO_ARGUMENT] + ).each do |opt, arg| + case opt + when '--config' + config_file = arg + when '--help' + puts <<~EOT + #{File.basename($PROGRAM_NAME)} [-h|--help] + #{File.basename($PROGRAM_NAME)} [-c|--config CONFIG_FILE] run + #{File.basename($PROGRAM_NAME)} [-c|--config CONFIG_FILE] get + #{File.basename($PROGRAM_NAME)} [-c|--config CONFIG_FILE] set enabled|disabled + EOT + + exit(0) + end + end +rescue StandardError => e + STDERR.puts e.to_s + exit(-1) +end + +# default action is run +action = :run + +if ARGV.length > 0 + command = ARGV.shift + case command + when 'run' + action = :run + # continue below + when 'get' + action = :get + app = Appliance.new(config_file, action) + + if ARGV.length != 1 + STDERR.puts 'Missing argument for get' + exit(-1) + end + + plugin_name = ARGV.shift.to_s.strip + + + if app.get_plugin(plugin_name) + puts "enabled" + else + puts "disabled" + end + + exit(0) + when 'set' + action = :set + app = Appliance.new(config_file, action) + + if ARGV.length != 2 + STDERR.puts 'Missing argument(s) for set' + exit(-1) + end + + plugin_name = ARGV.shift.to_s.strip + plugin_state = ARGV.shift.to_s.strip.downcase + + case plugin_state + when 'enabled' + plugin_state = true + when 'disabled' + plugin_state = false + else + STDERR.puts "Unknown plugin state: #{plugin_state}" + exit(-1) + end + + if app.set_plugin(plugin_name, plugin_state) + app.save_config + end + + exit(0) + else + STDERR.puts "Unknown argument: #{command}" + exit(-1) + end +end + +# terminate +exit(0) unless action == :run + +# +# regular run below +# + +app = Appliance.new(config_file) + +# cleanup on exit +at_exit do + app.cleanup +end + +# setup trap on SIGHUP +Signal.trap('HUP') do + # ignore another HUP until we handle this one + this_handler = Signal.trap('HUP', 'IGNORE') + + app.reconfigure + + # set the handler back + Signal.trap('HUP', this_handler) +end + +app.run diff --git a/appliances/lib/artifacts/vnf/onekea-2.2.0/kea-hook-onelease4-1.1.1-r0.apk b/appliances/lib/artifacts/vnf/onekea-2.2.0/kea-hook-onelease4-1.1.1-r0.apk new file mode 100644 index 0000000000000000000000000000000000000000..c946778e3a93c5a521db5ad44cbf3d9c35bef63c GIT binary patch literal 32378 zcmYg$Wl$Vlur>|}!QCZTa0~7dAh^4`yUU^hf(3^lOOW8M!QFzpv$zvrahBc7`_)&s z?)>QL?&s;#a%Q&9P7PHY8r**u0^&USz1PD$`g@-RmQfCN`H6C`Q4aaZzlzGtlL`xe zCze0+sPJ(q@zyV}Fsm%EFezJ5BND@fWyikDLP79a!ye@0<>KSv<>dX$%WK2Q$;-pZ zX|2X-R)EOF#;(Asp=L(>i~1sGG8#`Y^ISbwTsR;j0GVZ8wC@`W46?KZ!=>!Dbe_ru zm;7V61?mM)Mnr&hwGeV1HWk)Px%ud$>?x03H^9&uqfzgKD!`bE(@?69Qs;xpk;6A* zkdNuA0M!mwwWi9evjJnE(baJgV`^7b_+gh>iUPj_$Hp$u*D=I))+pldc+Ym%Qt)|& zJ#FCG!+aeqgr!^1AUzjryerofdRcl+7W|U2O%V>>G+_g%-7LoZlm2ovtuNT9qE{P2 zI?L2GSoCU^UvX;!Y-NdKXwEYbW)xZ-7jggh4$m9MJJxu!`vBHmE9zz-a&?c~*DPk5 zEzZVykImUq>##j)`KPRMf2e%-HS_4sSj<0XM=Q4Rs$!<}1tPOOF*%p3rc=!vT-UIh zbm?F6Dw1^bQARTpHq)ZuYT!{3o_F?OxGZ+pBG3OCCV_(pzwJ;5c6FT6j?&!Xog#}B&|(PeUdT1yWmpw9G~p8nUt!;?2!{j*NA8TqT9_Q?gV9nsG1>`&p<|z^)pyjsXpAS<~_i*8kV;O 
ze0-gJWfTb2+&rP|(ox$&$cV7Vr;C?k{duSE2N%J!iNd^bT90%Uh56o67{tmR^UCXW z1%L9ns6KwrdUyxC_-;@=H?rg6_khm8`xa8$=H~V63$LQt6EC_kBKO?c&%=cTC3(dq zm1_b8VVtCr)`3^e-*^(P4mZbykV{`?A03^oFxUe09r??Zruq!IMFGJGWm>g)mOC^2 z`>ppOM;YL!;l}``?4}EdM`;JTF($^8!m3Xrsz%=T!m=-xEwrjPQdhar!fA#ZN`ZQg z+XE%*=HNYH@{4n(&F+T%j+`JK%Oq*rZe>cpGy?Y+(UR`)oTxTJd%M6!uFZk;LSDnY zSad_)ywbq${y;$^d#4?F1KN^ptaajYVg4^-rhwcb*7xZQ7~Vuqw=Nxf1h(M`+hnM& z<%M4?<~PFUOcb95GakN7v6E?jUBax%e*Fp*{QKBF{AJwnfqJhqB|7>ey%aOc(($l{ z7OjdvhT!l|BL8e=eO3#64K4lrbaf-BEd)IiH z>QFtn_y6~HIyXf3!`E)Oln9Ts4TVdF`<59hpOv3&g*&7uHK*`R0+&1NY?nQ6N{Nw# zo5LpH=EoU*C;s&~!uL=F6e_erC;VGx$@p=yeVX?6P(>MRF#>oDW+u1F(#q76-@;=b zzWeql1Mo$SgH`f`yeps;o}?CX2b>2atd{1R`RXP~Igb?V7)I9tUH>pMAvLphd+#)A z;}}0%kt>{{VJJ8>8PNXDZ9~pn9^i6Oi+X(Ml;<9v(7!kj{)u^&+6R<5yL0uB98G%e z8UaafU=x*6!mbPnVb|BBv<#Oz!#(RK`#&9)Hv*sbp;1fQ9EdAb$S{s8)7tswT>uNT z)ufdcN^=ZYCdU?lI^My(t9}HaFK=_8rqxYQCBO{kX1>F!AGZTG8y=$zV4EHipvT$> z3<(zK_UYZjU-%&v+t{O|0RV--o2zvfaOTajw@q~0BTa}#4!aXdaTEn zc-8S`T-SYM8?wn1TiSypTmu*EtLz?dHE*J>tg*{b3CthfyNRwO9iuRgyG!Y6oa^$-i@EqP}w~g|NG~R z-5x%b+S$KXresvLT-aPeO5tPput`&% z@@so%DJjzYbzGQaSm!0*1Z zfroI)vVuY1(UdoqgO`GKr}n?Qkk})}M3*v_c$+M^EFjFcvC}wv1(&&d5{Z)%{+t)9 zju_Tk{p~T<{<8YGr(13}2Ai z-sL4m#rgTSmd|@tC>44=_~7d6<${TBpv8Eh+IqphDl_WusF0>eiQt>CM^#h80;vKE zYL-(c#0JDap}!HY1whCZa6jR1vD+`43jRodLn%>yV%-vt>!A&H+*lMahGrnJ4fwKz z=EEDjYa+G7xB(8lfF(HKs)vreCCIGK8gPzXB^D)W;hj+e5RU<2TPSPc7Y?B^pbZo) z^jr8Dp8{ivj<9=#ivhlu*I@xTFJb^y-^+*uU}PJ39nl#68TpuW#@mY*XKJli^g|hint&Q( zK@AOpme&B^<~8NJhkN=9TiZ3X>GrbP{HqU}6YTblyJ5QHeQ<>o8x{1bzTb+pO z3Y+CWMBSn-){2`G&eNNF!H26Cj#PsBJV#@Lfy>dCo?-(19`1W+zDMCvuP&x_ExX;H zT*X7`6+NQGj#?L7ViHA+V0{moe3NZLR$e;so+k@`t%Z{^b)B}oPX;5|@+`W$Gr(t% zh`^femz#_vOV@2Lw~?&=KX_29&5ir6v`TBojIM`Sjz1^pLNzPUT@b2cfShYS`PS!` z)#RRT$jN+Z9HR})uggw;l?c&hqD^l8sqS^2pIOJTNV6ljxk>Kzwr-grw|dq4{>Zdu z3+H~N;i4ASAON;_Zf>`fedds6{(zV`MI+U!9go$+eb~EDSNS|SKi{lc)qT{0IwhWI zC&2YWar%8IsKIo+j3{t9DdZsTKu#Ry_Na^a&+w20V7<4K>C~FjLa?NzOnGguu$!hU zz6sWz%x$4>tj$dGVJGgoE2`8~z1Z*(beIv^J2J#@hF)))P;}XU+uN_b$~``hYJ-j+EqgW$0()04H%<=ZUaEtvPx1q?20OQ_yZ@R# zP?=Idl_zs4&}}JPd7vA0{FmZ$;GB;~9U}KbIs=Ivr3oj^9%lo1Np~GffDKcXse}-b zL;2$(R9p60dTrG$-n~)iGuGD4xrmrv$4etX#Zc%jJ4WP%AMn-;;C6ZWFd^ud-L{6iwcSz7epXCT6!BnjQ@DBPt!7`-c<>i}Zy{H^rkFZm4 z{{fIa={Ts(2OK0Ft@*j5p|;k4(a?a`X8rZ$MmSfohp+PxLHk8-a?o7Wn2aj;rLxB4 z%#vt}>VEDh`<|ymX`2nGcLOBD_)B%CS6 z*xdBs-q1q(yqp#=re-ayqN$IO85BIkf&tG2aN6fyM@y1E$W4oQ{rN#vw;q=YMKTxna+F z%zkCNx~W>X5H2*-v1O3edC& zfrw!3uC6_^z#n#v>{{*ExERXuXL47=LZz;4kHvg^rPRoyjFGEWh3x{?kbhf z0y_M9yV8OnZj*k+y5{Df8fN-Ze+3E8D!s&hjAKWe7L|dV(;RE&FdwJV?5=N?siKCN zMjm0vsZSeZ=l_i^41WH4VJX+KShy*(4ejirF0J|M$4+?7W&}C{+1!fU>%v zujqQt`lxW{vGYXVBp&6i%V@amItx2vm%pyVeWfuy1b)% z5Pq&%t-J~Q7^`k~f+opC^(USZb@TJbALKZ^wTFSqK z*qpUIh}b!Be6+|d{%MInMp)S<+yc?F&HV#}il}O*#uH#B`qwC)2}3 zMz|>7wF0E~X-V*@L8z2$>Bw6TI4u=Iva6k5^@{JiW-E8RmLjYEv zBe54-O9WRKM&Hr2^i1m7`p`mWwSQ7MYMtH}6^j2oWq-zOlK!?(n_-#1IC|q`x{=gL zZ`TnAZUyH<+vd{mZC-Q|VxA#Llx|EJ9UB$w{u;jS+Ip*LP+Ta-Pesr^T zO|WZRqskpE++q9(fWf z70Xxs1ge#WA5?MP#$szn#iJw7#TtjM9A^2nItt4VTTm$@j#D<{ZJ8=6TF$M&k1&ob zA`dJ79U6=qYv?_~66_tfSoAfnCjDfOFb>ttS;?Ih`LlNS2i?C~rxUMb0QysL`6?=G zTW%}&yl5yVt9Cnl)N}lf7$u&6JKe>q~&|3 zsw z4JEwHQSCO;k1PVeP)y1u#|$4^3G+tdzS&TZ%~3-b&TFr|TUI>2UL;7lc;OoDcaBxC zF?DyuDuSLuL>K5&COemr5~)#y&#pOTS*<6z*5A2>Y&$}u2hJ~UuPclf5Coy2@Lr7-lT0!s(oGJ<=NxXtr-O+n(_+JV%47l%oP8QOanl`J@qCupl@;srDg1m|Xx6iUqIq!o*2|Z&$ z{#ak#a#2A3)f zWuwm8LUT=dk^!?8E=qarGJ8Z|-juFrQ!!gV_JWiN7JByB#0}U^KHwc@?dE}ZmY`Up zt1jwGukPMOi3RYIuS4Jq6 zG17Ea(hu8Lrmtr^p>0kh3<<1KCasiRZT;l@^Y|B^^#T6uep=44g#&}O`~lzM=)%6s z-)Z|a)0c~3B>O&Diy~6CS{Qwt%A8kuSGG15ifQ=eSzK`D^5k5J4*!w06K)y$;2*@h 
z-m$Qb`TavnK^Dy{#sMxm!KITXb-pFcz)6{7yPS!Y&h81hh$osn{&Q5C`iusfsS`fJ z0Sh929SDE71l=9k7uy?uOiY=S@Q+W!xQfi#RVa)%p};=er}xw)cVS%0?oBB|VQ~^{ zLD@d6me(iCei?m@c%R0gbNi@wg$Q$7-)Wz!(H6c^hD0BGujIx@)zu+y(RL@YHc4Hi zC`fALrKLz^D@m1*sLrcEBg5;Zd9Th2@gud^ue_wnEF3K<^Gbhx(iF){wV#`9OR@ME zQib69tB|#u6aP#h>-P*icaec*hP&p~7ez9fKm>P?djl!!)Z58nm>{%x_E^%Jh!%c} z&j;SHY?9ZHjjWR|YwBmty>1!ZqbgE29?mS=@Y9xBiGCgbCl9}1hcF+^(sj%-N8}B& z$9j)~?6KaLP8Q)L!uL$d11{gZ@!f}@>m=N>Lv5lKI7%I6rWR~i-RB?Li3u?!=}eMW zj{i}^b#6kF_KyYclNoiv;XdyEw_$b&<<(5{`<^y@O0p z=S>JIH18R6rS|t-IAvo6Fys$g;-Px$p%^B5OSyfS>iUIm@83QzY+M!bAlv8`ILk`#sdwFR04s`TqhB4-(u3%95#D4%Dtajh%!1^R$u)? zOfT@a@gISt_D@22?pmWg57sFLUa(i!tyVd4f61pP7x4+hv(h}qXe zDmhW!sLhk5w-6Tz>~c;s>B)jCmFmB+dfz*Dbsc*{d87-=nZi+xRZ_zVH&lG&I3)9Z z64{R^sgoaY)tO{ce$9A%O!mnA?RVzvxn5n?vUSsX@%JY|AwxLTu?yMi(V2@4mE&{_ zLLqfwf8gbd2#*mW&v&YSBUJG{K%V8hjU%xK@2TJ^&nfVSX}dfSd)0a}*+n1cMj$6j zQa*+Y5oCk3eOZ*JwdhOjQ2+GqoLizW_oX)Rw9nt-=o3NLOg|6NBG(p|)>L-=o8BlD z-3X&5&7w{`nvA2PRTtZilbt{Erqv&({;s(}ciO(^Mmw~2_8jZ@F8f_y_H$GVOV=U~ zMd~X~L}jnKD-sdRL6-+oib`euRO8Shnqo>9Lw?bRDCE;ujz%zY_yphiUapkLJ{Ax0 zZtpzh)pm6NLxvJHSH46Z;rFIcGpd^WBslWpa76#ue$&RnT^hKCCtFH({rrtHiwT3_ zV828yeXG`4Gi&e3MEhTQvQN}*u4JBbrO%G<8PzD!8MZ$eNaRp0>%wjTJ3XJ5IvuQ}ful_BN7P0)iS{a0iJRMACt z1ZU@}I$4h}hG6shd=}vXhBFVZ`{}*6{hhhd=tE=v3SqecPeKhWgI@IyCgZw$shh_l z^?L_rv@i}j*$&0=-(a3&ZbRDP4%%s>6kW5E?tTSKk&~%0;nvGWrE%m;8;uOl8(F0> zsQ_NP-9sE%S)nNa^+Q55KywvT4Bm17J_8(TSA5fvw^BNk6B(!}%XB2JP-S2S8mP?f0(RqYNQLJ^G)G zHCzwHTw$J`heUdWR*4kCHD+UNu}DPL-yP7R@W#no+v=$KtC$=to(JEhD90zMQi)Gd z!yHa5pij`+G;r4_(+kWPBA)#Ty0ZzGP7V6o@b|>Lz_M99%~K3({^>z(aZQ?PJ@9Ih zLx`&Ga4H(2^YT(c8Pq(CdJp2s#Y$K5Im%mk%EhC)aP`V_j}F)=>b$ZxdHl-J3I3NK za6@C3H2uN{2M`xaB7BSv)eTKTgprs$(sBg-A=-L+=oEe0wag(e}0;g8h5B}51AB=83u#&unTMzCHe9$ovoKRj;# zGAXhZTSxC9ge;hg`Rzz2KsUR?wj{Nb-z zp4zu0ombu<0M*r9Z_hU{+?BOJVyJ1tGzU@$h8PjXuK$M-jn9d`J4y@h9V9kUIl3G{Meu_D*f)cT z8VciCLWd^n_K7WDNcvxipk7hI1&JogfneYF&>o5E0AVnEF!>`xE-Y;O8<^$_-E9jI zst-aep!o4NUU9P?xE``2cu+7GR+xCy9=@Nr)6v@u>xjC^A-RL@NbNrWsOOGc;^yQu zrttUEpf)%$lvlcdLurEPpwEx7!#-*vU=)rZ(MQ3Arw^S3dq_}^TvcjlSZ_1}41X9A zGD+M`Bu4nymDSoSp6HJrL_L^_1<{yb0rd<(Z~Ao+0mMgKpjHcEL;&_k@)!>Q?i@IB z9}ST)L4FaR%;^ZpO@d;K3M=xVaCZ|N5VhARnnx&R4~#AVw(uIVb_i90)Jd?yv74 z*mWF;-FWMGj~@k85lkcAro&+ljWF3~>FuT%!MOsA1cCceb40-QY8*G-pd%jgq+USy z89eCqItU4xEDm>Xx+{R(jWF_Y5Sa7?2k{i>4t?mO8V~b>MqMC6tdFVuAg{SAs(L_J(hw9G>V0ksGVOa)L~}Dk zVQlxpS3LFGVLj-}?|L&Wdk}k&kF-Ix{jX@bNepIy$t!nJ;HCLF2gu#vE8-T>3(g~d z9aY%F2$=dRL0~HktM?icLLlIZnAB6Mg90(jMGYMR;mXEL^IgJ@3It zeiVoAt{#KH0iYzye7K}(ZE-lrVisdBs(C&ffGvMHv)Q5rBRue&uQbMHEMQn`6@3 z99{b$s8F5G5KPFwH%<@6o161(m?P#;#3bFVO~A4@As1P&Uh&vBGw)4NeOo<*`7P)5o5A=N?>Y!UHHkhG z0`-i%A*63e>>FbHR#EML=zp6k^ycJwt4#GjgYeB5dt;RThYmwf#R%RC#Cvm0zZp=< zxAgOGRM_V?+QWZs5xv1LZyq3j0c;U=3b^ZeHhryy>cT4jzuDQ7GVieIrn|2UAb=$; z#lgIbEQT2SgAPXA3luxQU@iMJbxY&9T=~ek z`Rtp<$BUCtp<{>yD>8^Xje&=>C3Z=MDDi=dTMH)_k`; z`b?tAAQFDZlSnO+@cZ7N7~<7AM)1Cz#H{0 za^y`vLxX+410F&doy=b2O&Q3io=L=21>R~fk@ge$e|;b=P5TM%$iK-?JF#yoMMN^b z#ev_+y~zjMM4Q)mO*GXvS&G-4_txLnN6a@W_YU#Wv_SY(-djL*WN95#a|0InUj;jn zdvCeTBTL^VB8@iE7&6=)iTCD76nbu8 z(<3$C5~UaSf(8 z70t^iqz3yGjJd=cmmS~SGqn`34AMLsxi&B!Bc9CjqkI1e#DBS!*vw+Nm2tw8sdkAM zsJbM=IhGn?G&a*Iu&CB{Vv+KeMm_#!gT&0&Be7#zFGg6ArWF@)P4m@W*Sy$*XZRF> zhz$SCw|(Yafck+8ccZKZq@P(wCRKT<{TCLAm-I-b^dOU_6V!OD52N|JH|j{q3Kcpw zAG|}nyyT8TaDD1l&I0juzSV1}O~_CSy;f)WfGXZy<7s6!CK1DF972{;L`oH7r0E@f zm>4_J5qa2R=CphV;k8S{rPf0pooQkB9D5uv;Q52U@Y0oYigxGGsq{jHX2U7J%2)sc zaEbG&UalL)I_99UCp?-UC{drFTh+0^F*sZ8wZk!%UzwzBTKx5#FxV%hdCY3F=F;qp zF4&16xLm&NDLSw4MSpW*_AzP^$Eh&mvp(wS;w*k7+i||}M%wg6$}rGFUb)b6Aqr7d+ 
zBaa0V#$KmY?KrUI_G6MvwHjEaJ0w?uri6Z>c&owaju+|95Q4v?++Bx}6xP&G)U@ND zF^dNO({L7`Q5*&?k!sb$B&leZ$Q+D>pM;n&e^v=EAYeX_h-qoFmx$bbc+n(kZcKA! zvN`kB`0gHRFfAJ%RGc;@TylMbnIWYPU_SMkX@$vVs(+}>D<%cdRff^#egm|jYPlmI zKL4UXVEzH$yk+4Kn&1r`F1L>s_9>7DEqgr(AHXZ+KzxNgWzI& z$7EL5h;GzW2ftEVD=6g)63c7BW{Fo@2JEx5)}ecFS$m6d$a{6*fojg(ZGydE2%qn$ zr7UiDX`jbPPXG=W=m!h_d#nL(xt^qyypcxlq}GVh&dMxK8} zP9mNdD{+R=ExmmkrzMGT?^Ag^`pMwW_Fb0|Wjc8I^zO`yemHuN-}@`zclQgK!hzW7 z`6@7}#RAa1j`I4Hay~t&vnpHL~M_t~h{B)TR$}Qvr0g?aw?zSrSu= zgl|t#)Q8=zOIc{3D~DfW`e>EOog27FNjK9weaX{enN;Ot)j=uWdN=B6ReSuc_Ef^dkCu?B*my?+jWK=pk*&q}v+|XF$bIb&|ri#*1r5 z`A_w-X@QWYN{Zps#0`_wU1yisZ2)=U^p9C_NYe+V17bPTj!DsHWh1&{x6g9o261AQ z+FxMf?t%>BcAnn7Klf_*TndX7{>^bmoD*rf8rS9_aieXx8K{sr8f$*jcFYzKf(&No zf4_8=l$|C=l=UnwT+W|6tR>X(YFaZ+j)+6T$oxLBIQJN}$ByhhV)VYhNv~Uk+2YSv z;quQKy4I9up1QK?JJb7Bi&yM1i`8p%dfsd$y{1h(`v<`ffd%?$hD$UbJAb~IB1WL4 zIe_S_$LC1}lzJ-`?+g@yXANq`x;K&Jnz!}TF2A>)plOw}CU)+h4PM;!bp_L(YEuQ8 zCJgOMX){VJx&#VC(7knosw{vwOhiAMun8(BNjHh>8~pU}v9>F78V!kr)N1uorH6#R z-WtX+m5@~+#pNrT4jJq9#rIe!^P>4$aAgyhxws~y$IZ)?EbPfy7n=MUx}R6~cX^i~ zNJIx-(ehZ4tIucdR5qp_uLSbFgQsZR4|nNen!VzGzD>NfZ-6%L{NsH-Pl_YhL|5`D z=c_K{Pa1=x{AlEH$h}!%Hk;Ufq!FY5Bca-?fc*dld6)P;dwi=JQEM;55$IM1YeJw4ic+MvpY<0dFsu_|{P-Tp_NcZ|2bYxQfU(SjkN?yf$(A;DmB_Tk1#qV;wlRKxKbk z9ikt{v(@nlBATF5hP$yJ-l2#L(X>>(OZ}vIUev>Uurf4JSmQ(BANQSg{yV;UauD+_ zha=tki%|>es1L*Xm+to12g0VCyuipT!16qmJ#!02z_SNed9~>24bJ(^^M zo9Q|S=QWO%?H!KFsa4&-)!SE=S+_OIYS^M>@ngB)H%oM*?mJD)OVWI%|%22-kG~^5u;+LeFxgv0Hwt6thb`+;TemObgy9sG;uXqRv@LYU4 zF9kZ!+|OHlq+tE2bzUOtx}bd{CZxA6hbr_ii&B4vxf6Wr+?3>_<-t*7d*&C>l7zW{ zF^L~|QtvRC+<0DOi1qhAd$qVZ$xdyv3@OEqs=npxZ@bTdF{_gV=fKL92+}CGXoVH_ zg#?44WilI<`?`yq^4yiuRk@&sWZMTg?{rqjqlOoJ$08vm%mmy%)`id7c#d(g<;CTJ zKN=iK0s0!E{X?`^A%jCJQ-rCKD!l0CRZI2iD`)8*7Tr*6U2hiME>Q^`e&75(Z*!e5 z0UG>@qLb09SNn48q8}Be8$K4u)sM)ee(W$s>XL0W43|mTP>}sv$rjOpdHjVS)wuee z{T#J1xR|3W;Y6!ya=c~u(|F7MUwmNRxNTmBV`RkK*Awc3vtB6|TiYkg_GF47F4MVl zHDYnYPqF@c()rX->pxPF&pBV6#ngyqm@DWGd4pTk$QwpFsZ~BWl%fTfZY%bW+nQA0 zcDU^bloGa!sX}G{07oki7-kKV9hn6kz@#-_f=%19j$+Q|p0~Mlf=+$Z2{S9=?YZ=m z=83U`A%c1BL`*GVMbd&wTt@w}x+X=sUxmDqRJ5Hm2T_6jZky2&Ixj3OWU~1+*fwAU zA58Mkp*9zx$m4qYZEh9E)xCefyO(&$al@;QPwnP@*%qS2-MY?>1x>u;tFbE3CdySg z&w~WaQ|Xh|9cS>T)bqHwJ&n|J{R64d zPwosF`65aem>%)oJBt-3*wx<;k+PgpA6a`e>&aMB3JnK$JW|=!CooIAjwG(_)gQna z|ESplcD3I-$^NlOl`Hbqufex$k*Ys>DZ?Y<&7>C&j%3^%$M71?%xEC>U>9L{YAV+e zqFO_0nhdhl{j@mEc+RKNjPqTWy`8iRu~TAXty{E1%RI*gby;%C_1} zVH3-MxczP$ko#0l#8BcjEmNO)gUmDAwxrQ*)1oGy$a49Ch1_eG;HJI8pp`j6rE4=B z*U6RsVa~gUsWqLwxZG5&Cnk)l^yMlpd&y@fhji+&HCQn39a*Z=J2Bu`C$an)Svc2S|CUlKHcfwk_nZNX;*!}`jA7ZvY zu*WsvJCqZA4I@-jG)#^T#h2;&MN8337{|B0^|Fz(fnSz)cU}GOCG|z%=(2G-1Q>kZ zrBIOE)%NbA4jw|uCbr|W*SLUe&y($f$4^&hB8RMAGMy`wAhpCGU5;a%?nb0^7IH;B*T=Bz z$xsBQ%v$L#2<1+SDon~PD)i$03vCK6d?k6I)1H=DB_5UF->~wXY+uxC<2OzW3#C(0 zI?;ZB7mMRSMCDdW7&6+9;AIS#zG6Y7Oeuu)TeAqssMB0?0Z-=v?K>>9I=a%)1 z;JEzjK0u5Z*08yz^|M~#59_7RfbkdG&q0kcrlZtx7eW5KqO=U*3C3fpsru(gN?Z4S z>ga{e4%ig6tdE%02&?51T`>G1d+!4E%-qq1xPqrG^W-Rr8)EE`l1s9DqA}spL)!&Q zF^bBF<+$mS^Xk&3}rJ~^YC zV*gY>CD`E^Y@z>J{^Sc2yw7`(b^Kj^)v5O^*;LJ#fg5)d$54;_VTWK=gJtPcVZFwN zcTMS|QLrN1p}pV@c{!kXajjoR+YUOfY5LfrXFwDwI2V~91RYZiT@pu_uVy-DFi5(R zb4ICVti#wOwX0}id3!5gU02H#Q4Qz1KxAu%XN{wv{TMiF@b0xSJcNo=bFg6z)JyW4 zHQe?UPki<{{ad?8VKbbm;;fbJ#bqcJi>f084Zmq`B*IpB8cVRfhkLlw8@*>Tml=C^(V zh*0!z`yoEws|;Y_M-S-g01ug&QrhG9n@O*!&pVprX}DtJ?xRgAee z!sMA2AoQT~SZLBzq^!I^NQ4Wd>@1#x&{v1k;`;!y^poMhXEYJ7FRr0>qEUN_a^zdK zlcBOkFK!z8F9dMc5@_L64({^4>IDtDv7!+&3Al45U1qu~8^7#~-`m!w@Ji#brZB0H zn>Xmn$~GtQ=7nmI3th(^5B-5!=ElY$Wa|;c>a$|P=RW&lo{+QaZz0N%-xw#5dEX=5 
[... base85-encoded GIT binary patch payload of kea-hook-onelease4-1.1.1-r0.apk omitted (not reproducible as text) ...]
zleNBcxyO8?3^ShAlGrVeh4d0hRiYmMOs=6p76AKMufK#lh$hT9I3}PLC9US;_ct}H zLr~yEZ6NMa)$bf}BeWN7U$;1pN?3Ym?6qLDj)e=al-56a92G(m>?KsUVQhPeZL1EW z?sfpvm6)eCHkL+(RmJq}qIkFq;v4>F>8Ma=e!_jl;7E!1U&f1>*4hXS5~+(8sqtRD zD1rV{F_(|4Aw)xs*6IM4CevHeBf#7NKZ<+3h7vaL nJt_OAqx~ZUQaC>Weox!}f3yA{gGaFKn@afgf1FGSP+$HJaz#ez literal 0 HcmV?d00001 diff --git a/appliances/lib/common.sh b/appliances/lib/common.sh new file mode 100644 index 00000000..253aeb72 --- /dev/null +++ b/appliances/lib/common.sh @@ -0,0 +1,503 @@ +#!/usr/bin/env bash + +# ---------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +# ---------------------------------------------------------------------------- # + + +# shellcheck disable=SC2086 +true + + +# args: +msg() +{ + msg_type="$1" + shift + + case "$msg_type" in + info) + printf "[%s] => " "$(date)" + echo 'INFO:' "$@" + ;; + debug) + printf "[%s] => " "$(date)" >&2 + echo 'DEBUG:' "$@" >&2 + ;; + warning) + printf "[%s] => " "$(date)" >&2 + echo 'WARNING [!]:' "$@" >&2 + ;; + error) + printf "[%s] => " "$(date)" >&2 + echo 'ERROR [!!]:' "$@" >&2 + return 1 + ;; + *) + printf "[%s] => " "$(date)" >&2 + echo 'UNKNOWN [?!]:' "$@" >&2 + return 2 + ;; + esac + return 0 +} + +# arg: +gen_password() +{ + pw_length="${1:-16}" + new_pw='' + + while true ; do + if command -v pwgen >/dev/null ; then + new_pw=$(pwgen -s "${pw_length}" 1) + break + elif command -v openssl >/dev/null ; then + new_pw="${new_pw}$(openssl rand -base64 ${pw_length} | tr -dc '[:alnum:]')" + else + new_pw="${new_pw}$(head /dev/urandom | tr -dc '[:alnum:]')" + fi + # shellcheck disable=SC2000 + [ "$(echo $new_pw | wc -c)" -ge "$pw_length" ] && break + done + + echo "$new_pw" | cut -c1-${pw_length} +} + +# arg: +is_ipv4_address() +{ + echo "$1" | grep '^[0-9.]*$' | awk ' + BEGIN { + FS = "."; + octet = 0; + } + { + for(i = 1; i <= NF; i++) + if (($i >= 0) && ($i <= 255)) + octet++; + } + END { + if (octet == 4) + exit 0; + else + exit 1; + }' +} + +get_local_ip() +{ + extif=$(ip r | awk '{if ($1 == "default") print $5;}') + local_ip=$(ip a show dev "$extif" | \ + awk '{if ($1 == "inet") print $2;}' | sed -e '/^127\./d' -e 's#/.*##') + + echo "${local_ip:-127.0.0.1}" +} + +# arg: +is_my_ip() +( + _ip="$1" + + _local_ips=$(ip a | \ + sed -n 's#^[[:space:]]*inet[[:space:]]\+\([^/[:space:]]\+\)[/[:space:]].*#\1#p') + + for _local_ip in ${_local_ips} ; do + if [ "$_ip" = "$_local_ip" ] ; then + return 0 + fi + done + + return 1 +) + +# returns an netmask in the old notation, eg.: 255.255.255.255 +# arg: +# +# NOTE: shamelessly copied from here: +# https://forums.gentoo.org/viewtopic-t-888736-start-0.html +cidr_to_mask () +( + # Number of args to shift, 255..255, first non-255 byte, zeroes + set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0 + [ $1 -gt 1 ] && shift $1 || shift + echo ${1-0}.${2-0}.${3-0}.${4-0} +) + +# Gets the network part of an IP +# 
arg: +get_network_ip() +( + awk -v ip="$1" -v mask="$2" 'END { + split(ip, ip_b, "."); split(mask, mask_b, "."); + for (i=1; i<=4; ++i) x = x "." and(ip_b[i], mask_b[i]); + sub(/^./, "", x); print x; }' +# +# NOTE: this originally never worked properly: +# https://gitlab.com/openconnect/vpnc-scripts/-/merge_requests/5 +# +# The fix is to first find the network address. +get_gw_ip() +( + _ip=$(echo "$1" | awk 'BEGIN{FS="/"}{print $1;}') + _mask=$(echo "$1" | awk 'BEGIN{FS="/"}{print $2;}') + + if echo "$_mask" | grep -q '^[0-9][0-9]*$' && [ "$_mask" -le 32 ] ; then + # ip had cidr prefix - we will find network ip + _mask=$(cidr_to_mask "$_mask") + _ip=$(get_network_ip "$_ip" "$_mask") + elif [ -n "$_mask" ] ; then + # netmask is garbage + return 1 + fi + + ip r g "$_ip" 2>/dev/null | awk ' + { + for(i = 1; i <= NF; i++) + { + if ($i == "src") + { + print $(i + 1); + exit 0; + } + } + } + ' +) + +# it will create a new hostname from an ip address, but only if the current one +# is just localhost and in that case it will also prints it on the stdout +# arg: [] +generate_hostname() +( + if [ "$(hostname -s)" = localhost ] ; then + if [ -n "$1" ] ; then + _new_hostname="$(echo $1 | tr -d '[:space:]' | tr '.' '-')" + else + _new_hostname="one-$(get_local_ip | tr '.' '-')" + fi + hostname "$_new_hostname" + hostname > /etc/hostname + hostname -s + fi +) + +# show default help based on the ONE_SERVICE_PARAMS +# service_help in appliance.sh may override this function +default_service_help() +{ + echo "USAGE: " + + for _command in 'help' 'install' 'configure' 'bootstrap'; do + echo " $(basename "$0") ${_command}" + + case "${_command}" in + help) echo ' Prints this help' ;; + install) echo ' Installs service' ;; + configure) echo ' Configures service via contextualization or defaults' ;; + bootstrap) echo ' Bootstraps service via contextualization' ;; + esac + + local _index=0 + while [ -n "${ONE_SERVICE_PARAMS[${_index}]}" ]; do + local _name="${ONE_SERVICE_PARAMS[${_index}]}" + local _type="${ONE_SERVICE_PARAMS[$((_index + 1))]}" + local _desc="${ONE_SERVICE_PARAMS[$((_index + 2))]}" + local _input="${ONE_SERVICE_PARAMS[$((_index + 3))]}" + _index=$((_index + 4)) + + if [ "${_command}" = "${_type}" ]; then + if [ -z "${_input}" ]; then + echo -n ' ' + else + echo -n ' * ' + fi + + printf "%-25s - %s\n" "${_name}" "${_desc}" + fi + done + + echo + done + + echo 'Note: (*) variables are provided to the user via USER_INPUTS' +} + +#TODO: more or less duplicate to common.sh/service_help() +params2md() +{ + local _command=$1 + + local _index=0 + local _count=0 + while [ -n "${ONE_SERVICE_PARAMS[${_index}]}" ]; do + local _name="${ONE_SERVICE_PARAMS[${_index}]}" + local _type="${ONE_SERVICE_PARAMS[$((_index + 1))]}" + local _desc="${ONE_SERVICE_PARAMS[$((_index + 2))]}" + local _input="${ONE_SERVICE_PARAMS[$((_index + 3))]}" + _index=$((_index + 4)) + + if [ "${_command}" = "${_type}" ] && [ -n "${_input}" ]; then + # shellcheck disable=SC2016 + printf '* `%s` - %s\n' "${_name}" "${_desc}" + _count=$((_count + 1)) + fi + done + + if [ "${_count}" -eq 0 ]; then + echo '* none' + fi +} + +create_one_service_metadata() +{ + # shellcheck disable=SC2001 + cat >"${ONE_SERVICE_METADATA}" < [] +# use in pipe with yum -y --showduplicates list +# yum version follows these rules: +# starting at the first colon (:) and up to the first hyphen (-) +# example: +# 3:18.09.1-3.el7 -> 18.09.1 +yum_pkg_filter() +{ + _pkg="$1" + _version="$2" + + awk -v pkg="$_pkg" '{if ($1 ~ "^" pkg) print $2;}' | \ + sed -e 's/^[^:]*://' 
-e 's/-.*//' | \ + if [ -n "$_version" ] ; then + # only the correct versions + awk -v version="$_version" ' + { + if ($1 ~ "^" version) + print $1; + }' + else + cat + fi +} + +# arg: +is_in_list() +{ + _word="$1" + shift + + # shellcheck disable=SC2048 + for i in $* ; do + if [ "$_word" = "$i" ] ; then + return 0 + fi + done + + return 1 +} + +# arg: +is_true() +{ + _value=$(eval echo "\$${1}" | tr '[:upper:]' '[:lower:]') + case "$_value" in + 1|true|yes|y) + return 0 + ;; + esac + + return 1 +} + +# arg: [context file] +save_context_base64() +{ + _context_file="${1:-$ONE_SERVICE_CONTEXTFILE}" + + msg info "Store current context in the file: ${_context_file}" + _context_vars=$(set | sed -n 's/^\(ONEAPP_[^=[:space:]]\+\)=.*/\1/p') + + if ! [ -f "$_context_file" ] ; then + echo '{}' > "$_context_file" + fi + + _old_context=$(cat "$_context_file") + + { + echo "$_old_context" + + for _context_var in ${_context_vars} ; do + _value=$(eval "printf \"\$${_context_var}\"") + echo '{}' | jq -S --arg val "$_value" ". + {\"${_context_var}\": \$val | @base64}" + done + } | jq -sS add > "$_context_file" +} + +# arg: [context file] +save_context() +{ + _context_file="${1:-$ONE_SERVICE_CONTEXTFILE}" + + msg info "Store current context in the file: ${_context_file}" + + "${ONE_SERVICE_SETUP_DIR}/bin/context-helper" \ + update "${_context_file}" +} + +# arg: [context file] +load_context() +{ + _context_file="${1:-$ONE_SERVICE_CONTEXTFILE}" + + if ! [ -f "${_context_file}" ] ; then + msg info "Create empty context file: ${_context_file}" + echo '{}' > "${_context_file}" + return 0 + fi + + msg info "Load last context from the file: ${_context_file}" + + _vars=$("${ONE_SERVICE_SETUP_DIR}/bin/context-helper" \ + -t names load "${_context_file}") + + for i in $_vars ; do + _value=$(get_value_from_context_file "${i}" "${_context_file}") + eval "${i}=\$(echo \"\$_value\")" + # shellcheck disable=SC2163 + export "${i}" + done +} + +# arg: [context file] +get_changed_context_vars() +{ + _context_file="${1:-$ONE_SERVICE_CONTEXTFILE}" + + if ! [ -f "${_context_file}" ] ; then + return 0 + fi + + "${ONE_SERVICE_SETUP_DIR}/bin/context-helper" \ + -t names compare "${_context_file}" +} + +# arg: [] +get_value_from_context_file() +{ + _var="$1" + _context_file="${2:-$ONE_SERVICE_CONTEXTFILE}" + + [ -z "${_var}" ] && return 1 + + jq -cr ".${_var}" < "${_context_file}" +} + +# arg: +is_context_variable_updated() +{ + _varname="$1" + + for v in $(get_changed_context_vars "${ONE_SERVICE_CONTEXTFILE}") ; do + if [ "$v" = "${_varname}" ] ; then + # variable has been updated + return 0 + fi + done + + return 1 +} + +# arg: +check_pidfile() +{ + _pidfile="$1" + + if [ -f "${_pidfile}" ] ; then + _pid=$(grep '^[0-9]\+$' "${_pidfile}") + else + _pid= + fi + + if [ -n "${_pid}" ] ; then + kill -0 ${_pid} + return $? + fi + + return 1 +} + +# arg: +wait_for_pidfile() +{ + _pidfile="$1" + _timeout=60 # we wait at most one minute... + + while [ "$_timeout" -gt 0 ]; do + # we wait for the pidfile to emerge... + if [ -f "$_pidfile" ] ; then + _pid=$(cat "$_pidfile") + # we retry until the pid in pidfile is a number... + if echo "$_pid" | grep -q '^[0-9]\+$' ; then + # the pid must be stable for 3 seconds... + _check_time=3 + while [ "$_check_time" -gt 0 ] ; do + sleep 1s + if kill -0 "$_pid" ; then + _check_time=$(( _check_time - 1 )) + else + break + fi + done + if [ "$_check_time" -eq 0 ] ; then + # we succeeded - we have valid pid... 
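+                    # (i.e. the same PID kept answering kill -0 for the whole 3-second window)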
+ break + fi + fi + fi + + sleep 1s + _timeout=$(( _timeout - 1 )) + done +} + +wait_for_file() +( + _timeout=60 # we wait at most one minute... + + while [ "$_timeout" -gt 0 ] ; do + if [ -e "$1" ] ; then + return 0 + fi + + sleep 1s + _timeout=$(( _timeout - 1 )) + done + + return 1 +) + diff --git a/appliances/lib/context-helper.py b/appliances/lib/context-helper.py new file mode 100755 index 00000000..d923616d --- /dev/null +++ b/appliances/lib/context-helper.py @@ -0,0 +1,295 @@ +#!/usr/bin/env python3 + +# --------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +# --------------------------------------------------------------------------- # + + +import sys +import os +import argparse +import re +import json + + +JSON_INDENT = 4 + + +class SaveFileError(Exception): + """When there is an issue with writing to the context file.""" + pass + + +class OpenFileError(Exception): + """When there is an issue with opening the context file.""" + pass + + +def get_current_context(env_prefix): + """ Returns all env. variables where names start with 'env_prefix'. """ + + context = {} + regex = re.compile("^" + env_prefix) + for env_var in os.environ: + if regex.search(env_var): + context[env_var] = os.environ[env_var] + + return context + + +def get_file_context(env_prefix, context_file): + """ + Returns all env. variables from 'context_file' where names start with + 'env_prefix'. + . + """ + + # load context file + with open(context_file, "r") as f: + file_context = json.load(f) + + # mark all not matching prefix + regex = re.compile("^" + env_prefix) + to_delete = [] + for env_var in file_context: + if not regex.search(env_var): + to_delete.append(env_var) + + # delete all non-matching env. vars + for env_var in to_delete: + del file_context[env_var] + + return file_context + + +def save_context(env_prefix, context_file, json_indent=JSON_INDENT): + """ + Saves current context (env. variables with matching 'env_prefix') into the + 'context_file'. + + It will overwrite the existing file if it exists! + + Returns context. + """ + + context = get_current_context(env_prefix) + with open(context_file, "w") as f: + f.write(json.dumps(context, indent=json_indent)) + f.write("\n") + + return context + + +def load_context(env_prefix, context_file): + """ + It loads context from the 'context_file'. It will load only those + variables matching 'env_prefix' and which are not yet in the current + context. + + It will NOT overwrite any variable in the current context! + + Returns result context as described above. + + NOTE: + Because it is impossible to modify environment of the caller - the result + from this function should dumped to the stdout as a json, which must be + sourced later by the caller (eg: shell script). 
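+    One hedged usage sketch for a shell caller (the paths are taken from the
+    appliance scripts added in this patch; the exact invocation is only
+    illustrative):
+
+        eval "$(/opt/one-appliance/bin/context-helper -t shell load \
+                    /etc/one-appliance/context.json)"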
+ """ + + # load context file + file_context = get_file_context(env_prefix, context_file) + + # filter only those not in context already + context = get_current_context(env_prefix) + result = {} + for file_env in file_context: + if context.get(file_env) is None: + result[file_env] = file_context[file_env] + + return result + + +def update_context(env_prefix, context_file, json_indent=JSON_INDENT): + """ + Similar to save but it will only update the file - it will overwrite + existing variables in the 'context_file' with those from the current + context but it will leave the rest intact. + + Returns full content of the file as context. + """ + + # load context file + file_context = get_file_context(env_prefix, context_file) + + # load current context + context = get_current_context(env_prefix) + + # update file context with current context + for env_var in context: + file_context[env_var] = context[env_var] + + # write updated content back + with open(context_file, "w") as f: + f.write(json.dumps(file_context, indent=json_indent)) + f.write("\n") + + return file_context + + +def compare_context(env_prefix, context_file): + """ + It will return keypairs of context variables which differs from the + 'context_file' and the current context. + """ + + # load context file + file_context = get_file_context(env_prefix, context_file) + + # load current context + context = get_current_context(env_prefix) + + # find all changed + result = {} + for env_var in context: + if file_context.get(env_var) != context.get(env_var): + result[env_var] = context[env_var] + + # when variable was not changed but deleted + # TO NOTE: currently not usable because VNF is setting defaults in context.json + # + #for env_var in file_context: + # if context.get(env_var) is None: + # result[env_var] = "" + + return result + + +def error_msg(msg): + length = 80 + line = "" + for word in msg.split(' '): + if (len(line + ' ' + word)) < length: + line = line.strip() + ' ' + word + else: + print(line, file=sys.stderr) + line = word + if (line != ""): + print(line, file=sys.stderr) + + +def print_result(context, output_type, json_indent=JSON_INDENT): + """ + Prints context according to output type (the whole json, or just variable + names - each on separate line - for simple usage). 
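+    For example (illustrative value), a context of {"ONEAPP_FOO": "bar"} is
+    printed as an indented JSON document for 'json', as the bare name
+    ONEAPP_FOO for 'names', and as ONEAPP_FOO='bar' for 'shell'.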
+ """ + + if output_type == 'json': + print(json.dumps(context, indent=json_indent)) + elif output_type == 'names': + for i in context: + print(i) + elif output_type == 'shell': + for i in context: + print("%s='%s'" % (i, context[i])) + + +def main(): + parser = argparse.ArgumentParser(description="ONE context helper") + parser.add_argument("-f", "--force", + dest="context_overwrite", + required=False, + action='store_const', + const=True, + default=False, + help="Forces overwrite of the file if needed") + parser.add_argument("-e", "--env-prefix", + required=False, + metavar="", + default="ONEAPP_", + help="Prefix of the context variables " + "(default: 'ONEAPP_')") + parser.add_argument("-t", "--output-type", + required=False, + metavar="json|names|shell", + choices=["json", "names", "shell"], + default="json", + help="Output type (affects only load and compare) " + "(default: 'json')") + parser.add_argument("context_action", + metavar="save|load|update|compare", + choices=["save", "load", "update", "compare"], + help=("Save/update context into the file," + " or load from it," + " or compare it with the current context.")) + parser.add_argument("context_file", + metavar="", + help="Filepath of the context file") + + args = parser.parse_args() + + if args.context_action == "save": + try: + if (os.path.isfile(args.context_file) + and (not args.context_overwrite)): + # file exists and no --force used... + raise SaveFileError + except SaveFileError: + error_msg("ERROR: Trying to save context but the file: '" + + args.context_file + "' already exists!") + error_msg("Hint 1: Try '--force' if you wish to overwrite it") + error_msg("Hint 2: Or maybe you want to use 'update'...") + return 1 + context = save_context(args.env_prefix, args.context_file) + + elif args.context_action == "load": + try: + if not os.path.isfile(args.context_file): + raise OpenFileError + except OpenFileError: + error_msg("ERROR: Trying to open the context file: '" + + args.context_file + "' but it doesn't exist!") + return 1 + context = load_context(args.env_prefix, args.context_file) + + # dump context values which should be sourced by caller + print_result(context, args.output_type) + + elif args.context_action == "update": + if os.path.isfile(args.context_file): + # update existing + context = update_context(args.env_prefix, args.context_file) + else: + # no file yet, so simply save context instead + context = save_context(args.env_prefix, args.context_file) + + elif args.context_action == "compare": + try: + if not os.path.isfile(args.context_file): + raise OpenFileError + except OpenFileError: + error_msg("ERROR: Trying to open the context file: '" + + args.context_file + "' but it doesn't exist!") + return 1 + context = compare_context(args.env_prefix, args.context_file) + + # dump context values which should be sourced by caller + print_result(context, args.output_type) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) + diff --git a/appliances/lib/functions.sh b/appliances/lib/functions.sh new file mode 100644 index 00000000..c382eb50 --- /dev/null +++ b/appliances/lib/functions.sh @@ -0,0 +1,407 @@ +# ---------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +# ---------------------------------------------------------------------------- # + +# args: "$@" +_parse_arguments() +{ + _ACTION=nil + state=nil + while [ -n "$1" ] ; do + case "$state" in + nil) + case "$1" in + -h|--help|help) + _ACTION=help + state=done + ;; + install) + _ACTION=install + state=install + ;; + configure|bootstrap) + _ACTION="$1" + state=configure + ;; + *) + _ACTION=badargs + msg unknown "BAD USAGE: unknown argument: $1" + break + ;; + esac + ;; + configure) + case "$1" in + reconfigure) + ONE_SERVICE_RECONFIGURE=true + state=done + ;; + *) + _ACTION=badargs + msg unknown "BAD USAGE: unknown argument: $1" + break + ;; + esac + ;; + install) + ONE_SERVICE_VERSION="$1" + state=done + ;; + done) + _ACTION=badargs + msg unknown "BAD USAGE: extraneous argument(s)" + break + ;; + esac + shift + done +} + +# args: "$0" "${@}" +_lock_or_fail() +{ + this_script="$1" + if [ "${_SERVICE_LOCK}" != "$this_script" ] ; then + exec env _SERVICE_LOCK="$this_script" flock -xn $this_script "$@" + fi +} + +_on_exit() +{ + # this is the exit handler - I want to clean up as much as I can + set +e + + # first do whatever the service appliance needs to clean after itself + service_cleanup + + # delete temporary working file(s) + if [ -n "$_SERVICE_LOG_PIPE" ] ; then + rm -f "$_SERVICE_LOG_PIPE" + fi + + # exiting while the stage was interrupted - change status to failure + _status=$(_get_current_service_result) + case "$_status" in + started) + _set_service_status failure + ;; + esac + + # all done - delete pid file and exit + rm -f "$ONE_SERVICE_PIDFILE" +} + +_trap_exit() +{ + trap '_on_exit 2>/dev/null' INT QUIT TERM EXIT +} + +_is_running() +{ + pid=$(_get_pid) + + if echo "$pid" | grep -q '^[0-9]\+$' ; then + kill -0 $pid + return $? 
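+        # (note: kill -0 above only checks that the process exists - no signal is delivered)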
+ fi + + return 1 +} + +_get_pid() +{ + if [ -f "$ONE_SERVICE_PIDFILE" ] ; then + cat "$ONE_SERVICE_PIDFILE" + fi +} + +_write_pid() +{ + echo $$ > "$ONE_SERVICE_PIDFILE" +} + +_get_service_status() +{ + if [ -f "$ONE_SERVICE_STATUS" ] ; then + cat "$ONE_SERVICE_STATUS" + fi +} + +_get_current_service_step() +{ + _get_service_status | sed -n 's/^\(install\|configure\|bootstrap\)_.*/\1/p' +} + +_get_current_service_result() +{ + _result=$(_get_service_status | sed -n 's/^\(install\|configure\|bootstrap\)_\(.*\)/\2/p') + case "$_result" in + started|success|failure) + echo "$_result" + ;; + esac +} + +# arg: install|configure|bootstrap [| +_check_service_status() +{ + _reconfigure="$2" + + case "$1" in + install) + case "$(_get_service_status)" in + '') + # nothing was done so far + return 0 + ;; + install_success) + msg warning "Installation was already done - skip" + return 1 + ;; + install_started) + msg error "Installation was probably interrupted - abort" + _set_service_status failure + exit 1 + ;; + install_failure) + msg error "Last installation attempt failed - abort" + exit 1 + ;; + *) + msg error "Install step cannot be run - go check: ${ONE_SERVICE_STATUS}" + exit 1 + ;; + esac + ;; + configure) + case "$(_get_service_status)" in + '') + # nothing was done so far - missing install + msg error "Cannot proceed with configuration - missing installation step" + exit 1 + ;; + install_success) + # installation was successfull - can continue + return 0 + ;; + configure_success) + if is_true _reconfigure ; then + msg info "Starting reconfiguration of the service" + return 0 + else + msg warning "Configuration was already done - skip" + return 1 + fi + ;; + configure_started) + if is_true _reconfigure ; then + msg info "Starting reconfiguration of the service" + return 0 + else + msg error "Configuration was probably interrupted - abort" + _set_service_status failure + exit 1 + fi + ;; + configure_failure) + if is_true _reconfigure ; then + msg info "Starting reconfiguration of the service" + return 0 + else + msg error "Last configuration attempt failed - abort" + exit 1 + fi + ;; + bootstrap*) + if is_true _reconfigure ; then + msg info "Starting reconfiguration of the service" + return 0 + else + msg error "Configure step cannot be run - go check: ${ONE_SERVICE_STATUS}" + exit 1 + fi + ;; + *) + msg error "Configure step cannot be run - go check: ${ONE_SERVICE_STATUS}" + exit 1 + ;; + esac + ;; + bootstrap) + case "$(_get_service_status)" in + '') + # nothing was done so far - missing install + msg error "Cannot proceed with bootstrapping - missing installation step" + exit 1 + ;; + configure_success) + # configuration was successfull - can continue + return 0 + ;; + bootstrap_success) + if is_true _reconfigure ; then + msg info "Redo bootstrap of the service" + return 0 + else + msg warning "Bootstrap was already done - skip" + return 1 + fi + ;; + bootstrap_started) + if is_true _reconfigure ; then + msg info "Redo bootstrap of the service" + return 0 + else + msg error "Bootstrap was probably interrupted - abort" + _set_service_status failure + exit 1 + fi + ;; + bootstrap_failure) + if is_true _reconfigure ; then + msg info "Redo bootstrap of the service" + return 0 + else + msg error "Last bootstrap attempt failed - abort" + exit 1 + fi + ;; + *) + msg error "Bootstrap step cannot be run - go check: ${ONE_SERVICE_STATUS}" + exit 1 + ;; + esac + ;; + esac + + msg error "THIS SHOULD NOT HAPPEN!" + msg unknown "Possibly a bug, wrong usage, action etc." 
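+    # For reference, the usual lifecycle of the status file, as implied by the
+    # cases above, is: install_started -> install_success -> configure_started
+    # -> configure_success -> bootstrap_started -> bootstrap_success (with
+    # <step>_failure written whenever a stage is aborted).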
+ exit 1 +} + +# arg: install|configure|bootstrap|success|failure +_set_service_status() +{ + _status="$1" + case "$_status" in + install|configure|bootstrap) + echo ${_status}_started > "$ONE_SERVICE_STATUS" + _set_motd "$_status" started + ;; + success|failure) + _step=$(_get_current_service_step) + echo ${_step}_${_status} > "$ONE_SERVICE_STATUS" + _set_motd "$_step" "$_status" + ;; + *) + msg unknown "THIS SHOULD NOT HAPPEN!" + msg unknown "Possibly a bug, wrong usage, action etc." + exit 1 + ;; + esac +} + +_print_logo() +{ + cat > ${ONE_SERVICE_MOTD} <> ${ONE_SERVICE_MOTD} <> ${ONE_SERVICE_MOTD} <> ${ONE_SERVICE_MOTD} <> ${ONE_SERVICE_MOTD} < +_start_log() +{ + _logfile="$1" + _SERVICE_LOG_PIPE="$ONE_SERVICE_LOGDIR"/one_service_log.pipe + + # create named pipe + mknod "$_SERVICE_LOG_PIPE" p + + # connect tee to the pipe and let it write to the log and screen + tee <"$_SERVICE_LOG_PIPE" -a "$_logfile" & + + # save stdout to fd 3 and force shell to write to the pipe + exec 3>&1 >"$_SERVICE_LOG_PIPE" +} + +_end_log() +{ + # restore stdout for the shell and close fd 3 + exec >&3 3>&- +} + diff --git a/appliances/scripts/context_service_net-90.sh b/appliances/scripts/context_service_net-90.sh new file mode 100644 index 00000000..ca494b20 --- /dev/null +++ b/appliances/scripts/context_service_net-90.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Runs OpenNebula service appliances configuration & bootstrap script + +#TODO: just single run based on "status" +_oneapp_service='/etc/one-appliance/service' + +# one-context 6.2.0+ shifts the command argument +if [ $# -eq 2 ]; then + _reconfigure="$2" +else + _reconfigure="$1" +fi + +if [ -x "${_oneapp_service}" ]; then + "${_oneapp_service}" configure "$_reconfigure" && \ + "${_oneapp_service}" bootstrap +fi diff --git a/appliances/scripts/context_service_net-99.sh b/appliances/scripts/context_service_net-99.sh new file mode 100644 index 00000000..7633ce15 --- /dev/null +++ b/appliances/scripts/context_service_net-99.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +ENV_FILE=${ENV_FILE:-/var/run/one-context/one_env} + +if [ "$REPORT_READY" != "YES" ]; then + exit 0 +fi + +# $TOKENTXT is available only through the env. file +if [ -f "${ENV_FILE}" ]; then + . "${ENV_FILE}" +fi + +# Reports only if ONE service appliance bootstrapped successfully +if [ -x '/etc/one-appliance/service' ]; then + _status=$(cat '/etc/one-appliance/status' 2>/dev/null) + if [ "${_status}" != 'bootstrap_success' ]; then + exit 0 + fi +fi + +### + +if which onegate >/dev/null 2>&1; then + onegate vm update --data "READY=YES" + + if [ "$?" = "0" ]; then + exit 0 + fi +fi + +if which curl >/dev/null 2>&1; then + curl -X "PUT" "${ONEGATE_ENDPOINT}/vm" \ + --header "X-ONEGATE-TOKEN: $TOKENTXT" \ + --header "X-ONEGATE-VMID: $VMID" \ + -d "READY=YES" + + if [ "$?" = "0" ]; then + exit 0 + fi +fi + +if which wget >/dev/null 2>&1; then + wget --method=PUT "${ONEGATE_ENDPOINT}/vm" \ + --body-data="READY=YES" \ + --header "X-ONEGATE-TOKEN: $TOKENTXT" \ + --header "X-ONEGATE-VMID: $VMID" + + if [ "$?" = "0" ]; then + exit 0 + fi +fi diff --git a/appliances/service b/appliances/service new file mode 100755 index 00000000..d02530af --- /dev/null +++ b/appliances/service @@ -0,0 +1,133 @@ +#!/usr/bin/env bash + +# ---------------------------------------------------------------------------- # +# Copyright 2018-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +# ---------------------------------------------------------------------------- # + +# USAGE: +# service [-h|--help|help] +# Print help and usage +# +# service install [] +# Download files and install packages for the desired version of a service +# +# service configure +# Configure the service via contextualization or with defaults +# +# service bootstrap +# Use user's predefined values for the final setup and start the service + +ONE_SERVICE_DIR=/etc/one-appliance +ONE_SERVICE_LOGDIR=/var/log/one-appliance +ONE_SERVICE_STATUS="${ONE_SERVICE_DIR}/status" +ONE_SERVICE_TEMPLATE="${ONE_SERVICE_DIR}/template" +ONE_SERVICE_METADATA="${ONE_SERVICE_DIR}/metadata" +ONE_SERVICE_REPORT="${ONE_SERVICE_DIR}/config" +ONE_SERVICE_FUNCTIONS="${ONE_SERVICE_DIR}/service.d/functions.sh" +ONE_SERVICE_COMMON="${ONE_SERVICE_DIR}/service.d/common.sh" +ONE_SERVICE_APPLIANCE="${ONE_SERVICE_DIR}/service.d/appliance.sh" +ONE_SERVICE_SETUP_DIR="/opt/one-appliance" +ONE_SERVICE_MOTD='/etc/motd' +ONE_SERVICE_PIDFILE="/var/run/one-appliance-service.pid" +ONE_SERVICE_CONTEXTFILE="${ONE_SERVICE_DIR}/context.json" +ONE_SERVICE_RECONFIGURE=false # the first time is always a full configuration +ONE_SERVICE_VERSION= # can be set by argument or to default +ONE_SERVICE_RECONFIGURABLE= # can be set by the appliance script + +# security precautions +set -e +umask 0077 + +# -> TODO: read all from ONE_SERVICE_DIR + +# source common functions +. "$ONE_SERVICE_COMMON" + +# source this script's functions +. "$ONE_SERVICE_FUNCTIONS" + +# source service appliance implementation (following functions): +# service_help +# service_install +# service_configure +# service_bootstrap +# service_cleanup +. "$ONE_SERVICE_APPLIANCE" + +# parse arguments and set _ACTION +_parse_arguments "$@" + +# execute requested action or fail +case "$_ACTION" in + nil|help) + # check if the appliance defined a help function + if type service_help >/dev/null 2>&1 ; then + # use custom appliance help + service_help + else + # use default + default_service_help + fi + ;; + badargs) + exit 1 + ;; + # all stages do basically this: + # 1. check status file if _ACTION can be run at all + # 2. set service status file + # 3. set motd (message of the day) + # 4. execute stage (install, configure or bootstrap) + # 5. set service status file again + # 6. set motd to normal or to signal failure + install|configure|bootstrap) + # check the status (am I running already) + if _is_running ; then + msg warning "Service script is running already - PID: $(_get_pid)" + exit 0 + fi + + # secure lock or fail (only one running instance of this script is allowed) + _lock_or_fail "$0" "$@" + + # set a trap for an exit (cleanup etc.) + _trap_exit + + # write a pidfile + _write_pid + + # analyze the current stage and either proceed or abort + if ! 
_check_service_status $_ACTION "$ONE_SERVICE_RECONFIGURABLE" ; then + exit 0 + fi + + # mark the start of a stage (install, configure or bootstrap) + _set_service_status $_ACTION + + # here we make sure that log directory exists + mkdir -p "$ONE_SERVICE_LOGDIR" + chmod 0700 "$ONE_SERVICE_LOGDIR" + + # execute action + _start_log "${ONE_SERVICE_LOGDIR}/ONE_${_ACTION}.log" + service_${_ACTION} 2>&1 + _end_log + + # if we reached this point then the current stage was successfull + _set_service_status success + ;; +esac + +exit 0 + diff --git a/appliances/vnf.sh b/appliances/vnf.sh new file mode 100644 index 00000000..3c7e2ebb --- /dev/null +++ b/appliances/vnf.sh @@ -0,0 +1,4387 @@ +#!/usr/bin/env bash + +# ---------------------------------------------------------------------------- # +# Copyright 2018-2022, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +# ---------------------------------------------------------------------------- # + +# Important notes ############################################################# +# +# +# ********************** +# * Context parameters * +# ********************** +# +# [DNS] +# +# ONEAPP_VNF_DNS_ALLOWED_NETWORKS / ... +# ONEAPP_VNF_DNS_CONFIG +# ONEAPP_VNF_DNS_ENABLED +# ONEAPP_VNF_DNS_INTERFACES [@]|[/[@]] ... +# ONEAPP_VNF_DNS_MAX_CACHE_TTL +# ONEAPP_VNF_DNS_NAMESERVERS [@] ... +# ONEAPP_VNF_DNS_TCP_DISABLED +# ONEAPP_VNF_DNS_UDP_DISABLED +# ONEAPP_VNF_DNS_UPSTREAM_TIMEOUT # msecs +# ONEAPP_VNF_DNS_USE_ROOTSERVERS +# +# +# [DHCP4] +# +# ONEAPP_VNF_DHCP4_ /:- +# ONEAPP_VNF_DHCP4__GATEWAY ... +# ONEAPP_VNF_DHCP4__DNS ... +# ONEAPP_VNF_DHCP4__MTU +# ONEAPP_VNF_DHCP4_AUTHORITATIVE +# ONEAPP_VNF_DHCP4_CONFIG +# ONEAPP_VNF_DHCP4_DNS ... +# ONEAPP_VNF_DHCP4_ENABLED +# ONEAPP_VNF_DHCP4_GATEWAY ... +# ONEAPP_VNF_DHCP4_HOOK[0-9] +# ONEAPP_VNF_DHCP4_INTERFACES [/] ... +# ONEAPP_VNF_DHCP4_LEASE_DATABASE +# ONEAPP_VNF_DHCP4_LEASE_TIME +# ONEAPP_VNF_DHCP4_LOGFILE +# ONEAPP_VNF_DHCP4_MAC2IP_ENABLED +# ONEAPP_VNF_DHCP4_MAC2IP_MACPREFIX : # e.g.: "02:00" +# ONEAPP_VNF_DHCP4_MAC2IP_SUBNETS / ... +# ONEAPP_VNF_DHCP4_SUBNET[0-9] +# +# +# [ROUTER4] +# +# ONEAPP_VNF_ROUTER4_ENABLED +# ONEAPP_VNF_ROUTER4_INTERFACES ... +# +# +# [NAT4] +# +# ONEAPP_VNF_NAT4_ENABLED +# ONEAPP_VNF_NAT4_INTERFACES_OUT ... +# +# +# [SDNAT4] +# +# ONEAPP_VNF_SDNAT4_ENABLED +# # TODO: this is noop +# ONEAPP_VNF_SDNAT4_ONEGATE_ENABLED +# ONEAPP_VNF_SDNAT4_INTERFACES ... +# ONEAPP_VNF_SDNAT4_REFRESH_RATE +# TODO: +# ONEAPP_VNF_SDNAT4__RULE : +# +# +# [LB] +# +# ONEAPP_VNF_LB_ENABLED +# ONEAPP_VNF_LB_ONEGATE_ENABLED +# ONEAPP_VNF_LB_INTERFACES ... 
+# ONEAPP_VNF_LB_REFRESH_RATE +# ONEAPP_VNF_LB_FWMARK_OFFSET # must be >1 (default 10000) +# ONEAPP_VNF_LB_CONFIG +# ONEAPP_VNF_LB[0-9]_IP +# ONEAPP_VNF_LB[0-9]_PROTOCOL TCP|UDP|BOTH # optional +# ONEAPP_VNF_LB[0-9]_PORT # optional +# ONEAPP_VNF_LB[0-9]_METHOD NAT|DR (default NAT) +# ONEAPP_VNF_LB[0-9]_FWMARK # optional - must be >0 +# ONEAPP_VNF_LB[0-9]_TIMEOUT # optional (default 10s) +# ONEAPP_VNF_LB[0-9]_SCHEDULER # default wlc +# +# ONEAPP_VNF_LB[0-9]_SERVER[0-9]_HOST +# ONEAPP_VNF_LB[0-9]_SERVER[0-9]_PORT +# ONEAPP_VNF_LB[0-9]_SERVER[0-9]_WEIGHT +# ONEAPP_VNF_LB[0-9]_SERVER[0-9]_ULIMIT +# ONEAPP_VNF_LB[0-9]_SERVER[0-9]_LLIMIT +# +# via onegate: +# ONEGATE_LB[0-9]_IP +# ONEGATE_LB[0-9]_PROTOCOL +# ONEGATE_LB[0-9]_PORT +# ONEGATE_LB[0-9]_SERVER_HOST +# ONEGATE_LB[0-9]_SERVER_PORT +# ONEGATE_LB[0-9]_SERVER_WEIGHT +# ONEGATE_LB[0-9]_SERVER_ULIMIT +# ONEGATE_LB[0-9]_SERVER_LLIMIT +# +# +# [HAPROXY] +# +# ONEAPP_VNF_HAPROXY_ENABLED +# ONEAPP_VNF_HAPROXY_ONEGATE_ENABLED +# ONEAPP_VNF_HAPROXY_INTERFACES ... +# ONEAPP_VNF_HAPROXY_REFRESH_RATE +# ONEAPP_VNF_HAPROXY_CONFIG +# ONEAPP_VNF_HAPROXY_LB[0-9]_IP +# ONEAPP_VNF_HAPROXY_LB[0-9]_PORT # optional +# +# ONEAPP_VNF_HAPROXY_LB[0-9]_SERVER[0-9]_HOST +# ONEAPP_VNF_HAPROXY_LB[0-9]_SERVER[0-9]_PORT +# +# via onegate: +# ONEGATE_HAPROXY_LB[0-9]_IP +# ONEGATE_HAPROXY_LB[0-9]_PORT +# ONEGATE_HAPROXY_LB[0-9]_SERVER_HOST +# ONEGATE_HAPROXY_LB[0-9]_SERVER_PORT +# +# +# [KEEPALIVED] +# +# ONEAPP_VNF_KEEPALIVED_ENABLED +# ONEAPP_VNF_KEEPALIVED_INTERFACES ... +# ONEAPP_VNF_KEEPALIVED_INTERVAL +# ONEAPP_VNF_KEEPALIVED_PASSWORD # must be under 8 characters +# ONEAPP_VNF_KEEPALIVED_PRIORITY +# ONEAPP_VNF_KEEPALIVED_VRID <1-255> +# ONEAPP_VNF_KEEPALIVED__INTERVAL +# ONEAPP_VNF_KEEPALIVED__PASSWORD # must be under 8 characters +# ONEAPP_VNF_KEEPALIVED__PRIORITY +# ONEAPP_VNF_KEEPALIVED__VRID <1-255> +# +# +# [VROUTER] +# +# ONEAPP_VROUTER__MANAGEMENT +# ONEAPP_VROUTER__VIP[0-9] +# +# +# [OLD VROUTER] +# +# VROUTER_ID +# VROUTER_KEEPALIVED_ID +# VROUTER_KEEPALIVED_PASSWORD +# _VROUTER_IP +# _VROUTER_MANAGEMENT +# +# +# ***************************** +# * Loopback interface ('lo') * +# ***************************** +# +# Using the loopback ('lo') as a VNF interface is tricky - this summarize it: +# +# ROUTER: +# enabling forwarding on the loopback should not affect anything because of +# the way iptables works - that means that this: +# net.ipv4.conf.lo.forwarding = 1 +# does nothing - at least according to this great guide: +# https://www.frozentux.net/iptables-tutorial/chunkyhtml/c962.html +# (if I understood it properly) and as I verified it by tests. So it is not +# used at all - lo forwarding is implicitly zero if not explicitly requested +# by the user. +# +# KEEPALIVED: +# there is no sense or means to run vrrp instance on the loopback... That +# means that 'lo' is simply skipped/ignored. +# +# DNS: +# 'lo' is enabled by default if DNS is enabled - so 'lo' in interfaces does +# not change the fact. +# +# DHCP4: +# if the loopback is used as of one the interfaces then subnet is configured +# accordingly - it will provide leases for the loopback address range +# (127.0.0.0/8 if not specified in more detail). +# +# NAT4: +# loopback can be used as a NAT interface too although the use-case can be +# very specific if useful at all... 
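+#
+# As a quick orientation (the values below are purely illustrative, not
+# defaults), a basic routed/NAT setup driven by the parameters above could be
+# contextualized as:
+#
+#   ONEAPP_VNF_ROUTER4_ENABLED="YES"
+#   ONEAPP_VNF_ROUTER4_INTERFACES="eth0 eth1"
+#   ONEAPP_VNF_NAT4_ENABLED="YES"
+#   ONEAPP_VNF_NAT4_INTERFACES_OUT="eth0"
+#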
+# +# Important notes ############################################################# + + +### ShellCheck ################################################################ + +# shellcheck disable=SC1091 +# shellcheck disable=SC2086 +# shellcheck disable=SC2059 +true + +# these exports are unnecessary but it makes ShellCheck happy... + +export ONE_SERVICE_NAME +export ONE_SERVICE_VERSION +export ONE_SERVICE_DESCRIPTION +export ONE_SERVICE_SHORT_DESCRIPTION +export ONE_SERVICE_BUILD +export ONE_SERVICE_PARAMS +export ONE_SERVICE_RECONFIGURABLE + +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # + + +# List of contextualization parameters +ONE_SERVICE_PARAMS=( + 'ONEAPP_VNF_ROUTER4_ENABLED' 'configure' 'ROUTER4 Enable IPv4 routing service' 'O|boolean' + 'ONEAPP_VNF_ROUTER4_INTERFACES' 'configure' 'ROUTER4 Managed interfaces (default: all)' 'O|text' + 'ONEAPP_VNF_NAT4_ENABLED' 'configure' 'NAT4 Enable network address translation' 'O|boolean' + 'ONEAPP_VNF_NAT4_INTERFACES_OUT' 'configure' 'NAT4 External/outgoing interfaces for NAT (default: none)' 'O|text' + 'ONEAPP_VNF_DNS_ENABLED' 'configure' 'DNS Enable recursor service' 'O|boolean' + 'ONEAPP_VNF_DNS_INTERFACES' 'configure' 'DNS Listening interfaces (default: all)' 'O|text' + 'ONEAPP_VNF_DNS_CONFIG' 'configure' 'DNS Full Unbound config in base64' 'O|boolean' + 'ONEAPP_VNF_DNS_USE_ROOTSERVERS' 'configure' 'DNS Directly use root name servers (default: true)' 'O|boolean' + 'ONEAPP_VNF_DNS_NAMESERVERS' 'configure' 'DNS Upstream nameservers to forward queries' 'O|text' + 'ONEAPP_VNF_DNS_ALLOWED_NETWORKS' 'configure' 'DNS Allowed client networks to make queries' 'O|text' + 'ONEAPP_VNF_DNS_UPSTREAM_TIMEOUT' 'configure' 'DNS Upstream nameservers connection timeout (msecs)' 'O|number' + 'ONEAPP_VNF_DNS_MAX_CACHE_TTL' 'configure' 'DNS Maximum caching time (secs)' 'O|number' + 'ONEAPP_VNF_DNS_TCP_DISABLED' 'configure' 'DNS Disable TCP protocol' 'O|boolean' + 'ONEAPP_VNF_DNS_UDP_DISABLED' 'configure' 'DNS Disable UDP protocol' 'O|boolean' + 'ONEAPP_VNF_KEEPALIVED_ENABLED' 'configure' 'KEEPALIVED Enable vrouter service' 'O|boolean' + 'ONEAPP_VNF_KEEPALIVED_INTERFACES' 'configure' 'KEEPALIVED Managed interfaces (default: all)' 'O|text' + 'ONEAPP_VNF_KEEPALIVED_PASSWORD' 'configure' 'KEEPALIVED Global vrouter password' 'O|boolean' + 'ONEAPP_VNF_KEEPALIVED_INTERVAL' 'configure' 'KEEPALIVED Global advertising interval (secs)' 'O|float' + 'ONEAPP_VNF_KEEPALIVED_PRIORITY' 'configure' 'KEEPALIVED Global vrouter priority' 'O|number' + 'ONEAPP_VNF_KEEPALIVED_VRID' 'configure' 'KEEPALIVED Global vrouter id (1-255)' 'O|number' + 'ONEAPP_VNF_DHCP4_ENABLED' 'configure' 'DHCP4 Enable service' 'O|boolean' + 'ONEAPP_VNF_DHCP4_INTERFACES' 'configure' 'DHCP4 Listening interfaces (default: all)' 'O|text' + 'ONEAPP_VNF_DHCP4_CONFIG' 'configure' 'DHCP4 Full ISC Kea config in base64 JSON' 'O|text' + 'ONEAPP_VNF_DHCP4_DNS' 'configure' 'DHCP4 Global default nameservers' 'O|text' + 'ONEAPP_VNF_DHCP4_GATEWAY' 'configure' 'DHCP4 Global default gateway/routers' 'O|text' + 'ONEAPP_VNF_DHCP4_AUTHORITATIVE' 'configure' 'DHCP4 Server authoritativity (default: true)' 'O|boolean' + 'ONEAPP_VNF_DHCP4_LEASE_TIME' 'configure' 'DHCP4 Lease time in seconds' 'O|number' + 'ONEAPP_VNF_DHCP4_LEASE_DATABASE' 'configure' 'DHCP4 Lease database in base64 JSON' 'O|text' + 'ONEAPP_VNF_DHCP4_LOGFILE' 'configure' 'DHCP4 Log filename' 'O|text' + 'ONEAPP_VNF_DHCP4_SUBNET' 'configure' 'DHCP4 Subnet definition(s) in base64 JSON' 'O|text' + 'ONEAPP_VNF_DHCP4_HOOK' 'configure' 
'DHCP4 Hook definition(s) in base64 JSON' 'O|text' + 'ONEAPP_VNF_DHCP4_MAC2IP_ENABLED' 'configure' 'DHCP4 Enable hook for MAC-to-IP DHCP lease (default: true)' 'O|boolean' + 'ONEAPP_VNF_DHCP4_MAC2IP_MACPREFIX' 'configure' 'DHCP4 HW/MAC address prefix for MAC-to-IP hook (default: 02:00)' 'O|text' + 'ONEAPP_VNF_DHCP4_MAC2IP_SUBNETS' 'configure' 'DHCP4 List of subnets for MAC-to-IP hook' 'O|text' +) + +# Control variables +ONE_SERVICE_RECONFIGURABLE=true + + +### Appliance metadata ######################################################## + +ONE_SERVICE_NAME='Service VNF - KVM' +ONE_SERVICE_VERSION=latest +ONE_SERVICE_VERSION_VNF_DHCP4=2.2.0 +ONE_SERVICE_BUILD=$(date +%s) +ONE_SERVICE_SHORT_DESCRIPTION='VNF Appliance for KVM hosts' +ONE_SERVICE_DESCRIPTION=$(cat <:- + ONEAPP_VNF_DHCP4_ETH0_DNS= ... + ONEAPP_VNF_DHCP4_ETH0_GATEWAY= ... + ONEAPP_VNF_DHCP4_ETH0_MTU= +\`\`\` + +**NOTE**: Subnets defined by \`ONEAPP_VNF_DHCP4_SUBNET*\` params take +precedence over \`ONEAPP_VNF_DHCP4_*\` params - so if you define even +one \`ONEAPP_VNF_DHCP4_SUBNET\` then only these subnets will be configured. + +**BEWARE: Because this appliance allows reconfiguration some previously defined +variables will be still respected! This can pose a problem if for example a +\`ONEAPP_VNF_DHCP4_SUBNET0\` was defined already but now you wish to use +dynamic per interface \`ONEAPP_VNF_DHCP4_*\` variables. In that case you +must also provide an override for the old \`ONEAPP_VNF_DHCP4_SUBNET0\` +variable...simply set it empty: \`ONEAPP_VNF_DHCP4_SUBNET0=""\`.** + +The DHCP4 VNF also provides other contextualization parameters among which is +the prominent **onelease** hook (\`ONEAPP_VNF_DHCP4_MAC2IP_ENABLED\`). It +serves a simple purpose of leasing IP addresses to OpenNebula's VMs matching +their HW/MAC addresses (check the \`ONEAPP_VNF_DHCP4_MAC2IP_MACPREFIX\` param +to be in line with OpenNebula's generated MAC addresses). This behavior is by +default enabled - if you wish to disable it then just simply set +\`ONEAPP_VNF_DHCP4_MAC2IP_ENABLED\` to \`false\`. 
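+
+As an illustration only (the addresses and prefixes below are placeholders),
+the MAC-to-IP hook can be restricted to selected subnets like this:
+
+\`\`\`
+  ONEAPP_VNF_DHCP4_MAC2IP_ENABLED="YES"
+  ONEAPP_VNF_DHCP4_MAC2IP_MACPREFIX="02:00"
+  ONEAPP_VNF_DHCP4_MAC2IP_SUBNETS="192.168.101.0/24 192.168.102.0/24"
+\`\`\`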
+ +EOF +) + + +### Contextualization defaults ################################################ + +ONEAPP_VNF_DHCP4_AUTHORITATIVE="${ONEAPP_VNF_DHCP4_AUTHORITATIVE:-true}" +ONEAPP_VNF_DHCP4_LEASE_TIME="${ONEAPP_VNF_DHCP4_LEASE_TIME:-3600}" +ONEAPP_VNF_DHCP4_LOGFILE="${ONEAPP_VNF_DHCP4_LOGFILE:-/var/log/kea/kea-dhcp4.log}" +ONEAPP_VNF_DHCP4_MAC2IP_ENABLED="${ONEAPP_VNF_DHCP4_MAC2IP_ENABLED:-true}" +ONEAPP_VNF_KEEPALIVED_INTERVAL="${ONEAPP_VNF_KEEPALIVED_INTERVAL:-1}" +ONEAPP_VNF_KEEPALIVED_PRIORITY="${ONEAPP_VNF_KEEPALIVED_PRIORITY:-100}" +ONEAPP_VNF_KEEPALIVED_VRID="${ONEAPP_VNF_KEEPALIVED_VRID:-1}" +ONEAPP_VNF_DNS_UPSTREAM_TIMEOUT="${ONEAPP_VNF_DNS_UPSTREAM_TIMEOUT:-1128}" +ONEAPP_VNF_DNS_MAX_CACHE_TTL="${ONEAPP_VNF_DNS_MAX_CACHE_TTL:-3600}" +ONEAPP_VNF_DNS_USE_ROOTSERVERS="${ONEAPP_VNF_DNS_USE_ROOTSERVERS:-true}" +ONEAPP_VNF_DHCP4_MAC2IP_MACPREFIX="${ONEAPP_VNF_DHCP4_MAC2IP_MACPREFIX:-02:00}" +ONEAPP_VNF_SDNAT4_REFRESH_RATE="${ONEAPP_VNF_SDNAT4_REFRESH_RATE:-30}" +ONEAPP_VNF_SDNAT4_ONEGATE_ENABLED="${ONEAPP_VNF_SDNAT4_ONEGATE_ENABLED:-true}" +ONEAPP_VNF_LB_REFRESH_RATE="${ONEAPP_VNF_LB_REFRESH_RATE:-30}" +ONEAPP_VNF_LB_ONEGATE_ENABLED="${ONEAPP_VNF_LB_ONEGATE_ENABLED:-false}" +ONEAPP_VNF_LB_FWMARK_OFFSET="${ONEAPP_VNF_LB_FWMARK_OFFSET:-10000}" +ONEAPP_VNF_HAPROXY_REFRESH_RATE="${ONEAPP_VNF_HAPROXY_REFRESH_RATE:-30}" +ONEAPP_VNF_HAPROXY_ONEGATE_ENABLED="${ONEAPP_VNF_HAPROXY_ONEGATE_ENABLED:-false}" + +### Globals ################################################################### + +DEP_PKGS="\ + coreutils \ + openssh-server \ + curl \ + jq \ + openssl \ + ca-certificates \ + bind-tools \ + boost \ + postgresql-client \ + mariadb-client \ + mariadb-connector-c \ + cassandra-cpp-driver \ + xz \ + procps \ + py3-psutil \ + unbound \ + keepalived \ + iptables \ + ip6tables \ + logrotate \ + ipvsadm \ + libcap \ + dns-root-hints \ + ruby-concurrent-ruby \ + fping \ + " + +ALL_SUPPORTED_VNF_NAMES="\ + DHCP4 + ROUTER4 + DNS + KEEPALIVED + NAT4 + SDNAT4 + LB + HAPROXY + " + +# leave these empty +ENABLED_VNF_LIST= +DISABLED_VNF_LIST= +UPDATED_VNF_LIST= +ETH_TRIPLETS= + +# Runing Alpine version +ALPINE_VERSION=$(. /etc/os-release ; \ + echo "${VERSION_ID}" | awk 'BEGIN{FS="."}{print $1 "." 
$2;}') + +# ONE VNF service +ONE_VNF_OPENRC_NAME="one-vnf" +ONE_VNF_PIDFILE="/run/one-vnf.pid" +ONE_VNF_SERVICE_SCRIPT="/opt/one-appliance/lib/one-vnf/one-vnf.rb" +ONE_VNF_SERVICE_CONFIG="/opt/one-appliance/etc/one-vnf-config.js" + +# TODO: refactor these variable names to VNF_DHCP4_* +# onekea installation directory +ONEKEA_PREFIX="/usr" + +# onekea version +ONEKEA_VERSION="${ONE_SERVICE_VERSION_VNF_DHCP4:-2.2.0}" +ONEKEA_ONELEASE4_VERSION="1.1.1-r0" + +# onekea artifact filename +ONEKEA_ARTIFACT="onekea-${ONEKEA_VERSION}/kea-hook-onelease4-${ONEKEA_ONELEASE4_VERSION}.apk" + +# onekea library artifact filename +ONEKEA_ARTIFACT_LIBHOOK_LEASE="libkea-onelease-dhcp4.so" + +# VNF DHCP4 specifics +ONEKEA_DHCP4_CONFIG="/etc/kea/kea-dhcp4.conf" +ONEKEA_DHCP6_CONFIG="/etc/kea/kea-dhcp6.conf" +ONEKEA_DHCP4_CONFIG_TEMP="/etc/kea/kea-dhcp4.conf-new" +ONEKEA_DHCP4_LOGROTATE="/etc/logrotate.d/onekea" + +ONEKEA_DHCP4_PIDFILE="/run/kea-dhcp4.pid" +ONEKEA_DHCP6_PIDFILE="/run/kea-dhcp6.pid" + +# VNF ROUTER4 specifics +VNF_ROUTER4_SYSCTL="/etc/sysctl.d/01-one-router4.conf" + +# VNF KEEPALIVED specifics +VNF_KEEPALIVED_CONFIG_DIR="/etc/keepalived/" +VNF_KEEPALIVED_CONFIG="${VNF_KEEPALIVED_CONFIG_DIR}/keepalived.conf" +VNF_KEEPALIVED_NOTIFY_SCRIPT="/etc/keepalived/ha-failover.sh" +VNF_KEEPALIVED_NOTIFY_LOGROTATE="/etc/logrotate.d/ha-failover" +VNF_KEEPALIVED_HA_STATUS_SCRIPT="/etc/keepalived/ha-check-status.sh" +VNF_KEEPALIVED_PIDFILE="/run/keepalived.pid" + +# VNF DNS specifics +VNF_DNS_CONFIG="/etc/unbound/unbound.conf" +VNF_DNS_CONFIG_TEMP="/etc/unbound/unbound.conf-new" +VNF_DNS_PIDFILE="/run/one-unbound.pid" +VNF_DNS_OPENRC_NAME="one-unbound" + +# VNF NAT4 specifics +VNF_NAT4_OPENRC_NAME="one-nat4" +VNF_NAT4_IPTABLES_RULES="/etc/iptables/nat4-rules" + +# VNF SDNAT4 specifics +#VNF_SDNAT4_IPTABLES_RULES="/etc/iptables/sdnat4-rules" + +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # + + +# +# service implementation +# + +service_cleanup() +{ + rm -f "$ONEKEA_DHCP4_CONFIG_TEMP" +} + +service_install() +{ + # packages + install_pkgs ${DEP_PKGS} + + # fix open-rc - start crashed services + install_openrc_config + + # install one-vnf service + install_one_vnf_service + + # fix sshd + enable_ssh_forwarding + + # VNFs + + # VNF DHCP + install_dhcp + install_onekea_hooks + + # VNF DNS + install_dns + + # VNF NAT + install_nat + + # VNF KEEPALIVED + install_keepalived + + # VNF HAPROXY + install_haproxy + + # VNF TOOLS + install_tools + + # disable all VNFs + # NOTE: using workaround for failing ruby in packer/qemu: + # qemu: /usr/lib/ruby/2.7.0/rubygems/core_ext/kernel_require.rb:83:in `require': cannot load such file -- json (LoadError) + # (probably due to C extensions with json??) + _SKIP_ONE_VNF=YES stop_and_disable_vnfs "$ALL_SUPPORTED_VNF_NAMES" + _SKIP_ONE_VNF='' + + # service metadata + create_one_service_metadata + + # cleanup + postinstall_cleanup + + msg info "INSTALLATION FINISHED" + + return 0 +} + +service_configure() +{ + # + # Initialization + # + + msg info '=============================' + msg info '=== CONFIGURATION STARTED ===' + msg info '=============================' + + # load last context state + load_context + + # workaround for mismatch in user designated interfaces, one context and + # actual interfaces on the system... + make_eth_triplets + + # reintroduce vrouter variables as ONEAPP variables... 
+ # - some just for compatibility reasons with the original vrouter + # - rest of the variables affect more or all VNFs + # + # NOTE: NICs and networking changes affect basically all... + # + # BEWARE: the original vrouter variables will always take precedence over + # their ONEAPP_ alternatives (to avoid confusing the users using sunstone + # UI which still supports original vrouter)... + load_vrouter_variables + + # comb and sort multivalue variables + assort_multivalue_variables + + # decide which VNFs will be enabled/disabled + sortout_vnfs + + # + # VNFs Specific Configuration + # + + # VNF ROUTER4 + configure_router4 + + # VNF KEEPALIVED + configure_keepalived + + # VNF DHCP4 + configure_dhcp4 + + # VNF DNS + configure_dns + + # VNF NAT4 + configure_nat4 + + # VNF SDNAT4 + configure_sdnat4 + + # VNF LB + configure_lb + + # VNF HAPROXY + configure_haproxy + + # + # Finalization + # + + # save the current context + save_context + + # enable/disable VNFs + toggle_vnfs + + # store credentials + report_config + + msg info "--- CONFIGURATION FINISHED ---" + + return 0 +} + +service_bootstrap() +{ + msg info "BOOTSTRAP FINISHED" + + return 0 +} + + + +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # + + +# +# functions +# + +postinstall_cleanup() +{ + msg info "Delete cache and stored packages" + apk cache clean || true + rm -rf /var/cache/apk/* + + msg info "Remove artifact directory: ${ONE_SERVICE_SETUP_DIR}/vnf" + rm -rf "${ONE_SERVICE_SETUP_DIR}/vnf" +} + +install_pkgs() +{ + msg info "Fix repositories file" + _alpine_version=$(sed -n \ + 's/[[:space:]]*VERSION_ID=\([0-9]\+\.[0-9]\+\).*/\1/p' \ + /etc/os-release) + + cat > /etc/apk/repositories < /etc/rc.conf < "/etc/init.d/${ONE_VNF_OPENRC_NAME}" < "${ONE_VNF_SERVICE_CONFIG}" + chmod 0644 "${ONE_VNF_SERVICE_CONFIG}" +} + +enable_ssh_forwarding() +{ + sed -i '/^[[:space:]]*AllowTcpForwarding/d' /etc/ssh/sshd_config + sed -i '/^[[:space:]]*AllowAgentForwarding/d' /etc/ssh/sshd_config + + echo 'AllowTcpForwarding yes' >> /etc/ssh/sshd_config + echo 'AllowAgentForwarding yes' >> /etc/ssh/sshd_config +} + +# TODO: aliases - deduplicate code by implicating that ETH0_IP is +# ETH0_ALIAS_IP...this should be implemented everywhere where geth* is used +# ETH0_ALIAS0_CONTEXT_FORCE_IPV4 = "", +# ETH0_ALIAS0_DNS = "192.168.101.1", +# ETH0_ALIAS0_EXTERNAL = "", +# ETH0_ALIAS0_GATEWAY = "192.168.101.1", +# ETH0_ALIAS0_GATEWAY6 = "", +# ETH0_ALIAS0_IP = "192.168.101.100", +# ETH0_ALIAS0_IP6 = "", +# ETH0_ALIAS0_IP6_PREFIX_LENGTH = "", +# ETH0_ALIAS0_IP6_ULA = "", +# ETH0_ALIAS0_MAC = "02:00:c0:a8:65:64", +# ETH0_ALIAS0_MASK = "255.255.255.0", +# ETH0_ALIAS0_MTU = "", +# ETH0_ALIAS0_NETWORK = "", +# ETH0_ALIAS0_SEARCH_DOMAIN = "", +# ETH0_ALIAS0_VLAN_ID = "41", +# ETH0_ALIAS0_VROUTER_IP = "", +# ETH0_ALIAS0_VROUTER_IP6 = "", +# ETH0_ALIAS0_VROUTER_MANAGEMENT = "", +load_vrouter_variables() +{ + msg info "Try to load original vrouter's parameters if used" + + # TODO: alias (improve this) + # TODO: IPv6 + # These changes the character of the network and affects basically + # everything...we record them and track their changes + # + # PART 1: + # reset all variables relevant to removed NIC (unset would mask the change + # and script would not be able recognize that any change to NIC happened) + _recorded_eths=$(env | \ + sed -n 's/^ONEAPP_VROUTER_\(ETH[0-9]\+\)_.*/\1/p' | sort -u) + for _recorded_eth in ${_recorded_eths} ; do + _recorded_aliases=$(env | \ + sed -n "s/^ONEAPP_VROUTER_${_recorded_eth}_\(ALIAS[0-9]\+\)_.*/\1/p" 
| \ + sort -u) + + _eth=$(geth1 "${_recorded_eth}" 1) + if [ -z "$_eth" ] ; then + # unset + for _item in IP MASK MAC DNS GATEWAY MTU ; do + msg info "RESET: ONEAPP_VROUTER_${_recorded_eth}_${_item}" + eval "ONEAPP_VROUTER_${_recorded_eth}_${_item}=''" + eval "export ONEAPP_VROUTER_${_recorded_eth}_${_item}" + + # interface is gone so we erase all aliases for it + for _recorded_alias in ${_recorded_aliases} ; do + msg info "RESET: ONEAPP_VROUTER_${_recorded_eth}_${_recorded_alias}_${_item}" + eval "ONEAPP_VROUTER_${_recorded_eth}_${_recorded_alias}_${_item}=''" + eval "export ONEAPP_VROUTER_${_recorded_eth}_${_recorded_alias}_${_item}" + done + done + else + # interface is still there but alias does not have to be + for _item in IP MASK MAC DNS GATEWAY MTU ; do + for _recorded_alias in ${_recorded_aliases} ; do + _value=$(eval "printf \"\$${_eth}_${_recorded_alias}_${_item}\"") + if [ -z "$_value" ] ; then + msg info "RESET: ONEAPP_VROUTER_${_eth}_${_recorded_alias}_${_item}" + eval "ONEAPP_VROUTER_${_eth}_${_recorded_alias}_${_item}=''" + eval "export ONEAPP_VROUTER_${_eth}_${_recorded_alias}_${_item}" + fi + done + done + fi + done + + # old vrouter's context variables, e.g.: + # VROUTER_ID: + # noop + # VROUTER_KEEPALIVED_ID: + # It serves as a default if ONEAPP_VNF_KEEPALIVED__VRID are + # absent + # VROUTER_KEEPALIVED_PASSWORD: + # Equivalent to ONEAPP_VNF_KEEPALIVED_PASSWORD + # ETH?_VROUTER_IP: + # Is tied closely with Keepalived VNF (it implements it) but also it + # affects all other VNFs (it exposes their function on this VIP) + # ETH?_VROUTER_IP6: + # TODO + # ETH?_VROUTER_MANAGEMENT: + # this is VNF agnostic, can be overruled by VNF specific interfaces + # variable (like ONEAPP_VNF__INTERFACES) + + for _eth in $(get_eths) ; do + # these variables affects some or all VNFs: + # + # ETH0_CONTEXT_FORCE_IPV4= + # ETH0_DNS=8.8.8.8 + # ETH0_GATEWAY6= + # ETH0_GATEWAY=192.168.122.1 + # ETH0_IP6= + # ETH0_IP6_PREFIX_LENGTH= + # ETH0_IP6_ULA= + # ETH0_IP=192.168.122.10 + # ETH0_MAC=02:00:c0:a8:7a:0a + # ETH0_MASK=255.255.255.0 + # ETH0_MTU= + # ETH0_NETWORK=192.168.122.0 + # ETH0_SEARCH_DOMAIN= + # ETH0_VLAN_ID= + # ETH0_VROUTER_IP6= + # ETH0_VROUTER_IP= + # ETH0_VROUTER_MANAGEMENT= + + # some of these are irrelevant (as of now) for any VNF but most of them + # affect directly or indirectly some or all VNFs - we will record their + # values so we can compare them in the future; by doing that we will + # know what VNFs need to be reconfigured... 
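+ #
+ # A hedged illustration of the recording below (the context values are
+ # hypothetical and the eth triplet is assumed to resolve back to ETH0):
+ #
+ #   context:   ETH0_IP=192.168.122.10  ETH0_MASK=255.255.255.0
+ #              ETH0_VROUTER_IP=192.168.122.5  ETH0_VROUTER_MANAGEMENT=NO
+ #
+ #   injected:  ONEAPP_VROUTER_ETH0_IP=192.168.122.10
+ #              ONEAPP_VROUTER_ETH0_MASK=255.255.255.0
+ #              ONEAPP_VROUTER_ETH0_VIP=192.168.122.5
+ #              ONEAPP_VROUTER_ETH0_MANAGEMENT=NO
+ #
+ # On the next reconfiguration the freshly derived ONEAPP_VROUTER_* values
+ # are compared against the previously saved ones (see save_context and
+ # is_changed), which is how changed or removed NICs are detected.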
+ + # VROUTER_MANAGEMENT + _management=$(eval "printf \"\$${_eth}_VROUTER_MANAGEMENT\"") + # inject a new context variable into the environment + eval "ONEAPP_VROUTER_$(geth3 ${_eth} 1)_MANAGEMENT='${_management}'" + eval "export ONEAPP_VROUTER_$(geth3 ${_eth} 1)_MANAGEMENT" + msg info "INJECTED: ONEAPP_VROUTER_$(geth3 ${_eth} 1)_MANAGEMENT =" \ + "$_management" + + # TODO: IPv6 + # VROUTER_IP + _vip=$(eval "printf \"\$${_eth}_VROUTER_IP\"") + # inject a new context variable into the environment + eval "ONEAPP_VROUTER_$(geth3 ${_eth} 1)_VIP='${_vip}'" + eval "export ONEAPP_VROUTER_$(geth3 ${_eth} 1)_VIP" + msg info "INJECTED: ONEAPP_VROUTER_$(geth3 ${_eth} 1)_VIP =" \ + "$_vip" + + # TODO: IPv6 + # These changes the character of the network and affects basically + # everything...we record them and track their changes + # + # PART 2: + # update the variables with the actual values + for _item in IP MASK MAC DNS GATEWAY MTU ; do + _value=$(eval "printf \"\$${_eth}_${_item}\"") + # inject and record a new context variable into the environment + eval "ONEAPP_VROUTER_$(geth3 ${_eth} 1)_${_item}='${_value}'" + eval "export ONEAPP_VROUTER_$(geth3 ${_eth} 1)_${_item}" + msg info "SAVED: ${_eth}_${_item} as ONEAPP_VROUTER_$(geth3 ${_eth} 1)_${_item} =" \ + "$_value" + + # TODO: alias (improve this) + # and the same for aliases + _aliases=$(env | \ + sed -n "s/^${_eth}_\(ALIAS[0-9]\+\)_.*/\1/p" | sort -u) + for _alias in ${_aliases} ; do + _value=$(eval "printf \"\$${_eth}_${_alias}_${_item}\"") + # inject and record a new context variable into the environment + eval "ONEAPP_VROUTER_$(geth3 ${_eth} 1)_${_alias}_${_item}='${_value}'" + eval "export ONEAPP_VROUTER_$(geth3 ${_eth} 1)_${_alias}_${_item}" + msg info "SAVED: ${_eth}_${_alias}_${_item} as ONEAPP_VROUTER_$(geth3 ${_eth} 1)_${_alias}_${_item} =" \ + "$_value" + done + done + + # VROUTER_KEEPALIVED_ID + # + # BEWARE: + # because it is not possible to honor two sources of values which are in + # a conflict the VROUTER_KEEPALIVED_ID serves only as a fallback in the + # absence of ONEAPP_VNF_KEEPALIVED__VRID... + _vrid=$(eval "printf \"\$ONEAPP_VNF_KEEPALIVED_$(geth3 ${_eth} 1)_VRID\"") + if [ -z "$_vrid" ] && [ -n "$VROUTER_KEEPALIVED_ID" ] ; then + # only if we did not provided per instance virtual ID... + + if ! is_valid_vrouter_id "$VROUTER_KEEPALIVED_ID" ; then + msg error "Used 'VROUTER_KEEPALIVED_ID' with an invalid value (it must be in 1-255): ${VROUTER_KEEPALIVED_ID}" + fi + + # inject a new context variable into the environment + eval "ONEAPP_VNF_KEEPALIVED_$(geth3 ${_eth} 1)_VRID='${VROUTER_KEEPALIVED_ID}'" + eval "export ONEAPP_VNF_KEEPALIVED_$(geth3 ${_eth} 1)_VRID" + msg info "INJECTED: ONEAPP_VNF_KEEPALIVED_$(geth3 ${_eth} 1)_VRID =" \ + "$VROUTER_KEEPALIVED_ID" + fi + done + + # for backwards compatibility support with the old vrouter we test if any + # old vrouter variable was set + if [ -n "${VROUTER_ID}${VROUTER_KEEPALIVED_ID}" ] ; then + # VROUTER_KEEPALIVED_PASSWORD + # + # BEWARE: + # this is necessary to be able to _delete_ (reset) the usage of a password; + # the ONEAPP_VNF_KEEPALIVED_PASSWORD must be always overwritten no matter + # the content of the VROUTER_KEEPALIVED_PASSWORD... 
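+ #
+ # (hedged example of why the overwrite is unconditional: if a user who is
+ # migrating from the legacy vrouter clears VROUTER_KEEPALIVED_PASSWORD in
+ # the VM template, the assignment below propagates the now empty value
+ # into ONEAPP_VNF_KEEPALIVED_PASSWORD, so the VRRP password is really
+ # removed on the next reconfiguration instead of the old one being kept)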
+ # + # inject a new context variable into the environment + ONEAPP_VNF_KEEPALIVED_PASSWORD="${VROUTER_KEEPALIVED_PASSWORD}" + export ONEAPP_VNF_KEEPALIVED_PASSWORD + msg info "INJECTED: ONEAPP_VNF_KEEPALIVED_PASSWORD =" \ + "$VROUTER_KEEPALIVED_PASSWORD" + + # IMPLICIT KEEPALIVED + if [ -z "${ONEAPP_VNF_KEEPALIVED_ENABLED}" ] ; then + msg info "Detected old VROUTER context - we enable KEEPALIVED VNF implicitly" + # inject a new context variable into the environment + ONEAPP_VNF_KEEPALIVED_ENABLED=YES + export ONEAPP_VNF_KEEPALIVED_ENABLED + msg info "INJECTED: ONEAPP_VNF_KEEPALIVED_ENABLED = YES" + fi + + # IMPLICIT ROUTER4 + if [ -z "${ONEAPP_VNF_ROUTER4_ENABLED}" ] ; then + msg info "Detected old VROUTER context - we enable ROUTER4 VNF implicitly" + # inject a new context variable into the environment + ONEAPP_VNF_ROUTER4_ENABLED=YES + export ONEAPP_VNF_ROUTER4_ENABLED + msg info "INJECTED: ONEAPP_VNF_ROUTER4_ENABLED = YES" + fi + fi +} + +# unify the value separators (spaces, commas, semicolons) and sort the values +# where it does not break meaning and parse ! expressions +assort_multivalue_variables() +{ + msg info "Unify the separators for multivalue parameters" + + # sortable multivalue variables are these: + # (it will also deduplicate the values) + for _var in \ + ONEAPP_VNF_DNS_ALLOWED_NETWORKS \ + ONEAPP_VNF_DNS_INTERFACES \ + ONEAPP_VNF_DHCP4_INTERFACES \ + ONEAPP_VNF_DHCP4_MAC2IP_SUBNETS \ + ONEAPP_VNF_ROUTER4_INTERFACES \ + ONEAPP_VNF_NAT4_INTERFACES_OUT \ + ONEAPP_VNF_SDNAT4_INTERFACES \ + ONEAPP_VNF_LB_INTERFACES \ + ONEAPP_VNF_HAPROXY_INTERFACES \ + ONEAPP_VNF_KEEPALIVED_INTERFACES \ + ; + do + _value=$(eval "printf \"\$${_var}\"" | \ + tr ',;' ' ' | \ + sed -e 's/^[[:space:]]*//' \ + -e 's/[[:space:]]*$//' \ + -e 's/[[:space:]]\+/ /g' | \ + tr ' ' '\n' | sort -u | tr '\n' ' ' | \ + sed 's/[[:space:]]*$//') + + # save the modified value back + eval "${_var}=\"${_value}\"" + eval "export ${_var}" + done + + # unsortable (the order is significant) multivalue variables are these: + _plus_eth_vars=$(env | sed -n \ + -e 's/^\(ONEAPP_VNF_DHCP4_ETH[0-9]\+_DNS\)=.*/\1/p' \ + -e 's/^\(ONEAPP_VNF_DHCP4_ETH[0-9]\+_GATEWAY\)=.*/\1/p' \ + ) + for _var in \ + ONEAPP_VNF_DNS_NAMESERVERS \ + ONEAPP_VNF_DHCP4_DNS \ + ONEAPP_VNF_DHCP4_GATEWAY \ + ${_plus_eth_vars} ; + do + _value=$(eval "printf \"\$${_var}\"" | \ + tr ',;' ' ' | \ + sed -e 's/^[[:space:]]*//' \ + -e 's/[[:space:]]*$//' \ + -e 's/[[:space:]]\+/ /g') + + # save the modified value back + eval "${_var}=\"${_value}\"" + eval "export ${_var}" + done + + # interface params require a special treatment + # it should be eth or lo, but it also can be an ip address or ! + for _var in \ + ONEAPP_VNF_DNS_INTERFACES \ + ONEAPP_VNF_DHCP4_INTERFACES \ + ONEAPP_VNF_ROUTER4_INTERFACES \ + ONEAPP_VNF_NAT4_INTERFACES_OUT \ + ONEAPP_VNF_SDNAT4_INTERFACES \ + ONEAPP_VNF_LB_INTERFACES \ + ONEAPP_VNF_HAPROXY_INTERFACES \ + ONEAPP_VNF_KEEPALIVED_INTERFACES \ + ; + do + # because we are using the eth triplet system, we can upcase all eths + # in interfaces variables... 
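+ #
+ # A hedged walk-through of the negation handling below (the variable
+ # values are hypothetical; ',' and ';' separators were already turned
+ # into spaces above):
+ #
+ #   ONEAPP_VNF_NAT4_INTERFACES_OUT='!eth0'
+ #     -> ONEAPP_VNF_NAT4_INTERFACES_OUT=''
+ #        ONEAPP_VNF_NAT4_INTERFACES_OUT_DISABLED='ETH0'
+ #
+ #   ONEAPP_VNF_NAT4_INTERFACES_OUT='eth1 !eth0'
+ #     -> ONEAPP_VNF_NAT4_INTERFACES_OUT='ETH1'
+ #        ONEAPP_VNF_NAT4_INTERFACES_OUT_DISABLED=''
+ #        (an explicit interface list takes precedence, so the negation
+ #        is dropped)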
+ _value=$(eval "printf \"\$${_var}\"" | \ + tr '[:lower:]' '[:upper:]') + + _disabled_eths= + _interfaces= + for _iface in ${_value} ; do + if echo "$_iface" | grep -q -e '^[!]ETH[0-9]\+$' -e '^[!]LO$' ; then + # valid expression for: do not use this interface + _iface=$(printf "$_iface" | tr -d '!') + _disabled_eths="${_disabled_eths} ${_iface}" + else + # valid interface + _interfaces="${_interfaces} ${_iface}" + fi + done + + if [ -n "$_interfaces" ] ; then + # negated eths has no meaning now + _disabled_eths= + fi + + _value=$(printf "${_interfaces}" | \ + sed -e 's/^[[:space:]]*//' \ + -e 's/[[:space:]]*$//' \ + -e 's/[[:space:]]\+/ /g') + + # save the modified value back + eval "${_var}=\"${_value}\"" + eval "export ${_var}" + + msg info "INJECTED: ${_var} = ${_value}" + + # save the disabled interfaces + _value=$(printf "${_disabled_eths}" | \ + sed -e 's/^[[:space:]]*//' \ + -e 's/[[:space:]]*$//' \ + -e 's/[[:space:]]\+/ /g') + + # inject a new context variable into the environment: + # ONEAPP_VNF__INTERFACES_DISABLED + eval "${_var}_DISABLED=\"${_value}\"" + eval "export ${_var}_DISABLED" + + msg info "INJECTED: ${_var}_DISABLED = ${_value}" + done +} + +sortout_vnfs() +{ + msg info "Sort out VNFs: ENABLED/DISABLED" + + for _vnf in ${ALL_SUPPORTED_VNF_NAMES} ; do + _value=$(eval "printf \"\$ONEAPP_VNF_${_vnf}_ENABLED\"" | \ + tr '[:upper:]' '[:lower:]') + + case "${_value}" in + 1|true|yes|t|y) + msg info "VNF ${_vnf} will be: ENABLED" + ENABLED_VNF_LIST="${ENABLED_VNF_LIST} ${_vnf}" + + # out of those enabled are any changed? + if is_changed "${_vnf}" ; then + msg info "VNF ${_vnf} is modified - it will be: RELOADED" + UPDATED_VNF_LIST="${UPDATED_VNF_LIST} ${_vnf}" + fi + ;; + ''|0|false|no|f|n) + msg info "VNF ${_vnf} will be: DISABLED" + DISABLED_VNF_LIST="${DISABLED_VNF_LIST} ${_vnf}" + ;; + *) + msg warning "Unknown value ('${_value}') for: ONEAPP_VNF_${_vnf}_ENABLED" + msg warning "VNF ${_vnf} will be: SKIPPED/UNCHANGED" + ;; + esac + done +} + +# arg: +is_running() +( + _vnfs="$1" + + for _vnf in $_vnfs ; do + case "$_vnf" in + DHCP4) + is_running_dhcp4 + return $? + ;; + DNS) + is_running_dns + return $? + ;; + NAT4) + # iptables is a kernel module, there is no process to be + # signaled - we want this to report: RUNNING + # + # that will always force reload_nat4 which basically replaces + # the NAT table with the correct rules everytime (stop, start) + return 0 # it IS running + ;; + SDNAT4) + is_running_sdnat4 + return $? + ;; + ROUTER4) + # sysctl has no process - we want this to report: NOT RUNNING + # + # by starting it each time we ensure that sysctl.conf is + # correct - reload would just reread the current file which + # could have had forwarding disabled... + return 1 # it is not running + ;; + LB) + is_running_lb + return $? + ;; + HAPROXY) + is_running_haproxy + return $? + ;; + KEEPALIVED) + is_running_keepalived + return $? 
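+ # NOTE: the argument is a list only formally - every recognised VNF name
+ # makes its branch return right away, so only the first name is ever
+ # inspected; callers such as toggle_vnfs therefore pass a single name,
+ # e.g. (simplified illustration):
+ #   is_running DHCP4 && reload_vnfs DHCP4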
+ ;; + *) + msg error "Unknown VNF name: This is a bug - this should never happen" + ;; + esac + done +) + +# arg: +enable_vnfs() +{ + _enabled_vnfs="$1" + + for _vnf in $_enabled_vnfs ; do + case "$_vnf" in + DHCP4) + msg info "Enable DHCP4 VNF" + enable_dhcp4 + ;; + DNS) + msg info "Enable DNS VNF" + enable_dns + ;; + NAT4) + msg info "Enable NAT4 VNF" + enable_nat4 + ;; + SDNAT4) + msg info "Enable SDNAT4 VNF" + enable_sdnat4 + ;; + ROUTER4) + msg info "Enable ROUTER4 VNF" + enable_router4 + ;; + LB) + msg info "Enable LB VNF" + enable_lb + ;; + HAPROXY) + msg info "Enable HAPROXY VNF" + enable_haproxy + ;; + KEEPALIVED) + # skip this + : + ;; + *) + msg error "Unknown VNF name: This is a bug - this should never happen" + ;; + esac + done +} + +# arg: +start_vnfs() +{ + _enabled_vnfs="$1" + + for _vnf in $_enabled_vnfs ; do + case "$_vnf" in + DHCP4) + msg info "Start DHCP4 VNF" + start_dhcp4 + ;; + DNS) + msg info "Start DNS VNF" + start_dns + ;; + NAT4) + msg info "Start NAT4 VNF" + start_nat4 + ;; + SDNAT4) + msg info "Start SDNAT4 VNF" + start_sdnat4 + ;; + ROUTER4) + msg info "Start ROUTER4 VNF" + start_router4 + ;; + LB) + msg info "Start LB VNF" + start_lb + ;; + HAPROXY) + msg info "Start HAPROXY VNF" + start_haproxy + ;; + KEEPALIVED) + # skip this + : + ;; + *) + msg error "Unknown VNF name: This is a bug - this should never happen" + ;; + esac + done +} + +# arg: +stop_and_disable_vnfs() +{ + _disabled_vnfs="$1" + + for _vnf in $_disabled_vnfs ; do + case "$_vnf" in + DHCP4) + msg info "Stop and disable DHCP4 VNF" + disable_dhcp4 + stop_dhcp4 + ;; + DNS) + msg info "Stop and disable DNS VNF" + disable_dns + stop_dns + ;; + NAT4) + msg info "Stop and disable NAT4 VNF" + disable_nat4 + stop_nat4 + ;; + SDNAT4) + [ -n "${_SKIP_ONE_VNF}" ] && continue + msg info "Stop and disable SDNAT4 VNF" + disable_sdnat4 + stop_sdnat4 + ;; + ROUTER4) + msg info "Stop and disable ROUTER4 VNF" + disable_router4 + stop_router4 + ;; + LB) + [ -n "${_SKIP_ONE_VNF}" ] && continue + msg info "Stop and disable LB VNF" + disable_lb + stop_lb + ;; + HAPROXY) + [ -n "${_SKIP_ONE_VNF}" ] && continue + msg info "Stop and disable HAPROXY VNF" + disable_haproxy + stop_haproxy + ;; + KEEPALIVED) + msg info "Stop and disable KEEPALIVED VNF" + disable_keepalived + stop_keepalived + ;; + *) + msg error "Unknown VNF name: This is a bug - this should never happen" + ;; + esac + done +} + +# arg: +reload_vnfs() +{ + _updated_vnfs="$1" + + for _vnf in $_updated_vnfs ; do + case "$_vnf" in + DHCP4) + msg info "Reload DHCP4 VNF" + reload_dhcp4 + ;; + DNS) + msg info "Reload DNS VNF" + reload_dns + ;; + NAT4) + msg info "Reload NAT4 VNF" + reload_nat4 + ;; + SDNAT4) + msg info "Reload SDNAT4 VNF" + reload_sdnat4 + ;; + ROUTER4) + msg info "Reload ROUTER4 VNF" + reload_router4 + ;; + LB) + msg info "Reload LB VNF" + reload_lb + ;; + HAPROXY) + msg info "Reload HAPROXY VNF" + reload_haproxy + ;; + KEEPALIVED) + # skip this + : + ;; + *) + msg error "Unknown VNF name: This is a bug - this should never happen" + ;; + esac + done +} + +# TODO: if not running then just start them - if they are running reload them... +toggle_vnfs() +{ + msg info "Toggle VNF services (Start/Stop)" + + # do we have HA setup with keepalived? + if is_in_list KEEPALIVED "$ENABLED_VNF_LIST" ; then + # Keepalived's notify script will take care of services... 
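+ #
+ # A hedged summary of the two modes handled by this function:
+ #
+ #   HA mode (KEEPALIVED is in ENABLED_VNF_LIST):
+ #     every other VNF service is stopped and disabled right below and
+ #     keepalived's notify mechanism is expected to start/stop them
+ #     according to the VRRP state of this node (e.g. only the MASTER
+ #     runs them).
+ #
+ #   standalone mode (the else branch):
+ #     DISABLED_VNF_LIST is stopped/disabled, ENABLED_VNF_LIST is
+ #     enabled, and every enabled VNF is then either reloaded (when it
+ #     is already running and is_changed put it on UPDATED_VNF_LIST) or
+ #     started.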
+ + msg info "Keepalived will take care of starting and stopping of VNFs" + + msg info "Stop and disable all VNFs except keepalived" + _vnfs=$(for _vnf in $ALL_SUPPORTED_VNF_NAMES ; do echo "$_vnf" ; done \ + | sed '/^KEEPALIVED$/d') + stop_and_disable_vnfs "$_vnfs" + + # first verify that keepalived has at least one vrrp instance otherwise + # it has nothing to do and it behaves undeterministically (it can be in + # all states: MASTER, BACKUP, FAULT - at least from my experience...) + if grep -q '^vrrp_instance ' "$VNF_KEEPALIVED_CONFIG" ; then + # enable and start keepalived + + msg info "Enable KEEPALIVED VNF" + enable_keepalived + + if is_running_keepalived ; then + # TODO: improve this + # we must always reload (actually restart) keepalived to trigger + # restart/reload of all changed VNFs + + #if is_in_list KEEPALIVED "$UPDATED_VNF_LIST" ; then + # msg info "Reload KEEPALIVED VNF" + # reload_keepalived + #fi + + msg info "Reload/restart KEEPALIVED VNF" + reload_keepalived + else + msg info "Start KEEPALIVED VNF" + start_keepalived + fi + else + # disable idle (no instance) keepalived + msg warning "Keepalived has no vrrp instance - it has nothing to do..." + stop_and_disable_vnfs 'KEEPALIVED' + fi + else + # no HA and no keepalived - that means we take care of VNFs... + + # stop and disable unrequested VNFs/services + stop_and_disable_vnfs "$DISABLED_VNF_LIST" + + # enable requested VNFs/services + enable_vnfs "$ENABLED_VNF_LIST" + + # reload/start updated and enabled VNFs/services + for _vnf in ${ENABLED_VNF_LIST} ; do + if is_running "${_vnf}" ; then + if is_in_list "${_vnf}" "$UPDATED_VNF_LIST" ; then + reload_vnfs "${_vnf}" + fi + else + start_vnfs "${_vnf}" + fi + done + fi +} + +# arg: +is_changed() +( + _vnf="$1" + + for i in $(get_changed_context_vars) ; do + if echo "$i" | grep -q \ + -e "^ONEAPP_VNF_${_vnf}_" \ + -e "^ONEAPP_VNF_LB[0-9]*_" \ + -e "^ONEAPP_VNF_HAPROXY_LB[0-9]*_" \ + -e "^ONEAPP_VROUTER_" \ + ; + then + return 0 + fi + done + + return 1 +) + +# args: