From d36d5e8f13b90d0aae8a6382c4abe8f4cc48d9b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 13 Sep 2023 11:46:35 +0200 Subject: [PATCH 01/40] Add init image based on Leap to be used by the server image This image is not needed for SUSE Manager as the BCI:Init image will be used instead. Changed the push-packages-to-obs.sh to ignore it there. --- containers/init-image/Dockerfile | 27 ++++++++++++++++++++++++ containers/init-image/_service | 4 ++++ containers/init-image/init-image.changes | 4 ++++ containers/init-image/tito.props | 2 ++ rel-eng/packages/init-image | 1 + rel-eng/push-packages-to-obs.sh | 9 ++++++-- 6 files changed, 45 insertions(+), 2 deletions(-) create mode 100644 containers/init-image/Dockerfile create mode 100644 containers/init-image/_service create mode 100644 containers/init-image/init-image.changes create mode 100644 containers/init-image/tito.props create mode 100644 rel-eng/packages/init-image diff --git a/containers/init-image/Dockerfile b/containers/init-image/Dockerfile new file mode 100644 index 000000000000..75c8a0598f87 --- /dev/null +++ b/containers/init-image/Dockerfile @@ -0,0 +1,27 @@ +# Specify the license of the container build description (see also the LICENSE file) +# SPDX-License-Identifier: MIT +#!BuildTag: uyuni/init:latest + +FROM opensuse/leap:15.5 + +# Define labels according to https://en.opensuse.org/Building_derived_containers +# labelprefix=org.opensuse.uyuni.init +LABEL org.opencontainers.image.title="Systemd image" +LABEL org.opencontainers.image.description="This container runs systemd" +LABEL org.opencontainers.image.version="4.4.0" +LABEL org.opensuse.reference="registry.opensuse.org/uyuni/init:4.4.0.%RELEASE%" +LABEL org.openbuildservice.disturl="%DISTURL%" +LABEL org.opencontainers.image.created="%BUILDTIME%" +LABEL org.opencontainers.image.vendor="Uyuni project" +LABEL org.opencontainers.image.url="https://www.uyuni-project.org/" +# endlabelprefix + +# Fill the image with 
content and clean the cache(s) +RUN set -euo pipefail; zypper -n in --no-recommends systemd gzip; zypper -n clean; rm -rf /var/log/* +CMD ["/usr/lib/systemd/systemd"] + +RUN mkdir -p /etc/systemd/system.conf.d/ && \ + printf "[Manager]\nLogColor=no" > \ + /etc/systemd/system.conf.d/01-nocolor.conf +RUN systemctl disable getty@tty1.service +HEALTHCHECK --interval=5s --timeout=5s --retries=5 CMD ["/usr/bin/systemctl", "is-active", "multi-user.target"] diff --git a/containers/init-image/_service b/containers/init-image/_service new file mode 100644 index 000000000000..c8a5f650ca00 --- /dev/null +++ b/containers/init-image/_service @@ -0,0 +1,4 @@ + + + + diff --git a/containers/init-image/init-image.changes b/containers/init-image/init-image.changes new file mode 100644 index 000000000000..ea72fbcf17cf --- /dev/null +++ b/containers/init-image/init-image.changes @@ -0,0 +1,4 @@ +------------------------------------------------------------------- +Wed Sep 13 08:38:26 UTC 2023 - Cédric Bosdonnat + +- Initial image based on Leap 15.5 diff --git a/containers/init-image/tito.props b/containers/init-image/tito.props new file mode 100644 index 000000000000..f22069cb8efa --- /dev/null +++ b/containers/init-image/tito.props @@ -0,0 +1,2 @@ +[buildconfig] +tagger = tito.tagger.SUSEContainerTagger diff --git a/rel-eng/packages/init-image b/rel-eng/packages/init-image new file mode 100644 index 000000000000..117d1e5f0e99 --- /dev/null +++ b/rel-eng/packages/init-image @@ -0,0 +1 @@ +4.4.0 containers/init-image/ diff --git a/rel-eng/push-packages-to-obs.sh b/rel-eng/push-packages-to-obs.sh index ba754d1310b2..cdfd6072d119 100755 --- a/rel-eng/push-packages-to-obs.sh +++ b/rel-eng/push-packages-to-obs.sh @@ -55,8 +55,13 @@ function srpm_package_defs() { # done < <(srpm_package_defs) # test -n "$PACKAGE" || { - PACKAGE=$(find "$SRPM_DIR" -mindepth 1 -maxdepth 1 -type d -printf "%P\n" \ - | grep -v -x -e heirloom-pkgtools -e rhnclient -e smartpm -e jabberd-selinux -e 
oracle-rhnsat-selinux -e oracle-selinux -e oracle-xe-selinux -e spacewalk-monitoring-selinux -e spacewalk-proxy-selinux -e spacewalk-selinux -e cx_Oracle -e apt-spacewalk -e perl-DBD-Oracle) + if test "$OSCAPI" == "https://api.suse.de"; then + # The init-image is not needed for SUMA: BCI:Init will be used instead + PACKAGE=$(find "$SRPM_DIR" -mindepth 1 -maxdepth 1 -type d -printf "%P\n" \ + | grep -v -x -e init-image) + else + PACKAGE=$(find "$SRPM_DIR" -mindepth 1 -maxdepth 1 -type d -printf "%P\n") + fi } for N in $PACKAGE; do test -d "$SRPM_DIR/$N" || { From 3301f281d2b7d94822b2612ad3bd660f373a02a1 Mon Sep 17 00:00:00 2001 From: Artem Shiliaev Date: Fri, 27 Jan 2023 15:56:17 +0100 Subject: [PATCH 02/40] initial version uyuni server image --- containers/server-image/.env | 23 +++++ containers/server-image/Dockerfile | 85 +++++++++++++++++++ containers/server-image/README.md | 4 + containers/server-image/_constraints | 7 ++ containers/server-image/_service | 4 + containers/server-image/java_agent.yaml | 6 ++ containers/server-image/postgres-exporter | 19 +++++ .../postgres_exporter_queries.yaml | 52 ++++++++++++ containers/server-image/remove_unused.sh | 7 ++ containers/server-image/server-image.changes | 1 + containers/server-image/taskomatic_jmx.conf | 2 + containers/server-image/tito.props | 2 + containers/server-image/tomcat_jmx.conf | 2 + rel-eng/packages/server-image | 1 + 14 files changed, 215 insertions(+) create mode 100644 containers/server-image/.env create mode 100644 containers/server-image/Dockerfile create mode 100644 containers/server-image/README.md create mode 100644 containers/server-image/_constraints create mode 100644 containers/server-image/_service create mode 100644 containers/server-image/java_agent.yaml create mode 100644 containers/server-image/postgres-exporter create mode 100644 containers/server-image/postgres_exporter_queries.yaml create mode 100755 containers/server-image/remove_unused.sh create mode 100644 
containers/server-image/server-image.changes create mode 100644 containers/server-image/taskomatic_jmx.conf create mode 100644 containers/server-image/tito.props create mode 100644 containers/server-image/tomcat_jmx.conf create mode 100644 rel-eng/packages/server-image diff --git a/containers/server-image/.env b/containers/server-image/.env new file mode 100644 index 000000000000..cce186fe910a --- /dev/null +++ b/containers/server-image/.env @@ -0,0 +1,23 @@ +# MANAGER_USER= +# MANAGER_PASS= +# MANAGER_ADMIN_EMAIL= +# CERT_O= +# CERT_OU= +# CERT_CITY= +# CERT_STATE= +# CERT_COUNTRY= +# CERT_EMAIL= +# CERT_PASS= +# USE_EXISTING_CERTS= +# MANAGER_DB_NAME= +# MANAGER_DB_HOST= +# MANAGER_DB_PORT= +# MANAGER_DB_PROTOCOL= +# MANAGER_ENABLE_TFTP= +# SCC_USER= +# SCC_PASS= +# REPORT_DB_HOST= +# REPORT_DB_PORT= +# REPORT_DB_NAME= +# REPORT_DB_USER= +# REPORT_DB_PASS= diff --git a/containers/server-image/Dockerfile b/containers/server-image/Dockerfile new file mode 100644 index 000000000000..dab8bfdae92a --- /dev/null +++ b/containers/server-image/Dockerfile @@ -0,0 +1,85 @@ +# SPDX-License-Identifier: MIT +#!BuildTag: uyuni/server:latest + +ARG INIT_BASE=uyuni/init:latest +FROM $INIT_BASE + +ARG PRODUCT_PATTERN_PREFIX="patterns-uyuni" + +COPY remove_unused.sh . 
+RUN echo "rpm.install.excludedocs = yes" >>/etc/zypp/zypp.conf + +# Main packages +RUN zypper ref && zypper --non-interactive up +RUN zypper --gpg-auto-import-keys --non-interactive install --auto-agree-with-licenses --force-resolution \ + ${PRODUCT_PATTERN_PREFIX}_server \ + ${PRODUCT_PATTERN_PREFIX}_retail \ + ed \ + susemanager-tftpsync \ + golang-github-prometheus-node_exporter \ + prometheus-postgres_exporter \ + golang-github-QubitProducts-exporter_exporter \ + prometheus-jmx_exporter \ + prometheus-jmx_exporter-tomcat \ + spacecmd \ + grafana-formula \ + locale-formula \ + prometheus-exporters-formula \ + prometheus-formula \ + registry-formula \ + virtualization-formulas \ + uyuni-config-formula \ + inter-server-sync \ + golang-github-lusitaniae-apache_exporter \ + golang-github-prometheus-node_exporter \ + prometheus-postgres_exporter \ + golang-github-QubitProducts-exporter_exporter \ + prometheus-jmx_exporter \ + spacecmd \ + javamail \ + libyui-ncurses-pkg16 \ + virtual-host-gatherer-Kubernetes \ + virtual-host-gatherer-libcloud \ + virtual-host-gatherer-Libvirt \ + virtual-host-gatherer-Nutanix \ + virtual-host-gatherer-VMware \ + vim \ + ipmitool + +RUN sed -i 's/sysctl kernel.shmmax/#sysctl kernel.shmmax/g' /usr/bin/uyuni-setup-reportdb + +RUN mkdir -p /etc/postgres_exporter \ + /etc/prometheus-jmx_exporter/tomcat \ + /usr/lib/systemd/system/tomcat.service.d \ + /etc/prometheus-jmx_exporter/taskomatic \ + /usr/lib/systemd/system/taskomatic.service.d + +COPY postgres_exporter_queries.yaml /etc/postgres_exporter/postgres_exporter_queries.yaml +COPY postgres-exporter /etc/sysconfig/prometheus-postgres_exporter +COPY java_agent.yaml /etc/prometheus-jmx_exporter/tomcat/java_agent.yml +COPY java_agent.yaml /etc/prometheus-jmx_exporter/taskomatic/java_agent.yml +COPY tomcat_jmx.conf /usr/lib/systemd/system/tomcat.service.d/jmx.conf +COPY taskomatic_jmx.conf /usr/lib/systemd/system/taskomatic.service.d/jmx.conf + +RUN systemctl enable 
prometheus-node_exporter + +# LABELs +ARG PRODUCT=Uyuni +ARG VENDOR="Uyuni project" +ARG URL="https://www.uyuni-project.org/" +ARG REFERENCE_PREFIX="registry.opensuse.org/uyuni" + +# Build Service required labels +# labelprefix=org.opensuse.uyuni.server +LABEL org.opencontainers.image.title="${PRODUCT} server container" +LABEL org.opencontainers.image.description="All-in-one ${PRODUCT} server image" +LABEL org.opencontainers.image.created="%BUILDTIME%" +LABEL org.opencontainers.image.vendor="${VENDOR}" +LABEL org.opencontainers.image.url="${URL}" +LABEL org.opencontainers.image.version="4.4.0" +LABEL org.openbuildservice.disturl="%DISTURL%" +LABEL org.opensuse.reference="${REFERENCE_PREFIX}/server:4.4.0.%RELEASE%" +# endlabelprefix + +CMD ["/usr/lib/systemd/systemd"] +HEALTHCHECK --interval=5s --timeout=5s --retries=5 CMD ["/usr/bin/systemctl", "is-active", "multi-user.target"] diff --git a/containers/server-image/README.md b/containers/server-image/README.md new file mode 100644 index 000000000000..44ff672dda2e --- /dev/null +++ b/containers/server-image/README.md @@ -0,0 +1,4 @@ +# Known issues + +* Avahi names are not resolved inside the container + diff --git a/containers/server-image/_constraints b/containers/server-image/_constraints new file mode 100644 index 000000000000..e6e2c4c0e019 --- /dev/null +++ b/containers/server-image/_constraints @@ -0,0 +1,7 @@ + + + + 10 + + + diff --git a/containers/server-image/_service b/containers/server-image/_service new file mode 100644 index 000000000000..bde87fa5bc1f --- /dev/null +++ b/containers/server-image/_service @@ -0,0 +1,4 @@ + + + + diff --git a/containers/server-image/java_agent.yaml b/containers/server-image/java_agent.yaml new file mode 100644 index 000000000000..50cd72ebba9f --- /dev/null +++ b/containers/server-image/java_agent.yaml @@ -0,0 +1,6 @@ +whitelistObjectNames: + - java.lang:type=Threading,* + - java.lang:type=Memory,* + - Catalina:type=ThreadPool,name=* +rules: + - pattern: ".*" diff --git 
a/containers/server-image/postgres-exporter b/containers/server-image/postgres-exporter new file mode 100644 index 000000000000..4a8011acf428 --- /dev/null +++ b/containers/server-image/postgres-exporter @@ -0,0 +1,19 @@ +## Path: Applications/PostgreSQLExporter +## Description: Prometheus exporter for PostgreSQL +## Type: string() +## Default: "postgresql://user:passwd@localhost:5432/database?sslmode=disable" +## ServiceRestart: postgres-exporter +# +# Connection URL to postgresql instance +# +DATA_SOURCE_NAME="postgresql://spacewalk:spacewalk@localhost:5432/susemanager?sslmode=disable" + +## Path: Applications/PostgreSQLExporter +## Description: Prometheus exporter for PostgreSQL +## Type: string() +## Default: "" +## ServiceRestart: postgres-exporter +# +# Extra options for postgres-exporter +# +POSTGRES_EXPORTER_PARAMS="--extend.query-path /etc/postgres_exporter/postgres_exporter_queries.yaml" diff --git a/containers/server-image/postgres_exporter_queries.yaml b/containers/server-image/postgres_exporter_queries.yaml new file mode 100644 index 000000000000..f6b3d362880f --- /dev/null +++ b/containers/server-image/postgres_exporter_queries.yaml @@ -0,0 +1,52 @@ +mgr_serveractions: + query: | + SELECT ( + SELECT COUNT(*) + FROM rhnServerAction + WHERE status = ( + SELECT id FROM rhnActionStatus WHERE name = 'Queued' + ) + ) AS queued, + ( + SELECT COUNT(*) + FROM rhnServerAction + WHERE status = ( + SELECT id FROM rhnActionStatus WHERE name = 'Picked Up' + ) + ) AS picked_up, + ( + SELECT COUNT(*) + FROM rhnServerAction + WHERE status = ( + SELECT id FROM rhnActionStatus WHERE name IN ('Completed') + ) + ) AS completed, + ( + SELECT COUNT(*) + FROM rhnServerAction + WHERE status = ( + SELECT id FROM rhnActionStatus WHERE name IN ('Failed') + ) + ) AS failed; + metrics: + - queued: + usage: "GAUGE" + description: "Count of queued Actions" + - picked_up: + usage: "GAUGE" + description: "Count of picked up Actions" + - completed: + usage: "COUNTER" + description: 
"Count of completed Actions" + - failed: + usage: "COUNTER" + description: "Count of failed Actions" + salt_events: + query: | + SELECT COUNT(*) + FROM suseSaltEvent + AS salt_events_count; + metrics: + - salt_events_count: + usage: "GAUGE" + description: "Count of suse salt events" diff --git a/containers/server-image/remove_unused.sh b/containers/server-image/remove_unused.sh new file mode 100755 index 000000000000..ac95e70ecf0f --- /dev/null +++ b/containers/server-image/remove_unused.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# Removes any unnecessary files and packages before moving to the next build stage + +set -xe + +zypper clean --all +rpm -e zypper diff --git a/containers/server-image/server-image.changes b/containers/server-image/server-image.changes new file mode 100644 index 000000000000..3d281727b618 --- /dev/null +++ b/containers/server-image/server-image.changes @@ -0,0 +1 @@ +- Initialized a server image diff --git a/containers/server-image/taskomatic_jmx.conf b/containers/server-image/taskomatic_jmx.conf new file mode 100644 index 000000000000..7f19d11ddb83 --- /dev/null +++ b/containers/server-image/taskomatic_jmx.conf @@ -0,0 +1,2 @@ +[Service] +Environment="JAVA_AGENT=-javaagent:/usr/share/java/jmx_prometheus_javaagent.jar=5557:/etc/prometheus-jmx_exporter/taskomatic/java_agent.yml" diff --git a/containers/server-image/tito.props b/containers/server-image/tito.props new file mode 100644 index 000000000000..f22069cb8efa --- /dev/null +++ b/containers/server-image/tito.props @@ -0,0 +1,2 @@ +[buildconfig] +tagger = tito.tagger.SUSEContainerTagger diff --git a/containers/server-image/tomcat_jmx.conf b/containers/server-image/tomcat_jmx.conf new file mode 100644 index 000000000000..a31b816897fe --- /dev/null +++ b/containers/server-image/tomcat_jmx.conf @@ -0,0 +1,2 @@ +[Service] +Environment="CATALINA_OPTS=-javaagent:/usr/share/java/jmx_prometheus_javaagent.jar=5556:/etc/prometheus-jmx_exporter/tomcat/java_agent.yml" diff --git 
a/rel-eng/packages/server-image b/rel-eng/packages/server-image new file mode 100644 index 000000000000..fe257db88808 --- /dev/null +++ b/rel-eng/packages/server-image @@ -0,0 +1 @@ +4.4.0 containers/server-image/ From 91db4b6dc8c8ae35f20a88a473c9aaf8c32769e6 Mon Sep 17 00:00:00 2001 From: Ricardo Mestre Date: Tue, 14 Feb 2023 16:26:47 +0100 Subject: [PATCH 03/40] Add ant rules to deploy to a container server Uses uyunictl to deploy on a server container running either in kubernetes or on podman. --- java/manager-build.xml | 146 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 141 insertions(+), 5 deletions(-) diff --git a/java/manager-build.xml b/java/manager-build.xml index c2689a520d62..cc520f348332 100644 --- a/java/manager-build.xml +++ b/java/manager-build.xml @@ -17,10 +17,13 @@ + + + @@ -226,7 +229,14 @@ yarn is not in the PATH. Please install yarn first. - + + + + + + + + @@ -235,10 +245,6 @@ - - - - @@ -294,6 +300,136 @@ + + + + + + + + + uyunictl is not in the PATH. Please install uyunictl first. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From 4abaec95c1dd67fc7d4737c551d52ae3935b2ccc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 16 Feb 2023 22:06:33 +0100 Subject: [PATCH 04/40] Add notes on how to use the server container image --- containers/doc/server-kubernetes/README.md | 284 +++++++++++++++++++++ 1 file changed, 284 insertions(+) create mode 100644 containers/doc/server-kubernetes/README.md diff --git a/containers/doc/server-kubernetes/README.md b/containers/doc/server-kubernetes/README.md new file mode 100644 index 000000000000..7e11ea31ca60 --- /dev/null +++ b/containers/doc/server-kubernetes/README.md @@ -0,0 +1,284 @@ +# Prerequisites + +The following assumes you have either a single-node RKE2 or K3s cluster ready or a server with Podman installed and enough resources for the Uyuni server. +When installing on a Kubernetes cluster, it also assumes that `kubectl` and `helm` are installed on the server and configured to connect to the cluster. + +Note that in the case of a k3s or rke2 cluster the kubeconfig will be discovered in the default `/etc/rancher` folder: there is no need to set `KUBECONFIG` or copy the file to `~/.kube/config`. + +# Preparing the installation + +## Podman specific setup + +Podman stores its volumes in `/var/lib/containers/storage/volumes/`. +In order to provide custom storage for the volumes, mount disks on that path oreven the expected volume path inside it like `/var/lib/containers/storage/volumes/var-spacewalk`. + +**This needs to be performed before installing Uyuni as the volumes will be populated at that time.** + +## RKE2 specific setup + +RKE2 doesn't have automatically provisioning Persistent Volume by default. 
+Either the expected Persistent Volumes need to be created beforehand or a storage class with automatic provisioning has to be defined before installing Uyuni.
+
+## K3s specific setup
+
+The installation will work perfectly fine without changing anything, but tuning the storage class may be needed to avoid using the local path provisioner.
+
+# Offline installation
+
+
+## For K3s
+
+With K3s it is possible to preload the container images and avoid having them fetched from a registry.
+For this, on a machine with internet access, pull the image using `podman`, `docker` or `skopeo` and save it as a `tar` archive.
+For example:
+
+⚠️ **TODO**: Verify instructions
+```
+for image in cert-manager-cainjector cert-manager-controller cert-manager-ctl cert-manager-webhook; do
+  podman pull quay.io/jetstack/$image
+  podman save --output $image.tar quay.io/jetstack/$image:latest
+done
+
+podman pull registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest
+
+podman save --output server.tar registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest
+```
+
+or
+
+⚠️ **TODO**: Verify instructions
+```
+for image in cert-manager-cainjector cert-manager-controller cert-manager-ctl cert-manager-webhook; do
+  skopeo copy docker://quay.io/jetstack/$image:latest docker-archive:$image.tar:quay.io/jetstack/$image:latest
+done
+
+skopeo copy docker://registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest docker-archive:server.tar:registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest
+```
+
+Copy the `cert-manager` and `uyuni/server` helm charts locally:
+
+⚠️ **TODO**: verify instructions
+
+```
+helm pull --repo https://charts.jetstack.io --destination . cert-manager
+helm pull --destination . oci://registry.opensuse.org/uyuni/server
+```
+
+Transfer the resulting `*.tar` images to the K3s node and load them using the following command:
+
+```
+for archive in `ls *.tar`; do
+  k3s ctr images import $archive
+done
+```
+
+In order to tell K3s not to pull the images, the image pull policy needs to be set to `Never`.
+This needs to be done for both Uyuni and cert-manager helm charts.
+
+For the Uyuni helm chart, set the `pullPolicy` chart value to `Never` by passing a `--helm-uyuni-values=uyuni-values.yaml` parameter to `uyuniadm install` with the following `uyuni-values.yaml` file content:
+
+```
+pullPolicy: Never
+```
+
+For the cert-manager helm chart, create a `cert-values.yaml` file with the following content and pass `--helm-certmanager-values=values.yaml` parameter to `uyuniadm install`:
+
+```
+image:
+  pullPolicy: Never
+```
+
+⚠️ **TODO**: verify the file names
+To use the downloaded helm charts instead of the default ones, pass `--helm-uyuni-chart=server.tgz` and `--helm-certmanager-chart=cert-manager.tgz` or add the following to the `uyuniadm` configuration file:
+
+```
+helm:
+  uyuni:
+    chart: server.tgz
+    values: uyuni-values.yaml
+  certmanager:
+    chart: cert-manager.tgz
+    values: cert.values.yaml
+```
+
+## For RKE2
+
+RKE2 doesn't allow preloading images on the nodes.
+Instead, use `skopeo` to import the images into a local registry and use this one to install.
+
+Copy the `cert-manager` and `uyuni/server` helm charts locally:
+
+⚠️ **TODO**: verify instructions
+
+```
+helm pull --repo https://charts.jetstack.io --destination . cert-manager
+helm pull --destination . oci://registry.opensuse.org/uyuni/server
+```
+
+⚠️ **TODO** Prepare instructions
+```
+# TODO Copy the cert-manager and uyuni images
+# TODO Set the uyuniadm parameters
+```
+
+## For Podman
+
+With Podman it is possible to preload the container image and avoid having it fetched from a registry.
+For this, on a machine with internet access, pull the image using `podman`, `docker` or `skopeo` and save it as a `tar` archive.
+For example:
+
+```
+podman pull registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest
+podman save --output server-image.tar registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest
+```
+
+or
+
+```
+skopeo copy docker://registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest docker-archive:server-image.tar:registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest
+```
+
+Transfer the resulting `server-image.tar` to the server and load it using the following command:
+
+```
+podman load -i server-image.tar
+```
+
+# Migrating from a regular server
+
+In order to migrate a regular Uyuni server to containers, a new machine is required: it is not possible to perform an in-place migration.
+The old server is designated as the source server and the new machine is the destination one.
+
+The migration procedure does not perform any hostname rename.
+The fully qualified domain name will be the same on the new server as on the source one.
+This means the DNS records need to be adjusted after the migration to use the new server.
+
+## Preparing
+
+### Stop the source server
+
+Stop the source services:
+
+```
+spacewalk-service stop
+systemctl stop postgresql
+```
+
+### Preparing the SSH connection
+
+The `SSH` configuration and agent should be ready on the host for a passwordless connection to the source server.
+The migration script only uses the source server fully qualified domain name in the SSH command.
+This means that every other configuration required to connect needs to be defined in the `~/.ssh/config` file.
+
+For a passwordless connection, the migration script will use an SSH agent on the server.
+If none is running yet, run `eval $(ssh-agent)`.
+Add the SSH key to the running agent using `ssh-add /path/to/the/private/key`. +The private key password will be prompted. + +### Prepare for Kubernetes + +Since the migration job will start the container from scratch the Persistent Volumes need to be defined before running the `uyuniadm migrate command`. +Refer to the installation section for more details on the volumes preparation. + +## Migrating + +Run the following command to install a new Uyuni server from the source one after replacing the `uyuni.source.fqdn` by the proper source server FQDN: +This command will synchronize all the data from the source server to the new one: this can take time! + +``` +uyuniadm migrate podman uyuni.source.fqdn +``` + +or + +``` +uyuniadm migrate kubernetes uyuni.source.fqdn +``` + +# Installing Uyuni + +## Installing + +The installation using `uyuniadm install` will ask for the password if those are not provided using the command line parameters or the configuration file. +For security reason, using command line parameters to specify passwords should be avoided: use the configuration file with proper permissions instead. + +Prepare an `uyuniadm.yaml` file like the following: + +``` +db: + password: MySuperSecretDBPass +cert: + password: MySuperSecretCAPass +``` + +To dismiss the email prompts add the `email` and `emailFrom` configurations to the above file or use the `--email` and `--emailFrom` parameters for `uyuniadm install`. + +Run one of the following command to install after replacing the `uyuni.example.com` by the FQDN of the server to install: + +``` +uyuniadm -c uyuniadm.yaml install podman uyuni.example.com +``` + +or + +``` +uyuniadm -c uyuniadm.yaml install kubernetes uyuni.example.com +``` + +### Podman specific configuration + +Additional parameters can be passed to Podman using `--podman-arg` parameters. 
+
+### Kubernetes specific configuration
+
+The `uyuniadm install` command comes with parameters and thus configuration values for advanced helm chart configuration.
+To pass additional values to the Uyuni helm chart at installation time, use the `--helm-uyuni-values chart-values.yaml` parameter or a configuration like the following:
+
+```
+helm:
+  uyuni:
+    values: chart-values.yaml
+```
+
+The path set as value for this configuration is a YAML file passed to the Uyuni Helm chart.
+Be aware that some of the values in this file will be overridden by the `uyuniadm install` parameters.
+
+Note that the Helm chart installs a deployment with one replica.
+The pod name is automatically generated by Kubernetes and changes at every start.
+
+
+# Using Uyuni in containers
+
+To get a shell in the pod run `uyunictl exec -ti bash`.
+Note that this command can be used to run any command inside the server like `uyunictl exec tail /var/log/rhn/rhn_web_ui.log`
+
+To copy files to the server, use the `uyunictl cp <local_path> server:<remote_path>` command.
+Conversely to copy files from the server use `uyunictl cp server:<remote_path> <local_path>`.
+
+# Developing with the containers
+
+## Deploying code
+
+To deploy java code on the pod change to the `java` directory and run:
+
+```
+ant -f manager-build.xml refresh-branding-jar deploy-restart-container
+```
+
+**Note** To deploy TSX or Salt code, use the `deploy-static-resources-container` and `deploy-salt-files-container` tasks of the ant file.
+
+## Attaching a java debugger
+
+In order to attach a Java debugger, Uyuni needs to have been installed using the `--debug-java` option to set up the container to listen on JDWP ports and expose them.
+
+The debugger can now be attached to the usual ports (8003 for tomcat, 8001 for taskomatic and 8002 for the search server) on the host FQDN.
+ +# Uninstalling + +To remove everything including the volumes, run the following command: + +``` +uyuniadm uninstall --purge-volumes +``` + +Note that `cert-manager` will not be uninstalled if it was not installed by `uyuniadm`. From cdd98c99d53c5c16bb4af4a23282d9de43f27622 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Fri, 1 Sep 2023 18:03:34 +0200 Subject: [PATCH 05/40] Use a MIRROR_PATH environment variable at setup for containers --- susemanager/bin/mgr-setup | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 6d2cb1b8395b..39c61c6f56c3 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -923,6 +923,11 @@ do_setup() { setup_spacewalk + # In the container case, we have the MIRROR_PATH environment variable at setup + if [ -n "$MIRROR_PATH" ]; then + echo "server.susemanager.fromdir = $MIRROR_PATH" >> /etc/rhn/rhn.conf + fi + if [ -n "$ISS_PARENT" ]; then local certname=`echo "MASTER-$ISS_PARENT-TRUSTED-SSL-CERT" | sed 's/\./_/g'` curl -s -S -o /usr/share/rhn/$certname "http://$ISS_PARENT/pub/RHN-ORG-TRUSTED-SSL-CERT" From fd11c2cba658dabeaed523d53d65136313914a43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Fri, 1 Sep 2023 18:04:22 +0200 Subject: [PATCH 06/40] Add helm chart for the server container Provide a helm chart to ease installation of the server container on kubernetes cluster. 
--- containers/server-helm/.helmignore | 23 + containers/server-helm/Chart.yaml | 9 + containers/server-helm/_service | 3 + containers/server-helm/charts/.gitkeep | 0 containers/server-helm/server-helm.changes | 4 + containers/server-helm/templates/_helpers.tpl | 11 + .../server-helm/templates/deployment.yaml | 659 ++++++++++++++++++ containers/server-helm/templates/ingress.yaml | 187 +++++ .../templates/k3s-ingress-routes.yaml | 154 ++++ containers/server-helm/templates/service.yaml | 94 +++ containers/server-helm/templates/volumes.yaml | 599 ++++++++++++++++ containers/server-helm/tito.props | 2 + containers/server-helm/values.yaml | 78 +++ rel-eng/packages/server-helm | 1 + 14 files changed, 1824 insertions(+) create mode 100644 containers/server-helm/.helmignore create mode 100644 containers/server-helm/Chart.yaml create mode 100644 containers/server-helm/_service create mode 100644 containers/server-helm/charts/.gitkeep create mode 100644 containers/server-helm/server-helm.changes create mode 100644 containers/server-helm/templates/_helpers.tpl create mode 100644 containers/server-helm/templates/deployment.yaml create mode 100644 containers/server-helm/templates/ingress.yaml create mode 100644 containers/server-helm/templates/k3s-ingress-routes.yaml create mode 100644 containers/server-helm/templates/service.yaml create mode 100644 containers/server-helm/templates/volumes.yaml create mode 100644 containers/server-helm/tito.props create mode 100644 containers/server-helm/values.yaml create mode 100644 rel-eng/packages/server-helm diff --git a/containers/server-helm/.helmignore b/containers/server-helm/.helmignore new file mode 100644 index 000000000000..0e8a0eb36f4c --- /dev/null +++ b/containers/server-helm/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/containers/server-helm/Chart.yaml b/containers/server-helm/Chart.yaml new file mode 100644 index 000000000000..6d5111b4fbf8 --- /dev/null +++ b/containers/server-helm/Chart.yaml @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: MIT +#!BuildTag: uyuni/server:latest +apiVersion: v2 +name: server +description: Uyuni server containers. +type: application +home: https://www.uyuni-project.org/ +icon: https://www.uyuni-project.org/img/uyuni-logo.svg +version: 4.4.0 diff --git a/containers/server-helm/_service b/containers/server-helm/_service new file mode 100644 index 000000000000..dc713a1f9381 --- /dev/null +++ b/containers/server-helm/_service @@ -0,0 +1,3 @@ + + + diff --git a/containers/server-helm/charts/.gitkeep b/containers/server-helm/charts/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/containers/server-helm/server-helm.changes b/containers/server-helm/server-helm.changes new file mode 100644 index 000000000000..efc36b9365e0 --- /dev/null +++ b/containers/server-helm/server-helm.changes @@ -0,0 +1,4 @@ +------------------------------------------------------------------- +Thu Mar 9 13:43:51 UTC 2023 - Cédric Bosdonnat + +- Initial version diff --git a/containers/server-helm/templates/_helpers.tpl b/containers/server-helm/templates/_helpers.tpl new file mode 100644 index 000000000000..2cba9281b6b8 --- /dev/null +++ b/containers/server-helm/templates/_helpers.tpl @@ -0,0 +1,11 @@ +{{- define "deployment.container.image" -}} +{{- $imageName := .name -}} +{{- $uri := (printf "%s/%s:%s" .global.Values.repository $imageName .global.Values.version) | default .global.Chart.AppVersion -}} +{{- if .global.Values.images -}} +{{- $image := (get .global.Values.images $imageName) -}} +{{- if $image -}} +{{- $uri = $image -}} +{{- end -}} +{{- 
end -}} +{{- $uri -}} +{{- end -}} \ No newline at end of file diff --git a/containers/server-helm/templates/deployment.yaml b/containers/server-helm/templates/deployment.yaml new file mode 100644 index 000000000000..1eadab2c3f25 --- /dev/null +++ b/containers/server-helm/templates/deployment.yaml @@ -0,0 +1,659 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: uyuni + namespace: "{{ .Release.Namespace }}" +spec: + replicas: 1 + selector: + matchLabels: + app: uyuni + template: + metadata: + labels: + app: uyuni + spec: + initContainers: +{{- if not .Values.migration }} + - name: init-etc-tls + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/pki/tls /mnt; + chmod --reference=/etc/pki/tls /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/pki/tls/. /mnt; + ln -s /etc/pki/spacewalk-tls/spacewalk.crt /mnt/certs/spacewalk.crt; + ln -s /etc/pki/spacewalk-tls/spacewalk.key /mnt/private/spacewalk.key; + cp /etc/pki/spacewalk-tls/spacewalk.key /mnt/private/pg-spacewalk.key; + chown postgres:postgres /mnt/private/pg-spacewalk.key; + fi + volumeMounts: + - mountPath: /mnt + name: etc-tls + - name: tls-key + mountPath: /etc/pki/spacewalk-tls +{{- end }} + - name: init-var-cobbler + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/var/lib/cobbler /mnt; + chmod --reference=/var/lib/cobbler /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/lib/cobbler/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-cobbler + - name: init-var-pgsql + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) 
| indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/var/lib/pgsql /mnt; + chmod --reference=/var/lib/pgsql /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/lib/pgsql/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-pgsql + - name: init-var-cache + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/var/cache /mnt; + chmod --reference=/var/cache /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/cache/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-cache + - name: init-var-log + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/var/log /mnt; + chmod --reference=/var/log /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/log/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-log + - name: init-srv-salt + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/salt /mnt; + chmod --reference=/srv/salt /mnt + volumeMounts: + - mountPath: /mnt + name: srv-salt + - name: init-srv-www-pub + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/www/htdocs/pub /mnt; + chmod --reference=/srv/www/htdocs/pub /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/www/htdocs/pub/. 
/mnt; + ln -s /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT /mnt/RHN-ORG-TRUSTED-SSL-CERT; + fi + volumeMounts: + - mountPath: /mnt + name: srv-www-pub + - name: init-srv-www-cobbler + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/www/cobbler /mnt; + chmod --reference=/srv/www/cobbler /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/www/cobbler/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-www-cobbler + - name: init-srv-www-osimages + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/www/os-images /mnt; + chmod --reference=/srv/www/os-images /mnt + volumeMounts: + - mountPath: /mnt + name: srv-www-osimages + - name: init-srv-tftpboot + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/tftpboot /mnt; + chmod --reference=/srv/tftpboot /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/tftpboot/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-tftpboot + - name: init-srv-formulametadata + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/formula_metadata /mnt; + chmod --reference=/srv/formula_metadata /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/formula_metadata/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-formulametadata + - name: init-srv-pillar + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) 
| indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/pillar /mnt; + chmod --reference=/srv/pillar /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/pillar/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-pillar + - name: init-srv-susemanager + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/susemanager /mnt; + chmod --reference=/srv/susemanager /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/susemanager/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-susemanager + - name: init-srv-spacewalk + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/spacewalk /mnt; + chmod --reference=/srv/spacewalk /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/spacewalk/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-spacewalk + - name: init-root + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/root /mnt; + chmod --reference=/root /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /root/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: root + - name: init-etc-apache2 + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/apache2 /mnt; + chmod --reference=/etc/apache2 /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/apache2/. 
/mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-apache2 + - name: init-etc-rhn + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/rhn /mnt; + chmod --reference=/etc/rhn /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/rhn/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-rhn + - name: init-etc-systemd-multi + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/systemd/system/multi-user.target.wants /mnt; + chmod --reference=/etc/systemd/system/multi-user.target.wants /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/systemd/system/multi-user.target.wants/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-systemd-multi + - name: init-etc-systemd-sockets + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/systemd/system/sockets.target.wants /mnt; + chmod --reference=/etc/systemd/system/sockets.target.wants /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/systemd/system/sockets.target.wants/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-systemd-sockets + - name: init-etc-salt + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/salt /mnt; + chmod --reference=/etc/salt /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/salt/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-salt + - name: init-etc-tomcat + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) 
| indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/tomcat /mnt; + chmod --reference=/etc/tomcat /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/tomcat/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-tomcat + - name: init-etc-cobbler + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/cobbler /mnt; + chmod --reference=/etc/cobbler /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/cobbler/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-cobbler + - name: init-etc-sysconfig + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/sysconfig /mnt; + chmod --reference=/etc/sysconfig /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/sysconfig/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-sysconfig + - name: init-etc-postfix + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/postfix /mnt; + chmod --reference=/etc/postfix /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/postfix/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-postfix + containers: + - name: uyuni + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) 
| indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - containerPort: 443 + - containerPort: 80 + - containerPort: 4505 + - containerPort: 4506 + - containerPort: 69 +{{- if .Values.enableMonitoring | default true }} + - containerPort: 9100 + - containerPort: 9187 + - containerPort: 9800 +{{- end }} + protocol: UDP + - containerPort: 25151 + - containerPort: 5432 +{{- if .Values.exposeJavaDebug | default false }} + - containerPort: 8001 + - containerPort: 8002 + - containerPort: 8003 +{{- end }} + env: + - name: TZ + value: {{ .Values.timezone | default "Etc/UTC" }} +{{- if and .Values.mirror (or .Values.mirror.claimName .Values.mirror.hostPath) }} + - name: MIRROR_PATH + value: /mirror +{{- end }} +{{- if and .Values.migration (and .Values.migration.ssh .Values.migration.ssh.agentSocket) }} + - name: SSH_AUTH_SOCK + value: /tmp/ssh_auth_sock +{{- end }} + volumeMounts: + - mountPath: /run + name: tmp + - mountPath: /sys/fs/cgroup + name: cgroup + - mountPath: /var/lib/cobbler + name: var-cobbler + - mountPath: /var/lib/pgsql + name: var-pgsql + - mountPath: /var/cache + name: var-cache + - mountPath: /var/spacewalk + name: var-spacewalk + - mountPath: /var/log + name: var-log + - mountPath: /srv/salt + name: srv-salt + - mountPath: /srv/www/htdocs/pub + name: srv-www-pub + - mountPath: /srv/www/cobbler + name: srv-www-cobbler + - mountPath: /srv/www/os-images + name: srv-www-osimages + - mountPath: /srv/tftpboot + name: srv-tftpboot + - mountPath: /srv/formula_metadata + name: srv-formulametadata + - mountPath: /srv/pillar + name: srv-pillar + - mountPath: /srv/susemanager + name: srv-susemanager + - mountPath: /srv/spacewalk + name: srv-spacewalk + - mountPath: /root + name: root + - mountPath: /etc/apache2 + name: etc-apache2 + - mountPath: /etc/rhn + name: etc-rhn + - mountPath: /etc/systemd/system/multi-user.target.wants + name: etc-systemd-multi + - mountPath: /etc/systemd/system/sockets.target.wants + name: etc-systemd-sockets + - mountPath: 
/etc/salt + name: etc-salt + - mountPath: /etc/tomcat + name: etc-tomcat + - mountPath: /etc/cobbler + name: etc-cobbler + - mountPath: /etc/sysconfig + name: etc-sysconfig + - mountPath: /etc/postfix + name: etc-postfix +{{- if not .Values.migration }} + - mountPath: /etc/pki/tls + name: etc-tls + - name: ca-cert + mountPath: /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT + readOnly: true + subPath: ca.crt + - name: tls-key + mountPath: /etc/pki/spacewalk-tls +{{- end }} +{{- if and .Values.mirror (or .Values.mirror.claimName .Values.mirror.hostPath) }} + - name: mirror + mountPath: /mirror +{{- end }} +{{- if .Values.migration }} + {{- if .Values.migration.ssh }} + {{- if .Values.migration.ssh.agentSocket }} + - name: ssh-auth-sock + mountPath: /tmp/ssh_auth_sock + {{- end }} + {{- if .Values.migration.ssh.configPath }} + - name: ssh-config + mountPath: /root/.ssh/config + {{- end }} + {{- if .Values.migration.ssh.knownHostsPath }} + - name: ssh-known-hosts + mountPath: /root/.ssh/known_hosts + {{- end }} + {{- end }} + {{- if .Values.migration.dataPath }} + - name: migration-data + mountPath: /var/lib/uyuni-tools + {{- end }} +{{- end }} + volumes: + - name: tmp + emptyDir: + medium: Memory + sizeLimit: 256Mi + - name: cgroup + hostPath: + path: /sys/fs/cgroup + type: Directory + - name: var-cobbler + persistentVolumeClaim: + claimName: var-cobbler + - name: var-pgsql + persistentVolumeClaim: + claimName: var-pgsql + - name: var-cache + persistentVolumeClaim: + claimName: var-cache + - name: var-spacewalk + persistentVolumeClaim: + claimName: var-spacewalk + - name: var-log + persistentVolumeClaim: + claimName: var-log + - name: srv-salt + persistentVolumeClaim: + claimName: srv-salt + - name: srv-www-pub + persistentVolumeClaim: + claimName: srv-www-pub + - name: srv-www-cobbler + persistentVolumeClaim: + claimName: srv-www-cobbler + - name: srv-www-osimages + persistentVolumeClaim: + claimName: srv-www-osimages + - name: srv-tftpboot + 
persistentVolumeClaim: + claimName: srv-tftpboot + - name: srv-formulametadata + persistentVolumeClaim: + claimName: srv-formulametadata + - name: srv-pillar + persistentVolumeClaim: + claimName: srv-pillar + - name: srv-susemanager + persistentVolumeClaim: + claimName: srv-susemanager + - name: srv-spacewalk + persistentVolumeClaim: + claimName: srv-spacewalk + - name: root + persistentVolumeClaim: + claimName: root + - name: etc-apache2 + persistentVolumeClaim: + claimName: etc-apache2 + - name: etc-rhn + persistentVolumeClaim: + claimName: etc-rhn + - name: etc-systemd-multi + persistentVolumeClaim: + claimName: etc-systemd-multi + - name: etc-systemd-sockets + persistentVolumeClaim: + claimName: etc-systemd-sockets + - name: etc-salt + persistentVolumeClaim: + claimName: etc-salt + - name: etc-tomcat + persistentVolumeClaim: + claimName: etc-tomcat + - name: etc-cobbler + persistentVolumeClaim: + claimName: etc-cobbler + - name: etc-sysconfig + persistentVolumeClaim: + claimName: etc-sysconfig + - name: etc-postfix + persistentVolumeClaim: + claimName: etc-postfix +{{- if not .Values.migration }} + - name: ca-cert + configMap: + name: uyuni-ca + - name: etc-tls + persistentVolumeClaim: + claimName: etc-tls + - name: tls-key + secret: + secretName: uyuni-cert + items: + - key: tls.crt + path: spacewalk.crt + - key: tls.key + path: spacewalk.key + mode: 0600 +{{- end }} +{{- if .Values.mirror }} + {{- if .Values.mirror.claimName }} + - name: mirror + persistentVolumeClaim: + claimName: {{ .Values.mirror.claimName }} + {{- else if .Values.mirror.hostPath }} + - name: mirror + hostPath: + path: {{ .Values.mirror.hostPath }} + {{- end }} +{{- end }} +{{- if .Values.migration }} + {{- if .Values.migration.ssh }} + {{- if .Values.migration.ssh.agentSocket }} + - name: ssh-auth-sock + hostPath: + path: {{ .Values.migration.ssh.agentSocket }} + {{- end }} + {{- if .Values.migration.ssh.configPath }} + - name: ssh-config + hostPath: + path: {{ 
.Values.migration.ssh.configPath }} + {{- end }} + {{- if .Values.migration.ssh.knownHostsPath }} + - name: ssh-known-hosts + hostPath: + path: {{ .Values.migration.ssh.knownHostsPath }} + {{- end }} + {{- if .Values.migration.dataPath }} + - name: migration-data + hostPath: + path: {{ .Values.migration.dataPath }} + {{- end }} + {{- end }} +{{- end }} + dnsPolicy: ClusterFirst + restartPolicy: Always diff --git a/containers/server-helm/templates/ingress.yaml b/containers/server-helm/templates/ingress.yaml new file mode 100644 index 000000000000..9bf87a7c5c62 --- /dev/null +++ b/containers/server-helm/templates/ingress.yaml @@ -0,0 +1,187 @@ +{{- if not .Values.migration }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + creationTimestamp: null + name: uyuni-ingress-ssl + namespace: "{{ .Release.Namespace }}" + annotations: + {{- if eq .Values.ingress "traefik" }} + traefik.ingress.kubernetes.io/router.tls: "true" + traefik.ingress.kubernetes.io/router.tls.domains.n.main: "{{ .Values.fqdn }}" + traefik.ingress.kubernetes.io/router.entrypoints: "websecure,web" + {{- end }} + {{- if .Values.ingressSslAnnotations }} +{{ toYaml .Values.ingressSslAnnotations | indent 4 }} + {{- end }} + labels: + app: uyuni +spec: + tls: + - hosts: + - {{ .Values.fqdn }} + secretName: uyuni-cert + rules: + - host: {{ .Values.fqdn }} + http: + paths: + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: / + pathType: Prefix + {{- if eq .Values.ingress "traefik" }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + creationTimestamp: null + name: uyuni-ingress-ssl-redirect + namespace: "{{ .Release.Namespace }}" + annotations: + traefik.ingress.kubernetes.io/router.middlewares: "default-uyuni-https-redirect@kubernetescrd" + traefik.ingress.kubernetes.io/router.entrypoints: "web" + labels: + app: uyuni +spec: + rules: + - host: {{ .Values.fqdn }} + http: + paths: + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: / + 
pathType: Prefix + {{- end }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + creationTimestamp: null + name: uyuni-ingress-nossl + namespace: "{{ .Release.Namespace }}" + annotations: + {{- if eq .Values.ingress "nginx" }} + nginx.ingress.kubernetes.io/ssl-redirect: "false" + {{- else if eq .Values.ingress "traefik" }} + traefik.ingress.kubernetes.io/router.tls: "false" + traefik.ingress.kubernetes.io/router.entrypoints: "web" + {{- end }} + labels: + app: uyuni +spec: + rules: + - host: {{ .Values.fqdn }} + http: + paths: + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /pub + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/([^/])+/DownloadFile + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /(rhn/)?rpc/api + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/errors + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/ty/TinyUrl + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/websocket + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/metrics + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /cobbler_api + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /cblr + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /httpboot + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /images + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /cobbler + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /os-images + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /tftp + pathType: Prefix + - backend: + service: 
+ name: uyuni-tcp + port: + number: 80 + path: /docs + pathType: Prefix +{{- end }} diff --git a/containers/server-helm/templates/k3s-ingress-routes.yaml b/containers/server-helm/templates/k3s-ingress-routes.yaml new file mode 100644 index 000000000000..73077c2c850f --- /dev/null +++ b/containers/server-helm/templates/k3s-ingress-routes.yaml @@ -0,0 +1,154 @@ +{{- if eq .Values.ingress "traefik" }} +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: uyuni-https-redirect + namespace: "{{ .Release.Namespace }}" +spec: + redirectScheme: + scheme: https + permanent: true +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: postgresql-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - postgres + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 5432 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: salt-publish-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - salt-publish + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 4505 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: salt-request-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - salt-request + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 4506 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: cobbler-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - cobbler + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 25151 +{{- if .Values.enableMonitoring | default true }} +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: postgresql-exporter-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - psql-mtrx + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 9187 +--- +apiVersion: 
traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: tasko-jmx-exporter-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - tasko-jmx-mtrx + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 5556 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: tomcat-jmx-exporter-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - tomcat-jmx-mtrx + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 5557 +{{- end }} +{{- if .Values.exposeJavaDebug }} +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: tomcat-debug-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - tomcat-debug + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 8002 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: tasko-debug-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - tasko-debug + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 8001 +{{- end }} +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteUDP +metadata: + name: tftp-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - tftp + routes: + - services: + - name: uyuni-udp + port: 69 +{{- end }} diff --git a/containers/server-helm/templates/service.yaml b/containers/server-helm/templates/service.yaml new file mode 100644 index 000000000000..30abe811dc14 --- /dev/null +++ b/containers/server-helm/templates/service.yaml @@ -0,0 +1,94 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: uyuni + name: uyuni-tcp + namespace: "{{ .Release.Namespace }}" +{{- if .Values.servicesAnnotations }} + annotations: +{{ toYaml .Values.servicesAnnotations | indent 4 }} +{{- end }} +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: salt-publish + port: 4505 + protocol: TCP + targetPort: 4505 + - 
name: salt-request + port: 4506 + protocol: TCP + targetPort: 4506 + - name: cobbler + port: 25151 + protocol: TCP + targetPort: 25151 + - name: postgresql + port: 5432 + protocol: TCP + targetPort: 5432 +{{- if .Values.enableMonitoring | default true }} + - name: node-exporter + port: 9100 + protocol: TCP + targetPort: 9100 + - name: postgres-exporter + port: 9187 + protocol: TCP + targetPort: 9187 + - name: taskomatic + port: 9800 + protocol: TCP + targetPort: 9800 + - name: taskomatic-jmx-metrics + port: 5556 + protocol: TCP + targetPort: 5556 + - name: tomcat-jmx-metrics + port: 5557 + protocol: TCP + targetPort: 5557 + +{{- end }} +{{- if .Values.exposeJavaDebug | default false }} + - name: tomcat-debug + port: 8003 + protocol: TCP + targetPort: 8003 + - name: search-debug + port: 8002 + protocol: TCP + targetPort: 8002 + - name: tasko-debug + port: 8001 + protocol: TCP + targetPort: 8001 +{{- end }} + selector: + app: uyuni + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: uyuni + name: uyuni-udp + namespace: "{{ .Release.Namespace }}" +{{- if .Values.servicesAnnotations }} + annotations: +{{ toYaml .Values.servicesAnnotations | indent 4 }} +{{- end }} +spec: + ports: + - name: tftp + port: 69 + protocol: UDP + targetPort: 69 + selector: + app: uyuni + type: ClusterIP diff --git a/containers/server-helm/templates/volumes.yaml b/containers/server-helm/templates/volumes.yaml new file mode 100644 index 000000000000..d60bea490571 --- /dev/null +++ b/containers/server-helm/templates/volumes.yaml @@ -0,0 +1,599 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-cobbler + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Gi +{{- if 
.Values.matchPvByLabel }} + selector: + matchLabels: + data: var-cobbler +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-pgsql + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-pgsql +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-cache + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-cache +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-spacewalk + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-spacewalk +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-log + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 
2Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-log +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-salt + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-salt +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-www-pub + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-www-pub +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-www-cobbler + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-www-cobbler +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-www-osimages + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + 
requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-www-osimages +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-tftpboot + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-tftpboot +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-formulametadata + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-formulametadata +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-pillar + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-pillar +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-susemanager + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml 
.Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-susemanager +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-spacewalk + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-spacewalk +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: root + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: root +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-apache2 + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-apache2 +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-rhn + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: 
+{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-rhn +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-systemd-multi + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-systemd-multi +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-systemd-sockets + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-systemd-sockets +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-salt + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-salt +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-tomcat + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- 
end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-tomcat +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-cobbler + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 1Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-cobbler +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-sysconfig + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 1Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-sysconfig +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-tls + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 1Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-tls +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-postfix + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass 
}}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 1Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-postfix +{{- end }} diff --git a/containers/server-helm/tito.props b/containers/server-helm/tito.props new file mode 100644 index 000000000000..f22069cb8efa --- /dev/null +++ b/containers/server-helm/tito.props @@ -0,0 +1,2 @@ +[buildconfig] +tagger = tito.tagger.SUSEContainerTagger diff --git a/containers/server-helm/values.yaml b/containers/server-helm/values.yaml new file mode 100644 index 000000000000..f954b6ee9798 --- /dev/null +++ b/containers/server-helm/values.yaml @@ -0,0 +1,78 @@ +# The default repository and image version if not defined otherwise +repository: registry.opensuse.org/uyuni +version: latest + +## Allows to override the default URI for an image if defined +## Requires a full URI in a form of /: +## +images: + # server: // + + +## Ref: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy +## +pullPolicy: "IfNotPresent" + +## uyuni server overall Persistent Volume access modes +## Must match those of existing PV or dynamic provisioner +## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +accessModes: + - ReadWriteOnce + +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +# storageClass: "-" + +## matchPvByLabel adds selectors on each claim to select a PV with a 'data' label matching the PVC name. +## This can be helpful for static PV management. +matchPvByLabel: false + +## mirror defines a volume or host path to mount in the container as server.susemanager.fromdir value. +## Use either claimName or hostPath to reference the volume source. 
+## +## When using claimName, both claims and PVs need to be defined before running the chart +## Note that hostPath will not work on multi-node cluster +## +## If the value is set before the first run of the server, the rhn.conf file will be adjusted during the setup. +#mirror: +# claimName: mirror +# hostPath: /srv/mirror + +# TODO Parametrize big volumes sizes + +## servicesAnnotations are annotations to set on both TCP and UDP services. +## This can be useful to share the same IP when using metallb +# servicesAnnotations: + +## exposeJavaDebug will expose the 8001, 8002 and 8003 ports to connect a Java debugger +## to taskomatic, search server and tomcat respectively +# exposeJavaDebug: true + +## enableMonitoring will expose the 9100 9187 5556 5557 9500 9800 ports for prometheus to scrape +enableMonitoring: true + +## ingress defines the ingress that is used in the cluster. +## It can be either "nginx", "traefik" or any other value. +ingress: "traefik" + +## ingressSsl are annotations to pass the SSL ingress. 
+## This can be used to set a cert-manager issuer like: +## ingressSslAnnotations: +## cert-manager.io/cluster-issuer: uyuniIssuer +# ingressSslAnnotations: + +# The time zone to set in the containers +timezone: "Etc/UTC" + +# Only used to migrate from an existing non-container server +# migration: +# ssh: +# agentSocket: /tmp/socket/path +# configPath: /home/mine/.ssh/config +# knownHostsPath: /home/mine/.ssh/known_hosts +# dataPath: /tmp/uyuni-migration diff --git a/rel-eng/packages/server-helm b/rel-eng/packages/server-helm new file mode 100644 index 000000000000..7ffc33013560 --- /dev/null +++ b/rel-eng/packages/server-helm @@ -0,0 +1 @@ +4.4.0 containers/server-helm/ From 89bf5d2b90c4faa3a27395626b027ecab0ded2ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 21 Mar 2023 17:36:46 +0100 Subject: [PATCH 07/40] Adjust testsuite for containerized server --- testsuite/.rubocop.yml | 3 + testsuite/features/core/allcli_sanity.feature | 3 +- .../init_clients/proxy_branch_network.feature | 2 + .../step_definitions/command_steps.rb | 94 +++++++++--- .../step_definitions/navigation_steps.rb | 3 +- testsuite/features/support/commonlib.rb | 28 ++-- testsuite/features/support/constants.rb | 2 +- testsuite/features/support/lavanda.rb | 134 ++++++++++++++++++ testsuite/features/support/twopence_init.rb | 22 ++- 9 files changed, 246 insertions(+), 45 deletions(-) diff --git a/testsuite/.rubocop.yml b/testsuite/.rubocop.yml index 0143161b2d44..e7c0b75b4235 100644 --- a/testsuite/.rubocop.yml +++ b/testsuite/.rubocop.yml @@ -9,5 +9,8 @@ Metrics/CyclomaticComplexity: Metrics/ClassLength: Enabled: false +Metrics/ModuleLength: + Enabled: false + Style/MissingElse: Enabled: false diff --git a/testsuite/features/core/allcli_sanity.feature b/testsuite/features/core/allcli_sanity.feature index 4949116b4fca..e28d0750c9f5 100644 --- a/testsuite/features/core/allcli_sanity.feature +++ b/testsuite/features/core/allcli_sanity.feature @@ -6,8 +6,7 @@ Feature: 
Sanity checks I want to be sure to use a sane environment Scenario: The server is healthy - Then "server" should have a FQDN - And reverse resolution should work for "server" + Then reverse resolution should work for "server" And the clock from "server" should be exact And service "apache2" is enabled on "server" And service "apache2" is active on "server" diff --git a/testsuite/features/init_clients/proxy_branch_network.feature b/testsuite/features/init_clients/proxy_branch_network.feature index eb6ac0ae2e8f..0c033de31db8 100644 --- a/testsuite/features/init_clients/proxy_branch_network.feature +++ b/testsuite/features/init_clients/proxy_branch_network.feature @@ -30,6 +30,7 @@ Feature: Setup Uyuni for Retail branch network @proxy @private_net @susemanager +@skip_if_container_server Scenario: Install the Retail pattern on the SUSE Manager server When I refresh the metadata for "server" When I install pattern "suma_retail" on this "server" @@ -39,6 +40,7 @@ Feature: Setup Uyuni for Retail branch network @proxy @private_net @uyuni +@skip_if_container_server Scenario: Install the Retail pattern on the Uyuni server When I refresh the metadata for "server" When I install pattern "uyuni_retail" on this "server" diff --git a/testsuite/features/step_definitions/command_steps.rb b/testsuite/features/step_definitions/command_steps.rb index aae0f807e3cc..8ba956d41169 100644 --- a/testsuite/features/step_definitions/command_steps.rb +++ b/testsuite/features/step_definitions/command_steps.rb @@ -454,7 +454,7 @@ When(/^I fetch "([^"]*)" to "([^"]*)"$/) do |file, host| node = get_target(host) - node.run("wget http://#{get_target('server').full_hostname}/#{file}") + node.run("curl -s -O http://#{get_target('server').full_hostname}/#{file}") end When(/^I wait until file "([^"]*)" contains "([^"]*)" on server$/) do |file, content| @@ -579,8 +579,8 @@ return_code = file_inject(get_target('server'), source, dest) raise 'File injection failed' unless return_code.zero? 
end - get_target('server').run('curl --output DSP2043_2019.1.zip https://www.dmtf.org/sites/default/files/standards/documents/DSP2043_2019.1.zip') - get_target('server').run('unzip DSP2043_2019.1.zip') + get_target('server').run('curl --output /root/DSP2043_2019.1.zip https://www.dmtf.org/sites/default/files/standards/documents/DSP2043_2019.1.zip') + get_target('server').run('unzip /root/DSP2043_2019.1.zip -d /root/') cmd = "/usr/bin/python3 /root/Redfish-Mockup-Server/redfishMockupServer.py " \ "-H #{get_target('server').full_hostname} -p 8443 " \ "-S -D /root/DSP2043_2019.1/public-catfish/ " \ @@ -1005,35 +1005,91 @@ end When(/^I copy server\'s keys to the proxy$/) do - %w[RHN-ORG-PRIVATE-SSL-KEY RHN-ORG-TRUSTED-SSL-CERT rhn-ca-openssl.cnf].each do |file| - return_code = file_extract(get_target('server'), '/root/ssl-build/' + file, '/tmp/' + file) - raise 'File extraction failed' unless return_code.zero? - get_target('proxy').run('mkdir -p /root/ssl-build') - return_code = file_inject(get_target('proxy'), '/tmp/' + file, '/root/ssl-build/' + file) - raise 'File injection failed' unless return_code.zero? + _out, code = get_target('server').run_local("systemctl is-active k3s", check_errors: false) + if code.zero? 
+ # Server running in Kubernetes doesn't know anything about SSL CA + certificate = "apiVersion: cert-manager.io/v1\\n"\ + "kind: Certificate\\n"\ + "metadata:\\n"\ + " name: uyuni-proxy\\n"\ + "spec:\\n"\ + " secretName: uyuni-proxy-cert\\n"\ + " subject:\\n"\ + " countries: ['DE']\\n"\ + " provinces: ['Bayern']\\n"\ + " localities: ['Nuernberg']\\n"\ + " organizations: ['SUSE']\\n"\ + " organizationalUnits: ['SUSE']\\n"\ + " emailAddresses:\\n"\ + " - galaxy-noise@suse.de\\n"\ + " commonName: #{get_target('proxy').full_hostname}\\n"\ + " dnsNames:\\n"\ + " - #{get_target('proxy').full_hostname}\\n"\ + " issuerRef:\\n"\ + " name: uyuni-ca-issuer\\n"\ + " kind: Issuer" + _out, return_code = get_target('server').run_local("echo -e \"#{certificate}\" | kubectl apply -f -") + raise 'Failed to define proxy Certificate resource' unless return_code.zero? + # cert-manager takes some time to generate the secret, wait for it before continuing + repeat_until_timeout(timeout: 600, message: "Kubernetes uyuni-proxy-cert secret has not been defined") do + _result, code = get_target('server').run_local("kubectl get secret uyuni-proxy-cert", check_errors: false) + break if code.zero? + sleep 1 + end + _out, return_code = get_target('server').run_local("kubectl get secret uyuni-proxy-cert -o jsonpath='{.data.tls\\.crt}' | base64 -d >/tmp/proxy.crt") + raise 'Failed to store proxy certificate' unless return_code.zero? + _out, return_code = get_target('server').run_local("kubectl get secret uyuni-proxy-cert -o jsonpath='{.data.tls\\.key}' | base64 -d >/tmp/proxy.key") + raise 'Failed to store proxy key' unless return_code.zero? + _out, return_code = get_target('server').run_local("kubectl get secret uyuni-proxy-cert -o jsonpath='{.data.ca\\.crt}' | base64 -d >/tmp/ca.crt") + raise 'Failed to store CA certificate' unless return_code.zero? 
+ + %w[proxy.crt proxy.key ca.crt].each do |file| + return_code, = get_target('server').extract_file("/tmp/#{file}", "/tmp/#{file}") + raise 'File extraction failed' unless return_code.zero? + return_code = file_inject(get_target('proxy'), "/tmp/#{file}", "/tmp/#{file}") + raise 'File injection failed' unless return_code.zero? + end + else + %w[RHN-ORG-PRIVATE-SSL-KEY RHN-ORG-TRUSTED-SSL-CERT rhn-ca-openssl.cnf].each do |file| + return_code = file_extract(get_target('server'), '/root/ssl-build/' + file, '/tmp/' + file) + raise 'File extraction failed' unless return_code.zero? + get_target('proxy').run('mkdir -p /root/ssl-build') + return_code = file_inject(get_target('proxy'), '/tmp/' + file, '/root/ssl-build/' + file) + raise 'File injection failed' unless return_code.zero? + end end end When(/^I configure the proxy$/) do + _out, code = get_target('server').run_local("systemctl is-active k3s", check_errors: false) + # prepare the settings file settings = "RHN_PARENT=#{get_target('server').full_hostname}\n" \ "HTTP_PROXY=''\n" \ "VERSION=''\n" \ "TRACEBACK_EMAIL=galaxy-noise@suse.de\n" \ - "USE_EXISTING_CERTS=n\n" \ "INSTALL_MONITORING=n\n" \ - "SSL_PASSWORD=spacewalk\n" \ - "SSL_ORG=SUSE\n" \ - "SSL_ORGUNIT=SUSE\n" \ - "SSL_COMMON=#{get_target('proxy').full_hostname}\n" \ - "SSL_CITY=Nuremberg\n" \ - "SSL_STATE=Bayern\n" \ - "SSL_COUNTRY=DE\n" \ - "SSL_EMAIL=galaxy-noise@suse.de\n" \ - "SSL_CNAME_ASK=proxy.example.org\n" \ "POPULATE_CONFIG_CHANNEL=y\n" \ "RHN_USER=admin\n" \ "ACTIVATE_SLP=y\n" + settings += if code.zero? 
+ "USE_EXISTING_CERTS=y\n" \ + "CA_CERT=/tmp/ca.crt\n" \ + "SERVER_KEY=/tmp/proxy.key\n" \ + "SERVER_CERT=/tmp/proxy.crt\n" + else + "USE_EXISTING_CERTS=n\n" \ + "INSTALL_MONITORING=n\n" \ + "SSL_PASSWORD=spacewalk\n" \ + "SSL_ORG=SUSE\n" \ + "SSL_ORGUNIT=SUSE\n" \ + "SSL_COMMON=#{get_target('proxy').full_hostname}\n" \ + "SSL_CITY=Nuremberg\n" \ + "SSL_STATE=Bayern\n" \ + "SSL_COUNTRY=DE\n" \ + "SSL_EMAIL=galaxy-noise@suse.de\n" \ + "SSL_CNAME_ASK=proxy.example.org\n" + end path = generate_temp_file('config-answers.txt', settings) step 'I copy "' + path + '" to "proxy"' `rm #{path}` diff --git a/testsuite/features/step_definitions/navigation_steps.rb b/testsuite/features/step_definitions/navigation_steps.rb index 982196c30de5..fe8d78530114 100644 --- a/testsuite/features/step_definitions/navigation_steps.rb +++ b/testsuite/features/step_definitions/navigation_steps.rb @@ -461,9 +461,8 @@ system_name = get_system_name(host) rescue raise "Host #{host} not found" if if_present.empty? - log "Host #{host} is not deployed, not trying to select it" - return + next end step %(I select "#{system_name}" from "#{field}") end diff --git a/testsuite/features/support/commonlib.rb b/testsuite/features/support/commonlib.rb index d9b59c2651ed..68c765ceb8e8 100644 --- a/testsuite/features/support/commonlib.rb +++ b/testsuite/features/support/commonlib.rb @@ -361,14 +361,8 @@ def get_system_name(host) when 'containerized_proxy' system_name = get_target('proxy').full_hostname.sub('pxy', 'pod-pxy') else - begin - node = get_target(host) - system_name = node.full_hostname - rescue NotImplementedError => e - # If the node for that host is not defined, just return the host parameter as system_name - warn e.message - system_name = host - end + node = get_target(host) + system_name = node.full_hostname end system_name end @@ -393,36 +387,30 @@ def net_prefix # This function tests whether a file exists on a node def file_exists?(node, file) - _out, local, _remote, code = 
node.test_and_store_results_together("test -f #{file}", 'root', 500) - code.zero? && local.zero? + node.file_exists(file) end # This function tests whether a folder exists on a node def folder_exists?(node, file) - _out, local, _remote, code = node.test_and_store_results_together("test -d #{file}", 'root', 500) - code.zero? && local.zero? + node.folder_exists(file) end # This function deletes a file from a node def file_delete(node, file) - _out, _local, _remote, code = node.test_and_store_results_together("rm #{file}", 'root', 500) - code + node.file_delete(file) end # This function deletes a file from a node def folder_delete(node, folder) - _out, _local, _remote, code = node.test_and_store_results_together("rm -rf #{folder}", 'root', 500) - code + node.folder_delete(folder) end # This function extracts a file from a node def file_extract(node, remote_file, local_file) - code, _remote = node.extract_file(remote_file, local_file, 'root', false) - code + node.extract(remote_file, local_file, 'root', false) end # This function injects a file into a node def file_inject(node, local_file, remote_file) - code, _remote = node.inject_file(local_file, remote_file, 'root', false) - code + node.inject(local_file, remote_file, 'root', false) end diff --git a/testsuite/features/support/constants.rb b/testsuite/features/support/constants.rb index cf4e273d0f70..b1cf0aff6b88 100644 --- a/testsuite/features/support/constants.rb +++ b/testsuite/features/support/constants.rb @@ -196,7 +196,7 @@ sle_base_channel = if ENV['PROVIDER'].include? 
'podman' 'Fake Base Channel' - elsif ENV['SERVER'].include?('uyuni') || ENV['SERVER'].include?('suma-pr') + elsif ENV['SERVER'].include?('uyuni') || ENV['SERVER'].include?('suma-pr') || $is_container_server 'openSUSE Leap 15.5 (x86_64)' else 'SLES15-SP4-Pool' diff --git a/testsuite/features/support/lavanda.rb b/testsuite/features/support/lavanda.rb index 64ca455ab277..b298cb1cd5b3 100644 --- a/testsuite/features/support/lavanda.rb +++ b/testsuite/features/support/lavanda.rb @@ -77,6 +77,12 @@ def init_os_version(os_version) @in_os_version = os_version end + ## + # Initializes the @in_has_uyunictl variable to true. + def init_has_uyunictl + @in_has_uyunictl = true + end + # getter functions, executed on testsuite def hostname raise 'empty hostname, something wrong' if @in_hostname.empty? @@ -145,6 +151,23 @@ def os_version # buffer_size: The maximum buffer size in bytes. Defaults to 65536. # verbose: Whether to log the output of the command in case of success. Defaults to false. def run(cmd, separated_results: false, check_errors: true, timeout: DEFAULT_TIMEOUT, user: 'root', successcodes: [0], buffer_size: 65536, verbose: false) + cmd_prefixed = @in_has_uyunictl ? "uyunictl exec -i '#{cmd.gsub(/'/, '\'"\'"\'')}'" : cmd + run_local(cmd_prefixed, separated_results: separated_results, check_errors: check_errors, timeout: timeout, user: user, successcodes: successcodes, buffer_size: buffer_size, verbose: verbose) + end + + ## + # It runs a command, and returns the output, error, and exit code. + # + # Args: + # cmd: The command to run. + # separated_results: Whether the results should be stored separately. Defaults to false. + # check_errors: Whether to check for errors or not. Defaults to true. + # timeout: The timeout to be used, in seconds. Defaults to 250 or the value of the DEFAULT_TIMEOUT environment variable. + # user: The user to be used to run the command. Defaults to root. 
+ # successcodes: An array with the values to be accepted as success codes from the command run. + # buffer_size: The maximum buffer size in bytes. Defaults to 65536. + # verbose: Whether to log the output of the command in case of success. Defaults to false. + def run_local(cmd, separated_results: false, check_errors: true, timeout: DEFAULT_TIMEOUT, user: 'root', successcodes: [0], buffer_size: 65536, verbose: false) if separated_results out, err, _lo, _rem, code = test_and_store_results_separately(cmd, user, timeout, buffer_size) else @@ -202,4 +225,115 @@ def wait_while_process_running(process) result end end + + ## + # Copy a local file to a remote node. + # Handles copying to the server container if possible + # + # Args: + # local_file: The path to the file to copy + # remote_file: The path in the destination + # user: The owner of the file + def inject(local_file, remote_file, user = 'root', dots = true) + if @in_has_uyunictl + tmp_folder, _code = run_local('mktemp -d') + tmp_file = File.join(tmp_folder.strip, File.basename(local_file)) + code, _remote = inject_file(local_file, tmp_file, user, dots) + if code.zero? + _out, code = run_local("uyunictl cp --user #{user} #{tmp_file} server:#{remote_file}") + raise "Failed to copy #{tmp_file} to container" unless code.zero? 
+ end + run_local("rm -r #{tmp_folder}") + else + code, _remote = inject_file(local_file, remote_file, user, dots) + end + code + end + + ## + # Copy a remote file to a local one + # Handles copying from the server container if possible + # + # Args: + # remote_file: The path in the destination + # local_file: The path to the file to copy + # user: The owner of the file + def extract(remote_file, local_file, user = 'root', dots = true) + if @in_has_uyunictl + tmp_folder, _code = run_local('mktemp -d') + tmp_file = File.join(tmp_folder.strip, File.basename(remote_file)) + _out, code = run_local("uyunictl cp --user #{user} server:#{remote_file} #{tmp_file}") + raise "Failed to extract #{remote_file} from container" unless code.zero? + code, _remote = extract_file(tmp_file, local_file, user, dots) + raise "Failed to extract #{tmp_file} from host" unless code.zero? + run_local("rm -r #{tmp_folder}") + else + code, _local = extract_file(remote_file, local_file, user, dots) + end + code + end + + ## + # Check if a file exists on a node. + # Handles checking in server container if possible. + # + # Args: + # file: The path to check on the node. + def file_exists(file) + if @in_has_uyunictl + _out, code = run_local("uyunictl exec -- 'test -f #{file}'", check_errors: false) + exists = code.zero? + else + _out, local, _remote, code = test_and_store_results_together("test -f #{file}", 'root', 500) + exists = code.zero? && local.zero? + end + exists + end + + ## + # Check if a folder exists on a node. + # Handles checking in server container if possible. + # + # Args: + # file: The path to check on the node. + def folder_exists(file) + if @in_has_uyunictl + _out, code = run_local("uyunictl exec -- 'test -d #{file}'", check_errors: false) + exists = code.zero? + else + _out, local, _remote, code = test_and_store_results_together("test -d #{file}", 'root', 500) + exists = code.zero? && local.zero? + end + exists + end + + ## + # Delete a file on a node. 
+ # Handles checking in server container if possible. + # + # Args: + # file: The path of the file to delete on the node. + def file_delete(file) + if @in_has_uyunictl + _out, code = run_local("uyunictl exec -- 'rm #{file}'", check_errors: false) + else + _out, _local, _remote, code = test_and_store_results_together("rm #{file}", 'root', 500) + end + code + end + + ## + # Delete a folder on a node. + # Handles checking in server container if possible. + # + # Args: + # folder: The path of the folder to delete on the node. + def folder_delete(folder) + if @in_has_uyunictl + _out, code = run_local("uyunictl exec -- 'rm -rf #{folder}'", check_errors: false) + else + _out, _local, _remote, code = test_and_store_results_together("rm -rf #{folder}", 'root', 500) + end + code + end end diff --git a/testsuite/features/support/twopence_init.rb b/testsuite/features/support/twopence_init.rb index 9f84560d6342..2c088d00f2ec 100644 --- a/testsuite/features/support/twopence_init.rb +++ b/testsuite/features/support/twopence_init.rb @@ -18,7 +18,7 @@ def client_public_ip(node) raise "Cannot resolve node for host '#{host}'" if node.nil? %w[br0 eth0 eth1 ens0 ens1 ens2 ens3 ens4 ens5 ens6].each do |dev| - output, code = node.run("ip address show dev #{dev} | grep 'inet '", check_errors: false) + output, code = node.run_local("ip address show dev #{dev} | grep 'inet '", check_errors: false) next unless code.zero? node.init_public_interface(dev) @@ -41,6 +41,24 @@ def process_private_and_public_ip(host, node) node end +def initialize_server(host, node) + _out, code = node.run('which uyunictl', check_errors: false) + node.init_has_uyunictl if code.zero? + + fqdn, code = node.run('sed -n \'s/^java.hostname *= *\(.\+\)$/\1/p\' /etc/rhn/rhn.conf') + raise "Cannot connect to get FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}, local: #{local}, remote: #{remote}" if code.nonzero? + raise "No FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}" if fqdn.empty? 
+ node.init_full_hostname(fqdn) + node.init_hostname(fqdn.split('.')[0]) + + node = process_os_family_and_version(host, fqdn, node.hostname, node) + node = process_private_and_public_ip(host, node) + + $node_by_host[host] = node + $host_by_node[node] = host + node +end + # Initialize a Twopence node through its host (additionally it will setup some handy maps) def twopence_init(host) puts "Initializing a twopence node for '#{host}'." @@ -61,6 +79,8 @@ def twopence_init(host) # Look at support/lavanda.rb for more details node.extend(LavandaBasic) + return initialize_server(host, node) if host == 'server' + # Initialize hostname hostname, local, remote, code = node.test_and_store_results_together('hostname', 'root', 500) From 25a1ab58b085c13ad415faa70fa47105f9a43f8c Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Tue, 18 Jul 2023 17:07:46 +0200 Subject: [PATCH 08/40] testsuite: increase waiting AJAX transition --- testsuite/features/support/commonlib.rb | 8 ++++---- testsuite/features/support/env.rb | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/testsuite/features/support/commonlib.rb b/testsuite/features/support/commonlib.rb index 68c765ceb8e8..23f83dbc5c31 100644 --- a/testsuite/features/support/commonlib.rb +++ b/testsuite/features/support/commonlib.rb @@ -129,7 +129,7 @@ def format_detail(message, last_result, report_result) def click_button_and_wait(locator = nil, **options) click_button(locator, options) begin - raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 5) + raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 20) rescue StandardError, Capybara::ExpectationNotMet => e STDOUT.puts e.message # Skip errors related to .senna-loading element end @@ -138,7 +138,7 @@ def click_button_and_wait(locator = nil, **options) def click_link_and_wait(locator = nil, **options) click_link(locator, options) begin - raise 'Timeout: Waiting AJAX transition 
(click link)' unless has_no_css?('.senna-loading', wait: 5) + raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 20) rescue StandardError, Capybara::ExpectationNotMet => e STDOUT.puts e.message # Skip errors related to .senna-loading element end @@ -147,7 +147,7 @@ def click_link_and_wait(locator = nil, **options) def click_link_or_button_and_wait(locator = nil, **options) click_link_or_button(locator, options) begin - raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 5) + raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 20) rescue StandardError, Capybara::ExpectationNotMet => e STDOUT.puts e.message # Skip errors related to .senna-loading element end @@ -158,7 +158,7 @@ module CapybaraNodeElementExtension def click super begin - raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 5) + raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 20) rescue StandardError, Capybara::ExpectationNotMet => e STDOUT.puts e.message # Skip errors related to .senna-loading element end diff --git a/testsuite/features/support/env.rb b/testsuite/features/support/env.rb index c9a043217f63..719d0ca6a556 100644 --- a/testsuite/features/support/env.rb +++ b/testsuite/features/support/env.rb @@ -188,7 +188,7 @@ def process_code_coverage AfterStep do if has_css?('.senna-loading', wait: 0) log 'WARN: Step ends with an ajax transition not finished, let\'s wait a bit!' 
- log 'Timeout: Waiting AJAX transition' unless has_no_css?('.senna-loading', wait: 20) + log 'Timeout: Waiting AJAX transition' unless has_no_css?('.senna-loading', wait: 40) end end From 799cfda5a7d0b55d8013a819fd04d3d0b319e38c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 22 Jun 2023 09:32:14 +0200 Subject: [PATCH 09/40] Initial Hub XML-RPC API container image --- containers/hub-xmlrpc-api-image/Dockerfile | 27 +++++++++++++++++++ containers/hub-xmlrpc-api-image/_service | 4 +++ .../hub-xmlrpc-api.changes | 4 +++ containers/hub-xmlrpc-api-image/tito.props | 2 ++ rel-eng/packages/hub-xmlrpc-api-image | 1 + 5 files changed, 38 insertions(+) create mode 100644 containers/hub-xmlrpc-api-image/Dockerfile create mode 100644 containers/hub-xmlrpc-api-image/_service create mode 100644 containers/hub-xmlrpc-api-image/hub-xmlrpc-api.changes create mode 100644 containers/hub-xmlrpc-api-image/tito.props create mode 100644 rel-eng/packages/hub-xmlrpc-api-image diff --git a/containers/hub-xmlrpc-api-image/Dockerfile b/containers/hub-xmlrpc-api-image/Dockerfile new file mode 100644 index 000000000000..b327d42d33bf --- /dev/null +++ b/containers/hub-xmlrpc-api-image/Dockerfile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: MIT +#!BuildTag: uyuni/hub-xmlrpc-api:latest + +ARG BASE=registry.suse.com/bci/bci-base:15.5 +FROM $BASE + +RUN zypper --gpg-auto-import-keys --non-interactive install hub-xmlrpc-api + +# LABELs +ARG PRODUCT=Uyuni +ARG VENDOR="Uyuni project" +ARG URL="https://www.uyuni-project.org/" +ARG REFERENCE_PREFIX="registry.opensuse.org/uyuni" + +# Build Service required labels +# labelprefix=org.opensuse.uyuni.hub-xmlrpc-api +LABEL org.opencontainers.image.title="${PRODUCT} Hub XML-RPC API container" +LABEL org.opencontainers.image.description="${PRODUCT} Hub XML-RPC API image" +LABEL org.opencontainers.image.created="%BUILDTIME%" +LABEL org.opencontainers.image.vendor="${VENDOR}" +LABEL org.opencontainers.image.url="${URL}" +LABEL 
org.opencontainers.image.version="4.4.0" +LABEL org.openbuildservice.disturl="%DISTURL%" +LABEL org.opensuse.reference="${REFERENCE_PREFIX}/server:4.4.0.%RELEASE%" +# endlabelprefix + +CMD ["/usr/bin/hub-xmlrpc-api"] diff --git a/containers/hub-xmlrpc-api-image/_service b/containers/hub-xmlrpc-api-image/_service new file mode 100644 index 000000000000..bde87fa5bc1f --- /dev/null +++ b/containers/hub-xmlrpc-api-image/_service @@ -0,0 +1,4 @@ + + + + diff --git a/containers/hub-xmlrpc-api-image/hub-xmlrpc-api.changes b/containers/hub-xmlrpc-api-image/hub-xmlrpc-api.changes new file mode 100644 index 000000000000..e2637480450b --- /dev/null +++ b/containers/hub-xmlrpc-api-image/hub-xmlrpc-api.changes @@ -0,0 +1,4 @@ +------------------------------------------------------------------- +Thu Jun 22 07:30:36 UTC 2023 - Cédric Bosdonnat + +- Initial image for Uyuni Hub XML-RPC API diff --git a/containers/hub-xmlrpc-api-image/tito.props b/containers/hub-xmlrpc-api-image/tito.props new file mode 100644 index 000000000000..f22069cb8efa --- /dev/null +++ b/containers/hub-xmlrpc-api-image/tito.props @@ -0,0 +1,2 @@ +[buildconfig] +tagger = tito.tagger.SUSEContainerTagger diff --git a/rel-eng/packages/hub-xmlrpc-api-image b/rel-eng/packages/hub-xmlrpc-api-image new file mode 100644 index 000000000000..45971d5a183c --- /dev/null +++ b/rel-eng/packages/hub-xmlrpc-api-image @@ -0,0 +1 @@ +4.4.0 containers/hub-xmlrpc-api-image/ From 7cd89158b33180e6335fe76b96c89f01250b2064 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 20 Jul 2023 11:35:23 +0200 Subject: [PATCH 10/40] timezone service --- containers/server-image/Dockerfile | 11 ++++++++++- containers/server-image/timezone_alignment.service | 10 ++++++++++ containers/server-image/timezone_alignment.sh | 5 +++++ 3 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 containers/server-image/timezone_alignment.service create mode 100755 containers/server-image/timezone_alignment.sh diff --git 
a/containers/server-image/Dockerfile b/containers/server-image/Dockerfile index dab8bfdae92a..b5c487b7a32d 100644 --- a/containers/server-image/Dockerfile +++ b/containers/server-image/Dockerfile @@ -6,6 +6,11 @@ FROM $INIT_BASE ARG PRODUCT_PATTERN_PREFIX="patterns-uyuni" +COPY timezone_alignment.sh /usr/bin + +# Copy timezone link update service +COPY timezone_alignment.service /usr/lib/systemd/system/ + COPY remove_unused.sh . RUN echo "rpm.install.excludedocs = yes" >>/etc/zypp/zypp.conf @@ -61,7 +66,11 @@ COPY java_agent.yaml /etc/prometheus-jmx_exporter/taskomatic/java_agent.yml COPY tomcat_jmx.conf /usr/lib/systemd/system/tomcat.service.d/jmx.conf COPY taskomatic_jmx.conf /usr/lib/systemd/system/taskomatic.service.d/jmx.conf -RUN systemctl enable prometheus-node_exporter +RUN chmod -R 755 /usr/bin/timezone_alignment.sh + +RUN systemctl enable prometheus-node_exporter; \ + systemctl enable uyuni-setup; \ + systemctl enable timezone_alignment; # LABELs ARG PRODUCT=Uyuni diff --git a/containers/server-image/timezone_alignment.service b/containers/server-image/timezone_alignment.service new file mode 100644 index 000000000000..d091ff8329f9 --- /dev/null +++ b/containers/server-image/timezone_alignment.service @@ -0,0 +1,10 @@ +[Unit] +Description=Timezone alignment +After=postgresql.service + +[Service] +ExecStart=timezone_alignment.sh +Type=oneshot + +[Install] +WantedBy=multi-user.target diff --git a/containers/server-image/timezone_alignment.sh b/containers/server-image/timezone_alignment.sh new file mode 100755 index 000000000000..9f66b822c86a --- /dev/null +++ b/containers/server-image/timezone_alignment.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +if [[ ! 
-z "$TZ" ]]; then + timedatectl set-timezone $TZ +fi From a5ce845f35e405b94d26509549f1adeaa1c91c7f Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 3 Aug 2023 12:12:02 +0200 Subject: [PATCH 11/40] move /srv/www/htdoc/pub/repositories to /usr/share/susemanager/gpg/repositories --- susemanager/empty-repo.conf | 2 ++ ...nager.changes.mbussolotto.empty-repos-move | 1 + susemanager/susemanager.spec | 32 +++++++++++-------- 3 files changed, 22 insertions(+), 13 deletions(-) create mode 100644 susemanager/empty-repo.conf create mode 100644 susemanager/susemanager.changes.mbussolotto.empty-repos-move diff --git a/susemanager/empty-repo.conf b/susemanager/empty-repo.conf new file mode 100644 index 000000000000..82b468721cff --- /dev/null +++ b/susemanager/empty-repo.conf @@ -0,0 +1,2 @@ +RewriteRule ^/pub/repositories/empty/(.*)$ /gpg/repositories/empty/$1 [L,PT] +RewriteRule ^/pub/repositories/empty-deb/(.*)$ /gpg/repositories/empty-deb/$1 [L,PT] diff --git a/susemanager/susemanager.changes.mbussolotto.empty-repos-move b/susemanager/susemanager.changes.mbussolotto.empty-repos-move new file mode 100644 index 000000000000..23a69e167183 --- /dev/null +++ b/susemanager/susemanager.changes.mbussolotto.empty-repos-move @@ -0,0 +1 @@ +- Move empty repositories from pub to /usr/share/susemanager diff --git a/susemanager/susemanager.spec b/susemanager/susemanager.spec index 05fa3161d7d0..b98bbc3c2ce0 100644 --- a/susemanager/susemanager.spec +++ b/susemanager/susemanager.spec @@ -30,7 +30,6 @@ %global salt_group root %global serverdir %{_sharedstatedir} %global wwwroot %{_localstatedir}/www -%global wwwdocroot %{wwwroot}/html %endif %if 0%{?suse_version} @@ -41,9 +40,10 @@ %global salt_group salt %global serverdir /srv %global wwwroot %{serverdir}/www -%global wwwdocroot %{wwwroot}/htdocs %endif +%global reporoot %{_datarootdir}/susemanager/gpg/ + %global debug_package %{nil} Name: susemanager @@ -156,6 +156,7 @@ Requires: spacewalk-backend-sql Requires: spacewalk-common 
Requires: susemanager-build-keys Requires: susemanager-sync-data +Requires: uyuni-build-keys BuildRequires: docbook-utils %description tools @@ -189,13 +190,15 @@ install -m 0644 etc/logrotate.d/susemanager-tools %{buildroot}/%{_sysconfdir}/lo install -m 0644 etc/slp.reg.d/susemanager.reg %{buildroot}/%{_sysconfdir}/slp.reg.d make -C src install PREFIX=$RPM_BUILD_ROOT PYTHON_BIN=%{pythonX} MANDIR=%{_mandir} install -d -m 755 %{buildroot}/%{wwwroot}/os-images/ +mkdir -p %{buildroot}/etc/apache2/conf.d +install empty-repo.conf %{buildroot}/etc/apache2/conf.d/empty-repo.conf # empty repo for rhel base channels -mkdir -p %{buildroot}%{wwwdocroot}/pub/repositories/ -cp -r pub/empty %{buildroot}%{wwwdocroot}/pub/repositories/ +mkdir -p %{buildroot}%{reporoot}/repositories/ +cp -r pub/empty %{buildroot}%{reporoot}/repositories/ # empty repo for Ubuntu base fake channel -cp -r pub/empty-deb %{buildroot}%{wwwdocroot}/pub/repositories/ +cp -r pub/empty-deb %{buildroot}%{reporoot}/repositories/ # YaST configuration mkdir -p %{buildroot}%{_datadir}/YaST2/clients @@ -291,11 +294,13 @@ sed -i '/You can access .* via https:\/\//d' /tmp/motd 2> /dev/null ||: %dir %{pythonsmroot}/susemanager %dir %{_prefix}/share/rhn/ %dir %{_datadir}/susemanager -%dir %{wwwdocroot}/pub -%dir %{wwwdocroot}/pub/repositories -%dir %{wwwdocroot}/pub/repositories/empty -%dir %{wwwdocroot}/pub/repositories/empty/repodata -%dir %{wwwdocroot}/pub/repositories/empty-deb +%dir %{reporoot} +%dir %{reporoot}/repositories +%dir %{reporoot}/repositories/empty +%dir %{reporoot}/repositories/empty/repodata +%dir %{reporoot}/repositories/empty-deb +%dir /etc/apache2 +%dir /etc/apache2/conf.d %config(noreplace) %{_sysconfdir}/logrotate.d/susemanager-tools %{_prefix}/share/rhn/config-defaults/rhn_*.conf %attr(0755,root,root) %{_bindir}/mgr-salt-ssh @@ -318,8 +323,9 @@ sed -i '/You can access .* via https:\/\//d' /tmp/motd 2> /dev/null ||: %{_datadir}/susemanager/__pycache__/ %endif %{_mandir}/man8/mgr-sync.8* 
-%{wwwdocroot}/pub/repositories/empty/repodata/*.xml* -%{wwwdocroot}/pub/repositories/empty-deb/Packages -%{wwwdocroot}/pub/repositories/empty-deb/Release +%{reporoot}/repositories/empty/repodata/*.xml* +%{reporoot}/repositories/empty-deb/Packages +%{reporoot}/repositories/empty-deb/Release +/etc/apache2/conf.d/empty-repo.conf %changelog From e7ff5f1b46e73fa557c3d3bc1e0055b572475fda Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Fri, 11 Aug 2023 15:24:54 +0200 Subject: [PATCH 12/40] setup postfix hostname using conf file --- susemanager/bin/mgr-setup | 23 ++++--------------- .../susemanager.changes.mbussolotto.postfix | 1 + utils/spacewalk-hostname-rename | 4 ++++ utils/susemanager.changes.mbussolotto.postfix | 1 + 4 files changed, 10 insertions(+), 19 deletions(-) create mode 100644 susemanager/susemanager.changes.mbussolotto.postfix create mode 100644 utils/susemanager.changes.mbussolotto.postfix diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 39c61c6f56c3..dfc90c001315 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -172,25 +172,10 @@ fi } setup_mail () { - -# fix hostname for postfix -REALHOSTNAME=`hostname -f` -if [ -z "$REALHOSTNAME" ]; then - for i in `ip -f inet -o addr show scope global | awk '{print $4}' | awk -F \/ '{print $1}'`; do - for j in `dig +noall +answer +time=2 +tries=1 -x $i | awk '{print $5}' | sed 's/\.$//'`; do - if [ -n "$j" ]; then - REALHOSTNAME=$j - break 2 - fi - done - done -fi -if [ -n "$REALHOSTNAME" ]; then - echo "$REALHOSTNAME" > /etc/hostname -fi -# bsc#979664 - SUSE Manager requires a working mail system -systemctl --quiet enable postfix 2>&1 -systemctl restart postfix + postconf -e myhostname=$HOSTNAME + # bsc#979664 - SUSE Manager requires a working mail system + systemctl --quiet enable postfix 2>&1 + systemctl restart postfix } setup_hostname() { diff --git a/susemanager/susemanager.changes.mbussolotto.postfix b/susemanager/susemanager.changes.mbussolotto.postfix new file 
mode 100644 index 000000000000..76abe2b83535 --- /dev/null +++ b/susemanager/susemanager.changes.mbussolotto.postfix @@ -0,0 +1 @@ +- setup postfix hostname using conf file diff --git a/utils/spacewalk-hostname-rename b/utils/spacewalk-hostname-rename index cde5ff6c0723..66931e5890c7 100755 --- a/utils/spacewalk-hostname-rename +++ b/utils/spacewalk-hostname-rename @@ -638,6 +638,10 @@ if [ -e $MGR_SYNC_CONF ]; then fi print_status 0 # just simulate end +echo -n "Changing postfix settings ... " | tee -a $LOG +postconf -e myhostname=$HOSTNAME +systemctl restart postfix + echo -n "Starting spacewalk services ... " | tee -a $LOG if [ "$DB_SERVICE" != "" ] then diff --git a/utils/susemanager.changes.mbussolotto.postfix b/utils/susemanager.changes.mbussolotto.postfix new file mode 100644 index 000000000000..76abe2b83535 --- /dev/null +++ b/utils/susemanager.changes.mbussolotto.postfix @@ -0,0 +1 @@ +- setup postfix hostname using conf file From 70b975db1aa345ed16743c33c22ce730ad0a455d Mon Sep 17 00:00:00 2001 From: Ricardo Mateus Date: Fri, 8 Sep 2023 15:53:58 +0100 Subject: [PATCH 13/40] call update ca cert as part of start up Signed-off-by: Ricardo Mateus --- ...cewalk-admin.changes.rjmateus.call_update_ca_certs_at_startup | 1 + spacewalk/admin/uyuni-check-database.service | 1 + 2 files changed, 2 insertions(+) create mode 100644 spacewalk/admin/spacewalk-admin.changes.rjmateus.call_update_ca_certs_at_startup diff --git a/spacewalk/admin/spacewalk-admin.changes.rjmateus.call_update_ca_certs_at_startup b/spacewalk/admin/spacewalk-admin.changes.rjmateus.call_update_ca_certs_at_startup new file mode 100644 index 000000000000..d8a8fb183c8c --- /dev/null +++ b/spacewalk/admin/spacewalk-admin.changes.rjmateus.call_update_ca_certs_at_startup @@ -0,0 +1 @@ +- Call service ca-certificates as a dependency for database check diff --git a/spacewalk/admin/uyuni-check-database.service b/spacewalk/admin/uyuni-check-database.service index ff8ad7f85c96..3c136f24abcc 100644 --- 
a/spacewalk/admin/uyuni-check-database.service +++ b/spacewalk/admin/uyuni-check-database.service @@ -2,6 +2,7 @@ Description=Uyuni check database Before=tomcat.service apache2.service salt-master.service salt-api.service rhn-search.service cobblerd.service taskomatic.service mgr-events-config.service mgr-websockify.service After=network-online.target postgresql.service +Requires=ca-certificates.service [Service] ExecStart=/usr/sbin/spacewalk-startup-helper check-database From 0b61b7b430472a6de6f8c0c87b06ad1f4c7e4d3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 13 Sep 2023 11:37:45 +0200 Subject: [PATCH 14/40] Remove https use for localhost In the kubernetes container the SSL connection is stopped at the ingress level so localhost doesn't handle HTTPS. --- client/tools/mgr-push/mgr-push.changes.cbosdo.http-server | 1 + client/tools/mgr-push/rhnpushrc | 2 +- susemanager-utils/susemanager-sls/src/modules/uyuni_config.py | 2 +- .../susemanager-sls/susemanager-sls.changes.cbosdo.local-http | 1 + 4 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 client/tools/mgr-push/mgr-push.changes.cbosdo.http-server create mode 100644 susemanager-utils/susemanager-sls/susemanager-sls.changes.cbosdo.local-http diff --git a/client/tools/mgr-push/mgr-push.changes.cbosdo.http-server b/client/tools/mgr-push/mgr-push.changes.cbosdo.http-server new file mode 100644 index 000000000000..c7a4ad014775 --- /dev/null +++ b/client/tools/mgr-push/mgr-push.changes.cbosdo.http-server @@ -0,0 +1 @@ +- Use http to connect to localhost server diff --git a/client/tools/mgr-push/rhnpushrc b/client/tools/mgr-push/rhnpushrc index f42ab1f7bc83..48a23e42f0d3 100644 --- a/client/tools/mgr-push/rhnpushrc +++ b/client/tools/mgr-push/rhnpushrc @@ -53,7 +53,7 @@ count = dir = #Push to this server (http[s]:///APP) -server = https://localhost/APP +server = http://localhost/APP #Manage this channel(s) channel = diff --git 
a/susemanager-utils/susemanager-sls/src/modules/uyuni_config.py b/susemanager-utils/susemanager-sls/src/modules/uyuni_config.py index 9164fdda6d38..d0ae11e72485 100644 --- a/susemanager-utils/susemanager-sls/src/modules/uyuni_config.py +++ b/susemanager-utils/susemanager-sls/src/modules/uyuni_config.py @@ -35,7 +35,7 @@ class RPCClient: RPC Client """ - def __init__(self, user: str = None, password: str = None, url: str = "https://localhost/rpc/api"): + def __init__(self, user: str = None, password: str = None, url: str = "http://localhost/rpc/api"): """ XML-RPC client interface. diff --git a/susemanager-utils/susemanager-sls/susemanager-sls.changes.cbosdo.local-http b/susemanager-utils/susemanager-sls/susemanager-sls.changes.cbosdo.local-http new file mode 100644 index 000000000000..5a985c7b9a8a --- /dev/null +++ b/susemanager-utils/susemanager-sls/susemanager-sls.changes.cbosdo.local-http @@ -0,0 +1 @@ +- Use http for connections to localhost From ddc9804d6f7421705ead732ed10d8e66690437ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 19 Sep 2023 13:31:59 +0200 Subject: [PATCH 15/40] testsuite: store mgr-sync credentials in the config In some cases mgr-sync seems to wait for ever for the user / password on the input provided by `echo -e`. Using the `~/.mgr-sync` configuration file is more reliable. 
--- testsuite/features/step_definitions/command_steps.rb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testsuite/features/step_definitions/command_steps.rb b/testsuite/features/step_definitions/command_steps.rb index 8ba956d41169..72485d926b13 100644 --- a/testsuite/features/step_definitions/command_steps.rb +++ b/testsuite/features/step_definitions/command_steps.rb @@ -299,7 +299,8 @@ end When(/^I execute mgr\-sync "([^"]*)" with user "([^"]*)" and password "([^"]*)"$/) do |arg1, u, p| - $command_output, _code = get_target('server').run("echo -e '#{u}\n#{p}\n' | mgr-sync #{arg1}", check_errors: false, buffer_size: 1_000_000) + get_target('server').run("echo -e \'mgrsync.user = \"#{u}\"\nmgrsync.password = \"#{p}\"\n\' > ~/.mgr-sync") + $command_output, _code = get_target('server').run("mgr-sync #{arg1}", check_errors: false, buffer_size: 1_000_000) end When(/^I execute mgr\-sync "([^"]*)"$/) do |arg1| From f6a854dbdc13804cfd7170935377868bafa54832 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 19 Sep 2023 16:18:23 +0200 Subject: [PATCH 16/40] testsuite: add more logs to catch reposync killing issue --- testsuite/features/step_definitions/command_steps.rb | 4 +++- testsuite/features/support/lavanda.rb | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/testsuite/features/step_definitions/command_steps.rb b/testsuite/features/step_definitions/command_steps.rb index 72485d926b13..bb5357982713 100644 --- a/testsuite/features/step_definitions/command_steps.rb +++ b/testsuite/features/step_definitions/command_steps.rb @@ -355,8 +355,9 @@ reposync_not_running_streak = 0 reposync_left_running_streak = 0 while reposync_not_running_streak <= 60 - command_output, _code = get_target('server').run('ps axo pid,cmd | grep spacewalk-repo-sync | grep -v grep', check_errors: false) + command_output, _code = get_target('server').run('ps axo pid,cmd | grep spacewalk-repo-sync | grep -v grep', check_errors: false, 
verbose: true) if command_output.empty? + log "Empty command!" reposync_not_running_streak += 1 reposync_left_running_streak = 0 sleep 1 @@ -366,6 +367,7 @@ process = command_output.split("\n")[0] channel = process.split(' ')[5] + log "Processing channel '#{channel}'" if do_not_kill.include? channel $channels_synchronized.add(channel) log "Reposync of channel #{channel} left running" if (reposync_left_running_streak % 60).zero? diff --git a/testsuite/features/support/lavanda.rb b/testsuite/features/support/lavanda.rb index b298cb1cd5b3..9c371c0c0e63 100644 --- a/testsuite/features/support/lavanda.rb +++ b/testsuite/features/support/lavanda.rb @@ -176,7 +176,7 @@ def run_local(cmd, separated_results: false, check_errors: true, timeout: DEFAUL if check_errors raise "FAIL: #{cmd} returned status code = #{code}.\nOutput:\n#{out}" unless successcodes.include?(code) end - STDOUT.puts "#{cmd} returned status code = #{code}.\nOutput:\n#{out}" if verbose + STDOUT.puts "#{cmd} returned status code = #{code}.\nOutput:\n'#{out}'" if verbose if separated_results [out, err, code] else From da5e46ab224ce0c0fcca42ac1018c9f76ce393d4 Mon Sep 17 00:00:00 2001 From: Ondrej Holecek Date: Mon, 18 Sep 2023 15:10:56 +0200 Subject: [PATCH 17/40] Precreate system users to always have well known UID and GID - UID and GIDs are made to match each other if possible - IDs starts at 10550 to prevent conflicts with host system accounts - postgres has to have login shell, required during configuration --- containers/init-image/Dockerfile | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/containers/init-image/Dockerfile b/containers/init-image/Dockerfile index 75c8a0598f87..bb1ea820b39e 100644 --- a/containers/init-image/Dockerfile +++ b/containers/init-image/Dockerfile @@ -16,6 +16,21 @@ LABEL org.opencontainers.image.vendor="Uyuni project" LABEL org.opencontainers.image.url="https://www.uyuni-project.org/" # endlabelprefix +# Create stable static UID and GID for salt, tomcat, 
apache (wwwrun), postgres, ... +RUN groupadd -r --gid 10550 susemanager && \ + groupadd -r --gid 10551 tomcat && \ + groupadd -r --gid 10552 www && \ + groupadd -r --gid 10553 wwwrun && \ + groupadd -r --gid 10554 salt && \ + groupadd -r --gid 10555 tftp && \ + groupadd -r --gid 10556 postgres + +RUN useradd -r -s /usr/sbin/nologin -G susemanager,www -g tomcat -d /usr/share/tomcat --uid 10551 tomcat && \ + useradd -r -s /usr/sbin/nologin -G susemanager,www -g wwwrun -d /var/lib/wwwrun --uid 10552 wwwrun && \ + useradd -r -s /usr/sbin/nologin -G susemanager -g salt -d /var/lib/salt --uid 10554 salt && \ + useradd -r -s /usr/sbin/nologin -g tftp -d /srv/tftpboot --uid 10555 tftp && \ + useradd -r -s /usr/bin/bash -g postgres -d /var/lib/pgsql --uid 10556 postgres + # Fill the image with content and clean the cache(s) RUN set -euo pipefail; zypper -n in --no-recommends systemd gzip; zypper -n clean; rm -rf /var/log/* CMD ["/usr/lib/systemd/systemd"] From 2752c8c8530afe06cd266573357531ad676f5da4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 20 Sep 2023 10:33:16 +0200 Subject: [PATCH 18/40] testsuite: move the redfish fake server to controller Running the redfish mocked server on the server would require exposing the 8443 port on the container, which is impossible when the pod is running, and not desirable. Moving the fake server to the controller works around this issue. 
--- .../srv_power_management_redfish.feature | 6 +- .../step_definitions/command_steps.rb | 77 +++++++------------ .../step_definitions/navigation_steps.rb | 5 +- testsuite/features/support/kubernetes.rb | 46 +++++++++++ 4 files changed, 80 insertions(+), 54 deletions(-) create mode 100644 testsuite/features/support/kubernetes.rb diff --git a/testsuite/features/secondary/srv_power_management_redfish.feature b/testsuite/features/secondary/srv_power_management_redfish.feature index b4ab15913810..dca79a42d99b 100644 --- a/testsuite/features/secondary/srv_power_management_redfish.feature +++ b/testsuite/features/secondary/srv_power_management_redfish.feature @@ -8,7 +8,7 @@ Feature: Redfish Power management Scenario: Setup a Redfish host - When the server starts mocking a Redfish host + When the controller starts mocking a Redfish host Scenario: Log in as admin user Given I am authorized for the "Admin" section @@ -20,7 +20,7 @@ Feature: Redfish Power management Given I am on the Systems overview page of this "sle_minion" When I follow "Provisioning" in the content area And I follow "Power Management" in the content area - And I enter the server hostname as the redfish server address + And I enter the controller hostname as the redfish server address And I enter "ipmiusr" as "powerUsername" And I enter "test" as "powerPassword" And I select "Redfish" from "powerType" @@ -94,7 +94,7 @@ Feature: Redfish Power management And the cobbler report should contain "Power Management Type : ipmilan" for "sle_minion" Scenario: Cleanup: tear down the Redfish host - When the server stops mocking a Redfish host + When the controller stops mocking a Redfish host Scenario: Cleanup: remove remaining systems from SSM after Redfish power management tests When I click on the clear SSM button diff --git a/testsuite/features/step_definitions/command_steps.rb b/testsuite/features/step_definitions/command_steps.rb index bb5357982713..fcb5a1ad68c8 100644 --- 
a/testsuite/features/step_definitions/command_steps.rb +++ b/testsuite/features/step_definitions/command_steps.rb @@ -574,26 +574,38 @@ get_target('server').run('pkill fake_ipmi_host.sh || :') end -When(/^the server starts mocking a Redfish host$/) do - get_target('server').run('mkdir -p /root/Redfish-Mockup-Server/') - %w[redfishMockupServer.py rfSsdpServer.py].each do |file| - source = File.dirname(__FILE__) + '/../upload_files/Redfish-Mockup-Server/' + file - dest = '/root/Redfish-Mockup-Server/' + file - return_code = file_inject(get_target('server'), source, dest) - raise 'File injection failed' unless return_code.zero? +When(/^the controller starts mocking a Redfish host$/) do + # We need the controller hostname to generate its SSL certificate + hostname = `hostname -f`.strip + + _out, code = get_target('server').run_local("systemctl is-active k3s", check_errors: false) + if code.zero? + # On kubernetes, the server has no clue about certificates + crt_path, key_path, _ca_path = generate_certificate("controller", hostname) + get_target('server').extract_file(crt_path, '/root/controller.crt') + get_target('server').extract_file(key_path, '/root/controller.key') + else + get_target('server').run("mgr-ssl-tool --gen-server -d /root/ssl-build --no-rpm -p spacewalk --set-hostname #{hostname} --server-cert=controller.crt --server-key=controller.key") + key_path, _err = get_target('server').run('ls /root/ssl-build/*/controller.key') + crt_path, _err = get_target('server').run('ls /root/ssl-build/*/controller.crt') + + file_extract(get_target('server'), key_path.strip, '/root/controller.key') + file_extract(get_target('server'), crt_path.strip, '/root/controller.crt') end - get_target('server').run('curl --output /root/DSP2043_2019.1.zip https://www.dmtf.org/sites/default/files/standards/documents/DSP2043_2019.1.zip') - get_target('server').run('unzip /root/DSP2043_2019.1.zip -d /root/') - cmd = "/usr/bin/python3 /root/Redfish-Mockup-Server/redfishMockupServer.py " \ 
- "-H #{get_target('server').full_hostname} -p 8443 " \ + + `curl --output /root/DSP2043_2019.1.zip https://www.dmtf.org/sites/default/files/standards/documents/DSP2043_2019.1.zip` + `unzip /root/DSP2043_2019.1.zip -d /root/` + cmd = "/usr/bin/python3 #{File.dirname(__FILE__)}/../upload_files/Redfish-Mockup-Server/redfishMockupServer.py " \ + "-H #{hostname} -p 8443 " \ "-S -D /root/DSP2043_2019.1/public-catfish/ " \ - "--ssl --cert /etc/pki/tls/certs/spacewalk.crt --key /etc/pki/tls/private/spacewalk.key " \ + "--ssl --cert /root/controller.crt --key /root/controller.key " \ "< /dev/null > /dev/null 2>&1 &" - get_target('server').run(cmd) + `#{cmd}` end -When(/^the server stops mocking a Redfish host$/) do - get_target('server').run('pkill -e -f /root/Redfish-Mockup-Server/redfishMockupServer.py') +When(/^the controller stops mocking a Redfish host$/) do + `pkill -e -f #{File.dirname(__FILE__)}/../upload_files/Redfish-Mockup-Server/redfishMockupServer.py` + `rm -rf /root/DSP2043_2019.1*` end When(/^I install a user-defined state for "([^"]*)" on the server$/) do |host| @@ -1011,40 +1023,7 @@ _out, code = get_target('server').run_local("systemctl is-active k3s", check_errors: false) if code.zero? 
# Server running in Kubernetes doesn't know anything about SSL CA - certificate = "apiVersion: cert-manager.io/v1\\n"\ - "kind: Certificate\\n"\ - "metadata:\\n"\ - " name: uyuni-proxy\\n"\ - "spec:\\n"\ - " secretName: uyuni-proxy-cert\\n"\ - " subject:\\n"\ - " countries: ['DE']\\n"\ - " provinces: ['Bayern']\\n"\ - " localities: ['Nuernberg']\\n"\ - " organizations: ['SUSE']\\n"\ - " organizationalUnits: ['SUSE']\\n"\ - " emailAddresses:\\n"\ - " - galaxy-noise@suse.de\\n"\ - " commonName: #{get_target('proxy').full_hostname}\\n"\ - " dnsNames:\\n"\ - " - #{get_target('proxy').full_hostname}\\n"\ - " issuerRef:\\n"\ - " name: uyuni-ca-issuer\\n"\ - " kind: Issuer" - _out, return_code = get_target('server').run_local("echo -e \"#{certificate}\" | kubectl apply -f -") - raise 'Failed to define proxy Certificate resource' unless return_code.zero? - # cert-manager takes some time to generate the secret, wait for it before continuing - repeat_until_timeout(timeout: 600, message: "Kubernetes uyuni-proxy-cert secret has not been defined") do - _result, code = get_target('server').run_local("kubectl get secret uyuni-proxy-cert", check_errors: false) - break if code.zero? - sleep 1 - end - _out, return_code = get_target('server').run_local("kubectl get secret uyuni-proxy-cert -o jsonpath='{.data.tls\\.crt}' | base64 -d >/tmp/proxy.crt") - raise 'Failed to store proxy certificate' unless return_code.zero? - _out, return_code = get_target('server').run_local("kubectl get secret uyuni-proxy-cert -o jsonpath='{.data.tls\\.key}' | base64 -d >/tmp/proxy.key") - raise 'Failed to store proxy key' unless return_code.zero? - _out, return_code = get_target('server').run_local("kubectl get secret uyuni-proxy-cert -o jsonpath='{.data.ca\\.crt}' | base64 -d >/tmp/ca.crt") - raise 'Failed to store CA certificate' unless return_code.zero? 
+ generate_certificate("proxy", get_target('proxy').full_hostname) %w[proxy.crt proxy.key ca.crt].each do |file| return_code, = get_target('server').extract_file("/tmp/#{file}", "/tmp/#{file}") diff --git a/testsuite/features/step_definitions/navigation_steps.rb b/testsuite/features/step_definitions/navigation_steps.rb index fe8d78530114..f0508eaf8b47 100644 --- a/testsuite/features/step_definitions/navigation_steps.rb +++ b/testsuite/features/step_definitions/navigation_steps.rb @@ -1064,8 +1064,9 @@ find(:xpath, '//select[@id=\'maintenance-window-select\']/option', match: :first).select_option end -When(/^I enter the server hostname as the redfish server address$/) do - step %(I enter "#{get_target('server').full_hostname}:8443" as "powerAddress") +When(/^I enter the controller hostname as the redfish server address$/) do + hostname = `hostname -f`.strip + step %(I enter "#{hostname}:8443" as "powerAddress") end When(/^I clear browser cookies$/) do diff --git a/testsuite/features/support/kubernetes.rb b/testsuite/features/support/kubernetes.rb new file mode 100644 index 000000000000..095f0475e875 --- /dev/null +++ b/testsuite/features/support/kubernetes.rb @@ -0,0 +1,46 @@ +# Create an SSL certificate using cert-manager and return the path on the server where the files have been copied +def generate_certificate(name, fqdn) + certificate = 'apiVersion: cert-manager.io/v1\\n'\ + 'kind: Certificate\\n'\ + 'metadata:\\n'\ + " name: uyuni-#{name}\\n"\ + 'spec:\\n'\ + " secretName: uyuni-#{name}-cert\\n"\ + ' subject:\\n'\ + " countries: ['DE']\\n"\ + " provinces: ['Bayern']\\n"\ + " localities: ['Nuernberg']\\n"\ + " organizations: ['SUSE']\\n"\ + " organizationalUnits: ['SUSE']\\n"\ + ' emailAddresses:\\n'\ + ' - galaxy-noise@suse.de\\n'\ + " commonName: #{fqdn}\\n"\ + ' dnsNames:\\n'\ + " - #{fqdn}\\n"\ + ' issuerRef:\\n'\ + ' name: uyuni-ca-issuer\\n'\ + ' kind: Issuer' + _out, return_code = get_target('server').run_local("echo -e \"#{certificate}\" | kubectl apply 
-f -") + raise "Failed to define #{name} Certificate resource" unless return_code.zero? + + # cert-manager takes some time to generate the secret, wait for it before continuing + repeat_until_timeout(timeout: 600, message: "Kubernetes uyuni-#{name}-cert secret has not been defined") do + _result, code = get_target('server').run_local("kubectl get secret uyuni-#{name}-cert", check_errors: false) + break if code.zero? + + sleep 1 + end + + crt_path = "/tmp/#{name}.crt" + key_path = "/tmp/#{name}.key" + ca_path = '/tmp/ca.crt' + + _out, return_code = get_target('server').run_local("kubectl get secret uyuni-#{name}-cert -o jsonpath='{.data.tls\\.crt}' | base64 -d >#{crt_path}") + raise "Failed to store #{name} certificate" unless return_code.zero? + + _out, return_code = get_target('server').run_local("kubectl get secret uyuni-#{name}-cert -o jsonpath='{.data.tls\\.key}' | base64 -d >#{key_path}") + raise "Failed to store #{name} key" unless return_code.zero? + + get_target('server').run_local("kubectl get secret uyuni-#{name}-cert -o jsonpath='{.data.ca\\.crt}' | base64 -d >#{ca_path}") + [crt_path, key_path, ca_path] +end From c7b787b1943d2ab29941220c641d7fff63d2668d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 26 Sep 2023 11:38:29 +0200 Subject: [PATCH 19/40] testsuite: adapt container proxy config for k3s --- .../step_definitions/command_steps.rb | 35 ++++++++++++------- testsuite/features/support/kubernetes.rb | 6 ++++ 2 files changed, 29 insertions(+), 12 deletions(-) diff --git a/testsuite/features/step_definitions/command_steps.rb b/testsuite/features/step_definitions/command_steps.rb index fcb5a1ad68c8..c7ffba59ef20 100644 --- a/testsuite/features/step_definitions/command_steps.rb +++ b/testsuite/features/step_definitions/command_steps.rb @@ -578,8 +578,7 @@ # We need the controller hostname to generate its SSL certificate hostname = `hostname -f`.strip - _out, code = get_target('server').run_local("systemctl is-active k3s", 
check_errors: false) - if code.zero? + if running_k3s? # On kubernetes, the server has no clue about certificates crt_path, key_path, _ca_path = generate_certificate("controller", hostname) get_target('server').extract_file(crt_path, '/root/controller.crt') @@ -1020,8 +1019,7 @@ end When(/^I copy server\'s keys to the proxy$/) do - _out, code = get_target('server').run_local("systemctl is-active k3s", check_errors: false) - if code.zero? + if running_k3s? # Server running in Kubernetes doesn't know anything about SSL CA generate_certificate("proxy", get_target('proxy').full_hostname) @@ -1043,8 +1041,6 @@ end When(/^I configure the proxy$/) do - _out, code = get_target('server').run_local("systemctl is-active k3s", check_errors: false) - # prepare the settings file settings = "RHN_PARENT=#{get_target('server').full_hostname}\n" \ "HTTP_PROXY=''\n" \ @@ -1054,7 +1050,7 @@ "POPULATE_CONFIG_CHANNEL=y\n" \ "RHN_USER=admin\n" \ "ACTIVATE_SLP=y\n" - settings += if code.zero? + settings += if running_k3s? "USE_EXISTING_CERTS=y\n" \ "CA_CERT=/tmp/ca.crt\n" \ "SERVER_KEY=/tmp/proxy.key\n" \ @@ -1402,11 +1398,26 @@ end When(/^I generate the configuration "([^"]*)" of Containerized Proxy on the server$/) do |file_path| - # Doc: https://www.uyuni-project.org/uyuni-docs/en/uyuni/reference/spacecmd/proxy_container.html - command = "echo spacewalk > cert_pass && spacecmd -u admin -p admin proxy_container_config_generate_cert" \ - " -- -o #{file_path} -p 8022 #{get_target('proxy').full_hostname.sub('pxy', 'pod-pxy')} #{get_target('server').full_hostname}" \ - " 2048 galaxy-noise@suse.de --ca-pass cert_pass" \ - " && rm cert_pass" + if running_k3s? 
+ # A server container on kubernetes has no clue about SSL certificates + # We need to generate them using `cert-manager` and use the files as 3rd party certificate + generate_certificate("proxy", get_target('proxy').full_hostname) + + # Copy the cert files in the container to use them with spacecmd + %w[proxy.crt proxy.key ca.crt].each do |file| + get_target('server').inject("/tmp/#{file}", "/tmp/#{file}") + end + + command = "spacecmd -u admin -p admin proxy_container_config -- -o #{file_path} -p 8022 " \ + "#{get_target('proxy').full_hostname.sub('pxy', 'pod-pxy')} #{get_target('server').full_hostname} 2048 galaxy-noise@suse.de " \ + "/tmp/ca.crt /tmp/proxy.crt /tmp/proxy.key" + else + # Doc: https://www.uyuni-project.org/uyuni-docs/en/uyuni/reference/spacecmd/proxy_container.html + command = "echo spacewalk > cert_pass && spacecmd -u admin -p admin proxy_container_config_generate_cert" \ + " -- -o #{file_path} -p 8022 #{get_target('proxy').full_hostname.sub('pxy', 'pod-pxy')} #{get_target('server').full_hostname}" \ + " 2048 galaxy-noise@suse.de --ca-pass cert_pass" \ + " && rm cert_pass" + end get_target('server').run(command) end diff --git a/testsuite/features/support/kubernetes.rb b/testsuite/features/support/kubernetes.rb index 095f0475e875..7714ea0fd8a7 100644 --- a/testsuite/features/support/kubernetes.rb +++ b/testsuite/features/support/kubernetes.rb @@ -44,3 +44,9 @@ def generate_certificate(name, fqdn) get_target('server').run_local("kubectl get secret uyuni-#{name}-cert -o jsonpath='{.data.ca\\.crt}' | base64 -d >#{ca_path}") [crt_path, key_path, ca_path] end + +# Returns whether the server is running in a k3s container or not +def running_k3s? + _out, code = get_target('server').run_local('systemctl is-active k3s', check_errors: false) + code.zero? 
+end From a0c70b780b6f6dc4f225c9dd6bdfe2daa35ff4fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 28 Sep 2023 16:43:30 +0200 Subject: [PATCH 20/40] Use localhost for cobbler client to avoid hairpins --- spacewalk/setup/bin/spacewalk-setup-cobbler | 1 + spacewalk/setup/spacewalk-setup.changes.cbosdo.tag-cleanup | 1 + 2 files changed, 2 insertions(+) create mode 100644 spacewalk/setup/spacewalk-setup.changes.cbosdo.tag-cleanup diff --git a/spacewalk/setup/bin/spacewalk-setup-cobbler b/spacewalk/setup/bin/spacewalk-setup-cobbler index 707a5f111170..8d9cca655337 100755 --- a/spacewalk/setup/bin/spacewalk-setup-cobbler +++ b/spacewalk/setup/bin/spacewalk-setup-cobbler @@ -75,6 +75,7 @@ def manipulate_cobbler_settings(config_dir: str, settings_yaml: str, fqdn: str): filecontent["pxe_just_once"] = True filecontent["redhat_management_server"] = fqdn or socket.getfqdn() + filecontent["client_use_localhost"] = True yaml_dump = yaml.safe_dump(filecontent) with open(full_path, "w") as settings_file: settings_file.write(yaml_dump) diff --git a/spacewalk/setup/spacewalk-setup.changes.cbosdo.tag-cleanup b/spacewalk/setup/spacewalk-setup.changes.cbosdo.tag-cleanup new file mode 100644 index 000000000000..1519aef90799 --- /dev/null +++ b/spacewalk/setup/spacewalk-setup.changes.cbosdo.tag-cleanup @@ -0,0 +1 @@ +- Use localhost for Cobbler client to avoid hairpins From 0ac162b68a7ffb2a0115c7dcc3a9c8637d5c2fb6 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Mon, 11 Sep 2023 16:53:54 +0200 Subject: [PATCH 21/40] change webapp folder --- java/spacewalk-java.spec | 91 ++++++++++++----------- spacewalk/setup/share/add_appbase.xml.xsl | 23 ++++++ spacewalk/setup/spacewalk-setup.spec | 5 ++ 3 files changed, 77 insertions(+), 42 deletions(-) create mode 100644 spacewalk/setup/share/add_appbase.xml.xsl diff --git a/java/spacewalk-java.spec b/java/spacewalk-java.spec index e42c02bd1c49..06405dfa5851 100644 --- a/java/spacewalk-java.spec +++ 
b/java/spacewalk-java.spec @@ -28,6 +28,7 @@ %define run_checkstyle 0 %define omit_tests 1 +%define servewwwdir /usr/share/susemanager/www %if 0%{?suse_version} %define serverdir /srv %define apache_group www @@ -55,6 +56,9 @@ %define supported_locales en_US,ko,ja,zh_CN %endif +%define serverxmltool %{_libexecdir}/tomcat/serverxml-tool.sh + + Name: spacewalk-java Summary: Java web application files for Spacewalk License: GPL-2.0-only @@ -312,7 +316,7 @@ This package contains testing files of spacewalk-java. %{_datadir}/rhn/lib/rhn-test.jar %{_datadir}/rhn/unit-tests/* %{_datadir}/rhn/unittest.xml -%attr(644, tomcat, tomcat) %{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/commons-lang3.jar +%attr(644, tomcat, tomcat) %{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/commons-lang3.jar %endif %package apidoc-sources @@ -498,10 +502,11 @@ export JAVA_HOME=/usr/lib/jvm/java-11-openjdk/ export NO_BRP_STALE_LINK_ERROR=yes +mkdir -p $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/ %if 0%{?suse_version} ant -Dproduct.name="'$PRODUCT_NAME'" -Dprefix=$RPM_BUILD_ROOT -Dtomcat="tomcat9" install-tomcat9-suse -install -d -m 755 $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/META-INF/ -install -m 755 conf/rhn-tomcat9.xml $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/META-INF/context.xml +install -d -m 755 $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/META-INF/ +install -m 755 conf/rhn-tomcat9.xml $RPM_BUILD_ROOT%{serverwwwrdir}/tomcat/webapps/rhn/META-INF/context.xml %else ant -Dproduct.name="'$PRODUCT_NAME'" -Dprefix=$RPM_BUILD_ROOT install-tomcat install -d -m 755 $RPM_BUILD_ROOT%{_sysconfdir}/tomcat/Catalina/localhost/ @@ -585,7 +590,7 @@ install -m 644 conf/cobbler/snippets/sles_register_script $RPM_BUILD_ROOT%{space install -m 644 conf/cobbler/snippets/sles_no_signature_checks $RPM_BUILD_ROOT%{spacewalksnippetsdir}/sles_no_signature_checks install -m 644 conf/cobbler/snippets/wait_for_networkmanager_script $RPM_BUILD_ROOT%{spacewalksnippetsdir}/wait_for_networkmanager_script -ln -s -f 
%{_javadir}/dwr.jar $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/dwr.jar +ln -s -f %{_javadir}/dwr.jar $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/dwr.jar # special links for rhn-search RHN_SEARCH_BUILD_DIR=%{_prefix}/share/rhn/search/lib @@ -598,10 +603,10 @@ if [ -e %{_javadir}/ongres-stringprep/stringprep.jar ]; then ln -s -f %{_javadir}/ongres-stringprep/stringprep.jar $RPM_BUILD_ROOT$RHN_SEARCH_BUILD_DIR/ongres-stringprep_stringprep.jar ln -s -f %{_javadir}/ongres-stringprep/saslprep.jar $RPM_BUILD_ROOT$RHN_SEARCH_BUILD_DIR/ongres-stringprep_saslprep.jar echo " -%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_client.jar -%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_common.jar -%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-stringprep_stringprep.jar -%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-stringprep_saslprep.jar +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_client.jar +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_common.jar +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-stringprep_stringprep.jar +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-stringprep_saslprep.jar %{_prefix}/share/rhn/search/lib/ongres-scram_client.jar %{_prefix}/share/rhn/search/lib/ongres-scram_common.jar %{_prefix}/share/rhn/search/lib/ongres-stringprep_stringprep.jar @@ -609,8 +614,8 @@ if [ -e %{_javadir}/ongres-stringprep/stringprep.jar ]; then " > .mfiles-postgresql else echo " -%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_client.jar -%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_common.jar +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_client.jar +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_common.jar %{_prefix}/share/rhn/search/lib/ongres-scram_client.jar %{_prefix}/share/rhn/search/lib/ongres-scram_common.jar " > .mfiles-postgresql @@ -621,10 +626,10 @@ mkdir -p $RPM_BUILD_ROOT%{_docdir}/%{name}/xml install -m 644 
build/reports/apidocs/docbook/susemanager_api_doc.xml $RPM_BUILD_ROOT%{_docdir}/%{name}/xml/susemanager_api_doc.xml cp -R build/reports/apidocs/asciidoc/ $RPM_BUILD_ROOT%{_docdir}/%{name}/asciidoc/ # delete JARs which must not be deployed -rm -rf $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/jspapi.jar -rm -rf $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/jasper5-compiler.jar -rm -rf $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/jasper5-runtime.jar -rm -rf $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/tomcat*.jar +rm -rf $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/jspapi.jar +rm -rf $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/jasper5-compiler.jar +rm -rf $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/jasper5-runtime.jar +rm -rf $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/tomcat*.jar %if 0%{?omit_tests} > 0 rm -rf $RPM_BUILD_ROOT%{_datadir}/rhn/lib/rhn-test.jar rm -rf $RPM_BUILD_ROOT/classes/com/redhat/rhn/common/conf/test/conf @@ -635,16 +640,16 @@ rm -rf $RPM_BUILD_ROOT%{_datadir}/rhn/unittest.xml mkdir -p $RPM_BUILD_ROOT%{_var}/log/rhn # Prettifying symlinks -mv $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/jboss-loggingjboss-logging.jar $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/jboss-logging.jar +mv $RPM_BUILD_ROOT%{servewwwrdir}/tomcat/webapps/rhn/WEB-INF/lib/jboss-loggingjboss-logging.jar $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/jboss-logging.jar # Removing unused symlinks. 
%if 0%{?rhel} -rm -rf $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/javamailmail.jar +rm -rf $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/javamailmail.jar %endif # show all JAR symlinks echo "#### SYMLINKS START ####" -find $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib -name *.jar +find $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib -name *.jar echo "#### SYMLINKS END ####" %pre -n spacewalk-taskomatic @@ -696,35 +701,37 @@ chown tomcat:%{apache_group} /var/log/rhn/gatherer.log %defattr(644,tomcat,tomcat,775) %attr(775, %{salt_user_group}, %{salt_user_group}) %dir %{serverdir}/susemanager/salt/salt_ssh %attr(700, %{salt_user_group}, %{salt_user_group}) %dir %{serverdir}/susemanager/salt/salt_ssh/temp_bootstrap_keys -%attr(775, root, tomcat) %dir %{serverdir}/tomcat/webapps +%dir %{serverwwwdir}/tomcat +%dir %{serverwwwdir}/tomcat/webapps +%attr(775, root, tomcat) %dir %{serverwwwdir}/tomcat/webapps %dir %{serverdir}/susemanager %dir %{serverdir}/susemanager/salt %attr(775,tomcat,susemanager) %dir %{serverdir}/susemanager/pillar_data %attr(775,tomcat,susemanager) %dir %{serverdir}/susemanager/pillar_data/images %dir %{serverdir}/susemanager/formula_data -%attr(770, tomcat, %{salt_user_group}) %dir %{serverdir}/susemanager/tmp -%dir %{serverdir}/tomcat/webapps/rhn/ -%{serverdir}/tomcat/webapps/rhn/apidoc/ -%{serverdir}/tomcat/webapps/rhn/css/ -%{serverdir}/tomcat/webapps/rhn/errata/ -%{serverdir}/tomcat/webapps/rhn/img/ -%{serverdir}/tomcat/webapps/rhn/META-INF/ -%{serverdir}/tomcat/webapps/rhn/schedule/ -%{serverdir}/tomcat/webapps/rhn/systems/ -%{serverdir}/tomcat/webapps/rhn/users/ -%{serverdir}/tomcat/webapps/rhn/errors/ -%{serverdir}/tomcat/webapps/rhn/*.jsp -%{serverdir}/tomcat/webapps/rhn/WEB-INF/classes -%{serverdir}/tomcat/webapps/rhn/WEB-INF/decorators -%{serverdir}/tomcat/webapps/rhn/WEB-INF/includes -%{serverdir}/tomcat/webapps/rhn/WEB-INF/nav -%{serverdir}/tomcat/webapps/rhn/WEB-INF/pages 
-%{serverdir}/tomcat/webapps/rhn/WEB-INF/*.xml +%attr(770, tomcat, %{salt_user_group}) %dir %{serverwwwdir}/susemanager/tmp +%dir %{serverwwwdir}/tomcat/webapps/rhn/ +%{serverwwwdir}/tomcat/webapps/rhn/apidoc/ +%{serverwwwdir}/tomcat/webapps/rhn/css/ +%{serverwwwdir}/tomcat/webapps/rhn/errata/ +%{serverwwwdir}/tomcat/webapps/rhn/img/ +%{serverwwwdir}/tomcat/webapps/rhn/META-INF/ +%{serverwwwdir}/tomcat/webapps/rhn/schedule/ +%{serverwwwdir}/tomcat/webapps/rhn/systems/ +%{serverwwwdir}/tomcat/webapps/rhn/users/ +%{serverwwwdir}/tomcat/webapps/rhn/errors/ +%{serverwwwdir}/tomcat/webapps/rhn/*.jsp +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/classes +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/decorators +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/includes +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/nav +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/pages +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/*.xml # all jars in WEB-INF/lib/ -%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib -%exclude %{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/postgresql-jdbc.jar -%exclude %{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-*.jar +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib +%exclude %{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/postgresql-jdbc.jar +%exclude %{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-*.jar # owned by cobbler needs cobbler permissions %attr(755,root,root) %dir %{cobprofdir} @@ -745,7 +752,7 @@ chown tomcat:%{apache_group} /var/log/rhn/gatherer.log %config %{spacewalksnippetsdir}/sles_no_signature_checks %config %{spacewalksnippetsdir}/wait_for_networkmanager_script %if 0%{?suse_version} -%config(noreplace) %{serverdir}/tomcat/webapps/rhn/META-INF/context.xml +%config(noreplace) %{serverwwwdir}/tomcat/webapps/rhn/META-INF/context.xml %else %config(noreplace) %{_sysconfdir}/tomcat/Catalina/localhost/rhn.xml %endif @@ -753,7 +760,7 @@ chown tomcat:%{apache_group} /var/log/rhn/gatherer.log %attr(755, tomcat, root) %dir %{_localstatedir}/lib/spacewalk/scc 
%attr(755, tomcat, root) %dir %{_localstatedir}/lib/spacewalk/subscription-matcher -%dir %{serverdir}/tomcat/webapps/rhn/WEB-INF +%dir %{serverwwwdir}/tomcat/webapps/rhn/WEB-INF %files -n spacewalk-taskomatic %defattr(644,root,root,775) @@ -794,7 +801,7 @@ chown tomcat:%{apache_group} /var/log/rhn/gatherer.log %defattr(644,root,root,755) %dir %{_prefix}/share/rhn/search %dir %{_prefix}/share/rhn/search/lib -%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/postgresql-jdbc.jar +%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/postgresql-jdbc.jar %{_prefix}/share/rhn/search/lib/postgresql-jdbc.jar %changelog diff --git a/spacewalk/setup/share/add_appbase.xml.xsl b/spacewalk/setup/share/add_appbase.xml.xsl new file mode 100644 index 000000000000..d241732b4a6e --- /dev/null +++ b/spacewalk/setup/share/add_appbase.xml.xsl @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + /usr/share/susemanager/www/tomcat/webapps + + + + + + diff --git a/spacewalk/setup/spacewalk-setup.spec b/spacewalk/setup/spacewalk-setup.spec index 86ebc41de04a..a41cfe7bf2bf 100644 --- a/spacewalk/setup/spacewalk-setup.spec +++ b/spacewalk/setup/spacewalk-setup.spec @@ -167,6 +167,7 @@ install -m 0644 share/tomcat_java_opts_suse.conf %{buildroot}/%{_sysconfdir}/tom %endif install -m 0644 share/server.xml.xsl %{buildroot}/%{_datadir}/spacewalk/setup/ install -m 0644 share/server_update.xml.xsl %{buildroot}/%{_datadir}/spacewalk/setup/ +install -m 0644 share/add_appbase.xml.xsl %{buildroot}/%{_datadir}/spacewalk/setup/ install -m 0644 share/old-jvm-list %{buildroot}/%{_datadir}/spacewalk/setup/ install -m 0644 share/vhost-nossl.conf %{buildroot}/%{_datadir}/spacewalk/setup/ install -d -m 755 %{buildroot}/%{_datadir}/spacewalk/setup/defaults.d/ @@ -196,6 +197,10 @@ if [ ! 
-f /etc/rhn/rhn.conf -o $(filesize /etc/rhn/rhn.conf) -eq 0 ]; then CURRENT_DATE=$(date +"%%Y-%%m-%%dT%%H:%%M:%%S.%%3N") cp /etc/tomcat/server.xml /etc/tomcat/server.xml.$CURRENT_DATE xsltproc %{_datadir}/spacewalk/setup/server.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml + + CURRENT_DATE=$(date +"%%Y-%%m-%%dT%%H:%%M:%%S.%%3N") + cp /etc/tomcat/server.xml /etc/tomcat/server.xml.$CURRENT_DATE + xsltproc %{_datadir}/spacewalk/setup/add_appbase.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml fi if [ -e /etc/zypp/credentials.d/SCCcredentials ]; then From d5f5a7abe417816f907cf1db0928e4897cdc1f10 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Tue, 12 Sep 2023 10:25:20 +0200 Subject: [PATCH 22/40] use /usr/share/susemanager/www/ instead of /srv/www/htdocs --- branding/spacewalk-branding.spec | 11 +- java/build.xml | 2 +- .../localization/LocalizationService.java | 3 +- .../rhn/frontend/action/help/EulaAction.java | 2 +- java/manager-build.xml | 6 +- java/spacewalk-java.spec | 103 +++++++++--------- python/spacewalk/spacewalk-backend.spec | 2 +- .../spacewalk-selinux-enable | 6 +- .../etc/httpd/conf.d/zz-spacewalk-www.conf | 2 +- spacewalk/setup/lib/Spacewalk/Setup.pm | 2 +- spacewalk/setup/share/add_appbase.xml.xsl | 32 +++--- spacewalk/setup/share/vhost-nossl.conf | 2 +- .../susemanager-branding-oss.spec | 2 +- .../susemanager-sls/susemanager-sls.spec | 2 +- web/html/Makefile | 2 +- web/spacewalk-web.spec | 8 +- 16 files changed, 99 insertions(+), 88 deletions(-) diff --git a/branding/spacewalk-branding.spec b/branding/spacewalk-branding.spec index 2e82e047a904..d48cd98606bf 100644 --- a/branding/spacewalk-branding.spec +++ b/branding/spacewalk-branding.spec @@ -24,8 +24,10 @@ %global wwwdocroot %{_var}/www/html %else %if 0%{?suse_version} -%global tomcat_path /srv/tomcat -%global wwwdocroot /srv/www/htdocs +%global susemanager_shared_path /usr/share/susemanager +%global wwwroot %{susemanager_shared_path}/www +%global 
tomcat_path %{wwwroot}/tomcat +%global wwwdocroot %{wwwroot}/htdocs %else %global tomcat_path %{_var}/lib/tomcat6 %global wwwdocroot %{_var}/www/html @@ -93,6 +95,11 @@ ln -s %{_datadir}/rhn/lib/java-branding.jar %{buildroot}%{tomcat_path}/webapps/r %{tomcat_path}/webapps/rhn/WEB-INF/lib/java-branding.jar %license LICENSE %if 0%{?suse_version} +%attr(775,tomcat,tomcat) %dir %{susemanager_shared_path} +%attr(775,tomcat,tomcat) %dir %{wwwroot} +%attr(775,tomcat,tomcat) %dir %{wwwdocroot} +%attr(775,tomcat,tomcat) %dir %{tomcat_path} +%attr(775,tomcat,tomcat) %dir %{tomcat_path}/webapps %attr(775,tomcat,tomcat) %dir %{tomcat_path}/webapps/rhn %attr(775,tomcat,tomcat) %dir %{tomcat_path}/webapps/rhn/WEB-INF %attr(775,tomcat,tomcat) %dir %{tomcat_path}/webapps/rhn/WEB-INF/lib/ diff --git a/java/build.xml b/java/build.xml index 2bd005919bf4..4e7f31279be4 100644 --- a/java/build.xml +++ b/java/build.xml @@ -564,7 +564,7 @@ - + diff --git a/java/code/src/com/redhat/rhn/common/localization/LocalizationService.java b/java/code/src/com/redhat/rhn/common/localization/LocalizationService.java index 6c1e3b44d69a..4e6c1735f0a6 100644 --- a/java/code/src/com/redhat/rhn/common/localization/LocalizationService.java +++ b/java/code/src/com/redhat/rhn/common/localization/LocalizationService.java @@ -62,6 +62,7 @@ public class LocalizationService { */ public static final String RHN_DB_DATEFORMAT = "yyyy-MM-dd HH:mm:ss"; public static final String RHN_CUSTOM_DATEFORMAT = "yyyy-MM-dd HH:mm:ss z"; + private static final String DOC_FOLDER= "/usr/share/susemanager/www/htdocs/docs"; private static Logger log = LogManager.getLogger(LocalizationService.class); private static Logger msgLogger = LogManager.getLogger("com.redhat.rhn.common.localization.messages"); @@ -624,7 +625,7 @@ public List getInstalledDocsLocales() { List tmp = new LinkedList<>(); // Get locales of installed documentations - File f = new File("/srv/www/htdocs/docs"); + File f = new File(DOC_FOLDER); String[] locales = 
f.list(); if (locales != null) { tmp.addAll(Arrays.asList(locales)); diff --git a/java/code/src/com/redhat/rhn/frontend/action/help/EulaAction.java b/java/code/src/com/redhat/rhn/frontend/action/help/EulaAction.java index 020c707955c5..0035ce8c4435 100644 --- a/java/code/src/com/redhat/rhn/frontend/action/help/EulaAction.java +++ b/java/code/src/com/redhat/rhn/frontend/action/help/EulaAction.java @@ -36,7 +36,7 @@ * EULA action page. */ public class EulaAction extends org.apache.struts.action.Action { - private static final File EULA_PATH = new File("/srv/www/htdocs/help/eula.html"); + private static final File EULA_PATH = new File("/usr/share/susemanager/www/htdocs/help/eula.html"); /** {@inheritDoc} */ @Override diff --git a/java/manager-build.xml b/java/manager-build.xml index cc520f348332..7ee7dcc6c58f 100644 --- a/java/manager-build.xml +++ b/java/manager-build.xml @@ -21,7 +21,7 @@ - + @@ -242,7 +242,7 @@ - + @@ -334,7 +334,7 @@ diff --git a/java/spacewalk-java.spec b/java/spacewalk-java.spec index 06405dfa5851..5862e7d4f8b1 100644 --- a/java/spacewalk-java.spec +++ b/java/spacewalk-java.spec @@ -28,9 +28,9 @@ %define run_checkstyle 0 %define omit_tests 1 -%define servewwwdir /usr/share/susemanager/www +%define susemanagershareddir /usr/share/susemanager +%define serverdir %{susemanagershareddir}/www %if 0%{?suse_version} -%define serverdir /srv %define apache_group www %define salt_user_group salt %define apache2 apache2 @@ -56,9 +56,6 @@ %define supported_locales en_US,ko,ja,zh_CN %endif -%define serverxmltool %{_libexecdir}/tomcat/serverxml-tool.sh - - Name: spacewalk-java Summary: Java web application files for Spacewalk License: GPL-2.0-only @@ -316,7 +313,7 @@ This package contains testing files of spacewalk-java. 
%{_datadir}/rhn/lib/rhn-test.jar %{_datadir}/rhn/unit-tests/* %{_datadir}/rhn/unittest.xml -%attr(644, tomcat, tomcat) %{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/commons-lang3.jar +%attr(644, tomcat, tomcat) %{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/commons-lang3.jar %endif %package apidoc-sources @@ -502,11 +499,11 @@ export JAVA_HOME=/usr/lib/jvm/java-11-openjdk/ export NO_BRP_STALE_LINK_ERROR=yes -mkdir -p $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/ +mkdir -p $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib %if 0%{?suse_version} ant -Dproduct.name="'$PRODUCT_NAME'" -Dprefix=$RPM_BUILD_ROOT -Dtomcat="tomcat9" install-tomcat9-suse -install -d -m 755 $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/META-INF/ -install -m 755 conf/rhn-tomcat9.xml $RPM_BUILD_ROOT%{serverwwwrdir}/tomcat/webapps/rhn/META-INF/context.xml +install -d -m 755 $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/META-INF/ +install -m 755 conf/rhn-tomcat9.xml $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/META-INF/context.xml %else ant -Dproduct.name="'$PRODUCT_NAME'" -Dprefix=$RPM_BUILD_ROOT install-tomcat install -d -m 755 $RPM_BUILD_ROOT%{_sysconfdir}/tomcat/Catalina/localhost/ @@ -590,7 +587,7 @@ install -m 644 conf/cobbler/snippets/sles_register_script $RPM_BUILD_ROOT%{space install -m 644 conf/cobbler/snippets/sles_no_signature_checks $RPM_BUILD_ROOT%{spacewalksnippetsdir}/sles_no_signature_checks install -m 644 conf/cobbler/snippets/wait_for_networkmanager_script $RPM_BUILD_ROOT%{spacewalksnippetsdir}/wait_for_networkmanager_script -ln -s -f %{_javadir}/dwr.jar $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/dwr.jar +ln -s -f %{_javadir}/dwr.jar $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/dwr.jar # special links for rhn-search RHN_SEARCH_BUILD_DIR=%{_prefix}/share/rhn/search/lib @@ -603,10 +600,10 @@ if [ -e %{_javadir}/ongres-stringprep/stringprep.jar ]; then ln -s -f %{_javadir}/ongres-stringprep/stringprep.jar 
$RPM_BUILD_ROOT$RHN_SEARCH_BUILD_DIR/ongres-stringprep_stringprep.jar ln -s -f %{_javadir}/ongres-stringprep/saslprep.jar $RPM_BUILD_ROOT$RHN_SEARCH_BUILD_DIR/ongres-stringprep_saslprep.jar echo " -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_client.jar -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_common.jar -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-stringprep_stringprep.jar -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-stringprep_saslprep.jar +%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_client.jar +%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_common.jar +%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-stringprep_stringprep.jar +%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-stringprep_saslprep.jar %{_prefix}/share/rhn/search/lib/ongres-scram_client.jar %{_prefix}/share/rhn/search/lib/ongres-scram_common.jar %{_prefix}/share/rhn/search/lib/ongres-stringprep_stringprep.jar @@ -614,8 +611,8 @@ if [ -e %{_javadir}/ongres-stringprep/stringprep.jar ]; then " > .mfiles-postgresql else echo " -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_client.jar -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_common.jar +%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_client.jar +%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-scram_common.jar %{_prefix}/share/rhn/search/lib/ongres-scram_client.jar %{_prefix}/share/rhn/search/lib/ongres-scram_common.jar " > .mfiles-postgresql @@ -626,10 +623,10 @@ mkdir -p $RPM_BUILD_ROOT%{_docdir}/%{name}/xml install -m 644 build/reports/apidocs/docbook/susemanager_api_doc.xml $RPM_BUILD_ROOT%{_docdir}/%{name}/xml/susemanager_api_doc.xml cp -R build/reports/apidocs/asciidoc/ $RPM_BUILD_ROOT%{_docdir}/%{name}/asciidoc/ # delete JARs which must not be deployed -rm -rf $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/jspapi.jar -rm -rf $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/jasper5-compiler.jar 
-rm -rf $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/jasper5-runtime.jar -rm -rf $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/tomcat*.jar +rm -rf $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/jspapi.jar +rm -rf $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/jasper5-compiler.jar +rm -rf $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/jasper5-runtime.jar +rm -rf $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/tomcat*.jar %if 0%{?omit_tests} > 0 rm -rf $RPM_BUILD_ROOT%{_datadir}/rhn/lib/rhn-test.jar rm -rf $RPM_BUILD_ROOT/classes/com/redhat/rhn/common/conf/test/conf @@ -640,16 +637,16 @@ rm -rf $RPM_BUILD_ROOT%{_datadir}/rhn/unittest.xml mkdir -p $RPM_BUILD_ROOT%{_var}/log/rhn # Prettifying symlinks -mv $RPM_BUILD_ROOT%{servewwwrdir}/tomcat/webapps/rhn/WEB-INF/lib/jboss-loggingjboss-logging.jar $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/jboss-logging.jar +mv $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/jboss-loggingjboss-logging.jar $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/jboss-logging.jar # Removing unused symlinks. 
%if 0%{?rhel} -rm -rf $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/javamailmail.jar +rm -rf $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/javamailmail.jar %endif # show all JAR symlinks echo "#### SYMLINKS START ####" -find $RPM_BUILD_ROOT%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib -name *.jar +find $RPM_BUILD_ROOT%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib -name *.jar echo "#### SYMLINKS END ####" %pre -n spacewalk-taskomatic @@ -697,41 +694,42 @@ chown tomcat:%{apache_group} /var/log/rhn/gatherer.log %files %defattr(-,root,root) +%dir %{susemanagershareddir} +%dir %{serverdir} %dir %{_localstatedir}/lib/spacewalk %defattr(644,tomcat,tomcat,775) %attr(775, %{salt_user_group}, %{salt_user_group}) %dir %{serverdir}/susemanager/salt/salt_ssh %attr(700, %{salt_user_group}, %{salt_user_group}) %dir %{serverdir}/susemanager/salt/salt_ssh/temp_bootstrap_keys -%dir %{serverwwwdir}/tomcat -%dir %{serverwwwdir}/tomcat/webapps -%attr(775, root, tomcat) %dir %{serverwwwdir}/tomcat/webapps %dir %{serverdir}/susemanager %dir %{serverdir}/susemanager/salt %attr(775,tomcat,susemanager) %dir %{serverdir}/susemanager/pillar_data %attr(775,tomcat,susemanager) %dir %{serverdir}/susemanager/pillar_data/images %dir %{serverdir}/susemanager/formula_data -%attr(770, tomcat, %{salt_user_group}) %dir %{serverwwwdir}/susemanager/tmp -%dir %{serverwwwdir}/tomcat/webapps/rhn/ -%{serverwwwdir}/tomcat/webapps/rhn/apidoc/ -%{serverwwwdir}/tomcat/webapps/rhn/css/ -%{serverwwwdir}/tomcat/webapps/rhn/errata/ -%{serverwwwdir}/tomcat/webapps/rhn/img/ -%{serverwwwdir}/tomcat/webapps/rhn/META-INF/ -%{serverwwwdir}/tomcat/webapps/rhn/schedule/ -%{serverwwwdir}/tomcat/webapps/rhn/systems/ -%{serverwwwdir}/tomcat/webapps/rhn/users/ -%{serverwwwdir}/tomcat/webapps/rhn/errors/ -%{serverwwwdir}/tomcat/webapps/rhn/*.jsp -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/classes -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/decorators -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/includes 
-%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/nav -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/pages -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/*.xml +%attr(770, tomcat, %{salt_user_group}) %dir %{serverdir}/susemanager/tmp +%dir %{serverdir}/tomcat/webapps/rhn/ +%{serverdir}/tomcat/webapps/rhn/apidoc/ +%{serverdir}/tomcat/webapps/rhn/css/ +%{serverdir}/tomcat/webapps/rhn/errata/ +%{serverdir}/tomcat/webapps/rhn/img/ +%{serverdir}/tomcat/webapps/rhn/META-INF/ +%{serverdir}/tomcat/webapps/rhn/schedule/ +%{serverdir}/tomcat/webapps/rhn/systems/ +%{serverdir}/tomcat/webapps/rhn/users/ +%{serverdir}/tomcat/webapps/rhn/errors/ +%{serverdir}/tomcat/webapps/rhn/*.jsp +%{serverdir}/tomcat/webapps/rhn/WEB-INF/classes +%{serverdir}/tomcat/webapps/rhn/WEB-INF/decorators +%{serverdir}/tomcat/webapps/rhn/WEB-INF/includes +%{serverdir}/tomcat/webapps/rhn/WEB-INF/nav +%{serverdir}/tomcat/webapps/rhn/WEB-INF/pages +%{serverdir}/tomcat/webapps/rhn/WEB-INF/*.xml # all jars in WEB-INF/lib/ -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib -%exclude %{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/postgresql-jdbc.jar -%exclude %{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-*.jar +%dir %{serverdir}/tomcat +%dir %{serverdir}/tomcat/webapps +%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib +%exclude %{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/postgresql-jdbc.jar +%exclude %{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/ongres-*.jar # owned by cobbler needs cobbler permissions %attr(755,root,root) %dir %{cobprofdir} @@ -752,7 +750,7 @@ chown tomcat:%{apache_group} /var/log/rhn/gatherer.log %config %{spacewalksnippetsdir}/sles_no_signature_checks %config %{spacewalksnippetsdir}/wait_for_networkmanager_script %if 0%{?suse_version} -%config(noreplace) %{serverwwwdir}/tomcat/webapps/rhn/META-INF/context.xml +%config(noreplace) %{serverdir}/tomcat/webapps/rhn/META-INF/context.xml %else %config(noreplace) %{_sysconfdir}/tomcat/Catalina/localhost/rhn.xml %endif @@ -760,7 +758,7 @@ chown 
tomcat:%{apache_group} /var/log/rhn/gatherer.log %attr(755, tomcat, root) %dir %{_localstatedir}/lib/spacewalk/scc %attr(755, tomcat, root) %dir %{_localstatedir}/lib/spacewalk/subscription-matcher -%dir %{serverwwwdir}/tomcat/webapps/rhn/WEB-INF +%dir %{serverdir}/tomcat/webapps/rhn/WEB-INF %files -n spacewalk-taskomatic %defattr(644,root,root,775) @@ -801,7 +799,12 @@ chown tomcat:%{apache_group} /var/log/rhn/gatherer.log %defattr(644,root,root,755) %dir %{_prefix}/share/rhn/search %dir %{_prefix}/share/rhn/search/lib -%{serverwwwdir}/tomcat/webapps/rhn/WEB-INF/lib/postgresql-jdbc.jar +%dir %{susemanagershareddir} +%dir %{serverdir} +%{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/postgresql-jdbc.jar %{_prefix}/share/rhn/search/lib/postgresql-jdbc.jar +%defattr(644,tomcat,tomcat,775) +%dir %{serverdir}/tomcat +%dir %{serverdir}/tomcat/webapps %changelog diff --git a/python/spacewalk/spacewalk-backend.spec b/python/spacewalk/spacewalk-backend.spec index e77f8235272b..0f36dfa4d71a 100644 --- a/python/spacewalk/spacewalk-backend.spec +++ b/python/spacewalk/spacewalk-backend.spec @@ -41,7 +41,7 @@ %global apache_user wwwrun %global apache_group www %global apache_pkg apache2 -%global documentroot /srv/www/htdocs +%global documentroot /usr/share/susemanager/www/htdocs %global m2crypto python3-M2Crypto %global sslrootcert %{_sysconfdir}/pki/trust/anchors/ %endif diff --git a/selinux/spacewalk-selinux/spacewalk-selinux-enable b/selinux/spacewalk-selinux/spacewalk-selinux-enable index 0360511e8e86..ced0f7b93a35 100644 --- a/selinux/spacewalk-selinux/spacewalk-selinux-enable +++ b/selinux/spacewalk-selinux/spacewalk-selinux-enable @@ -7,9 +7,9 @@ RUN_PURE= if [ -d /srv/www/htdocs ]; then - WWWDOCROOT=/srv/www/htdocs + PUBROOT=/srv/www/htdocs else - WWWDOCROOT=/var/www/html + PUBROOT=/var/www/html fi while [ -n "$1" ] ; do @@ -51,7 +51,7 @@ for selinuxvariant in mls strict targeted done /sbin/restorecon -rvvi /usr/share/rhn/satidmap.pl /usr/sbin/rhn-sat-restart-silent 
/var/log/rhn /var/cache/rhn \ - /usr/bin/rhn-sudo-ssl-tool ${WWWDOCROOT}/pub /usr/sbin/tanukiwrapper \ + /usr/bin/rhn-sudo-ssl-tool ${PUBROOT}/pub /usr/sbin/tanukiwrapper \ /var/lib/rhn/kickstarts for sebool in \ diff --git a/spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf b/spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf index beabb28d1c95..40744d379c75 100644 --- a/spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf +++ b/spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf @@ -1,7 +1,7 @@ Logformat "%t %h %{SSL_PROTOCOL}x %{SSL_CIPHER}x \ \"%r\" %b \"%{Referer}i\" \"%{User-Agent}i\" %>s T%{ms}T" ssl_combined - + Options Indexes FollowSymLinks AllowOverride All diff --git a/spacewalk/setup/lib/Spacewalk/Setup.pm b/spacewalk/setup/lib/Spacewalk/Setup.pm index ee42ec90b278..57a0cddc86f1 100644 --- a/spacewalk/setup/lib/Spacewalk/Setup.pm +++ b/spacewalk/setup/lib/Spacewalk/Setup.pm @@ -107,7 +107,7 @@ use constant DB_MIGRATION_LOG_FILE => use constant EMBEDDED_DB_ANSWERS => '/usr/share/spacewalk/setup/defaults.d/embedded-postgresql.conf'; our $DEFAULT_DOC_ROOT = "/var/www/html"; -our $SUSE_DOC_ROOT = "/srv/www/htdocs"; +our $SUSE_DOC_ROOT = "/usr/share/susemanager/www/htdocs"; our $CA_TRUST_DIR = '/etc/pki/ca-trust/source/anchors'; our $SUSE_CA_TRUST_DIR = '/etc/pki/trust/anchors'; diff --git a/spacewalk/setup/share/add_appbase.xml.xsl b/spacewalk/setup/share/add_appbase.xml.xsl index d241732b4a6e..6a475534ffbc 100644 --- a/spacewalk/setup/share/add_appbase.xml.xsl +++ b/spacewalk/setup/share/add_appbase.xml.xsl @@ -1,23 +1,19 @@ - + - - - - - + + + + + + - - - - - - - - /usr/share/susemanager/www/tomcat/webapps - - - - + + + + /usr/share/susemanager/www/tomcat/webapps + + + diff --git a/spacewalk/setup/share/vhost-nossl.conf b/spacewalk/setup/share/vhost-nossl.conf index cc29f270f3b2..5d3fd58d45ba 100644 --- a/spacewalk/setup/share/vhost-nossl.conf +++ b/spacewalk/setup/share/vhost-nossl.conf @@ -1,7 +1,7 @@ # General setup for the virtual 
host - DocumentRoot "/srv/www/htdocs" + DocumentRoot "/usr/share/susemanager/www/htdocs" ErrorLog /var/log/apache2/error_log TransferLog /var/log/apache2/access_log diff --git a/susemanager-branding-oss/susemanager-branding-oss.spec b/susemanager-branding-oss/susemanager-branding-oss.spec index 407047406f98..86100e7c74a6 100644 --- a/susemanager-branding-oss/susemanager-branding-oss.spec +++ b/susemanager-branding-oss/susemanager-branding-oss.spec @@ -17,7 +17,7 @@ %if 0%{?suse_version} -%global wwwdocroot /srv/www/htdocs +%global wwwdocroot /usr/share/susemanager/www/htdocs %else %global wwwdocroot %{_localstatedir}/www/html %endif diff --git a/susemanager-utils/susemanager-sls/susemanager-sls.spec b/susemanager-utils/susemanager-sls/susemanager-sls.spec index 54081a0497bd..c13777b48cc2 100644 --- a/susemanager-utils/susemanager-sls/susemanager-sls.spec +++ b/susemanager-utils/susemanager-sls/susemanager-sls.spec @@ -22,7 +22,7 @@ %endif %if 0%{?suse_version} -%global serverdir /srv +%global serverdir /usr/share/susemanager %global wwwdocroot %{serverdir}/www/htdocs %else %global serverdir %{_localstatedir} diff --git a/web/html/Makefile b/web/html/Makefile index d06b61120457..d11948d58150 100644 --- a/web/html/Makefile +++ b/web/html/Makefile @@ -4,7 +4,7 @@ TOP = .. 
# Project defines -INSTALL_DEST = /srv/www/htdocs +INSTALL_DEST = /usr/share/susemanager/www/htdocs # common stuff include $(TOP)/Makefile.defs diff --git a/web/spacewalk-web.spec b/web/spacewalk-web.spec index f1c8a52e8248..7f6b641f6186 100644 --- a/web/spacewalk-web.spec +++ b/web/spacewalk-web.spec @@ -16,9 +16,10 @@ # Please submit bugfixes or comments via https://bugs.opensuse.org/ # - +%define shared_path /usr/share/susemanager +%define shared_www_path %{shared_path}/www %if 0%{?suse_version} -%define www_path /srv/www/htdocs +%define www_path %{shared_www_path}/htdocs %define apache_user wwwrun %define apache_group www %else @@ -251,6 +252,9 @@ sed -i -e 's/^web.theme_default =.*$/web.theme_default = susemanager-light/' $RP %files -n spacewalk-html -f spacewalk-web.lang %defattr(644,root,root,755) +%dir %{shared_path} +%dir %{shared_www_path} +%dir %{www_path} %dir %{www_path}/css %{www_path}/css/*.{css,js} %dir %{www_path}/css/legacy From bb4a18c4e6e6f5fb7d6d48bd4c4704ad528f08eb Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 20 Sep 2023 09:44:39 +0200 Subject: [PATCH 23/40] black magic for apache --- spacewalk/config/spacewalk-config.spec | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/spacewalk/config/spacewalk-config.spec b/spacewalk/config/spacewalk-config.spec index b36baad0e7d7..0aa8ffc9ba2f 100644 --- a/spacewalk/config/spacewalk-config.spec +++ b/spacewalk/config/spacewalk-config.spec @@ -17,6 +17,10 @@ # +%global susemanager_shared_path /usr/share/susemanager +%global wwwroot %{susemanager_shared_path}/www +%global wwwdocroot %{wwwroot}/htdocs + %if 0%{?suse_version} %define apacheconfdir %{_sysconfdir}/apache2 %define apachepkg apache2 @@ -74,10 +78,11 @@ mv etc $RPM_BUILD_ROOT/ mv var $RPM_BUILD_ROOT/ mv usr $RPM_BUILD_ROOT/ +#TODO invert this logic: the default should be for suse, the if should contains directive for other distros %if 0%{?suse_version} export NO_BRP_STALE_LINK_ERROR=yes mv $RPM_BUILD_ROOT/etc/httpd 
$RPM_BUILD_ROOT%{apacheconfdir} -sed -i 's|var/www/html|srv/www/htdocs|g' $RPM_BUILD_ROOT%{apacheconfdir}/conf.d/zz-spacewalk-www.conf +sed -i 's|/var/www/html|%{wwwdocroot}|g' $RPM_BUILD_ROOT%{apacheconfdir}/conf.d/zz-spacewalk-www.conf %endif touch $RPM_BUILD_ROOT/%{_sysconfdir}/rhn/rhn.conf From 8d73a98e9c9b919f8fcfa69978aaad4c2ccda7ea Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 20 Sep 2023 09:46:36 +0200 Subject: [PATCH 24/40] suppportconfig extract information from shared folder --- susemanager-utils/supportutils-plugin-susemanager/susemanager | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/susemanager-utils/supportutils-plugin-susemanager/susemanager b/susemanager-utils/supportutils-plugin-susemanager/susemanager index 0216920003d6..bccc6e76a998 100755 --- a/susemanager-utils/supportutils-plugin-susemanager/susemanager +++ b/susemanager-utils/supportutils-plugin-susemanager/susemanager @@ -50,7 +50,7 @@ done plugin_command "/bin/ls -l --time-style=long-iso /usr/local/lib" plugin_command "/bin/ls -l --time-style=long-iso /usr/local/lib64" -plugin_command "find /srv/tomcat/webapps/rhn/WEB-INF/lib/ | xargs file | grep broken" +plugin_command "find /usr/share/susemanager/www/tomcat/webapps/rhn/WEB-INF/lib/ | xargs file | grep broken" plugin_command "find /usr/share/spacewalk/taskomatic/ | xargs file | grep broken" plugin_command "find /usr/share/rhn/search/lib/ | xargs file | grep broken" From 1af3730e734c906403f83f9b17673c76033be255 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 20 Sep 2023 12:04:56 +0200 Subject: [PATCH 25/40] setup apache --- .../localization/LocalizationService.java | 2 +- susemanager/bin/mgr-setup | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/java/code/src/com/redhat/rhn/common/localization/LocalizationService.java b/java/code/src/com/redhat/rhn/common/localization/LocalizationService.java index 4e6c1735f0a6..af7791b1e7d1 100644 --- 
a/java/code/src/com/redhat/rhn/common/localization/LocalizationService.java +++ b/java/code/src/com/redhat/rhn/common/localization/LocalizationService.java @@ -62,7 +62,7 @@ public class LocalizationService { */ public static final String RHN_DB_DATEFORMAT = "yyyy-MM-dd HH:mm:ss"; public static final String RHN_CUSTOM_DATEFORMAT = "yyyy-MM-dd HH:mm:ss z"; - private static final String DOC_FOLDER= "/usr/share/susemanager/www/htdocs/docs"; + private static final String DOC_FOLDER = "/usr/share/susemanager/www/htdocs/docs"; private static Logger log = LogManager.getLogger(LocalizationService.class); private static Logger msgLogger = LogManager.getLogger("com.redhat.rhn.common.localization.messages"); diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index dfc90c001315..676624203f00 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -408,6 +408,22 @@ if [ -f $MANAGER_COMPLETE ]; then fi } + + +#TODO most of the apache configuration are set in the perl script, so these changes should be there...but since we want to deprecate perl, the final +# goal would be to move everything here. +# +setup_apache() { + + sed -i 's|DocumentRoot "/srv|DocumentRoot "/usr/share/susemanager|g' /etc/apache2/default-server.conf + sed -i 's|> /root/spacewalk-answers /usr/sbin/mgr-package-rpm-certificate-osimage fi + setup_apache + # rm /root/spacewalk-answers if [ "$SWRET" != "0" ]; then echo "ERROR: spacewalk-setup failed" >&2 From 352ac4af5d65bcbdd0d8dfd990cd6e43aa921fd0 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 20 Sep 2023 19:04:51 +0200 Subject: [PATCH 26/40] fix salt permission --- susemanager/bin/mgr-setup | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 676624203f00..22500a0081d7 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -414,7 +414,6 @@ fi # goal would be to move everything here. 
# setup_apache() { - sed -i 's|DocumentRoot "/srv|DocumentRoot "/usr/share/susemanager|g' /etc/apache2/default-server.conf sed -i 's|> /root/spacewalk-answers setup_apache + setup_permission + # rm /root/spacewalk-answers if [ "$SWRET" != "0" ]; then echo "ERROR: spacewalk-setup failed" >&2 From dc4722a9cc1138c5fb64d95e8681133f31c87290 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 21 Sep 2023 11:22:16 +0200 Subject: [PATCH 27/40] Revert changes --- java/spacewalk-java.spec | 2 +- python/spacewalk/spacewalk-backend.spec | 2 +- spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf | 2 +- spacewalk/config/spacewalk-config.spec | 6 +----- susemanager/bin/mgr-setup | 1 - 5 files changed, 4 insertions(+), 9 deletions(-) diff --git a/java/spacewalk-java.spec b/java/spacewalk-java.spec index 5862e7d4f8b1..1ba0126c4721 100644 --- a/java/spacewalk-java.spec +++ b/java/spacewalk-java.spec @@ -28,9 +28,9 @@ %define run_checkstyle 0 %define omit_tests 1 +%if 0%{?suse_version} %define susemanagershareddir /usr/share/susemanager %define serverdir %{susemanagershareddir}/www -%if 0%{?suse_version} %define apache_group www %define salt_user_group salt %define apache2 apache2 diff --git a/python/spacewalk/spacewalk-backend.spec b/python/spacewalk/spacewalk-backend.spec index 0f36dfa4d71a..e77f8235272b 100644 --- a/python/spacewalk/spacewalk-backend.spec +++ b/python/spacewalk/spacewalk-backend.spec @@ -41,7 +41,7 @@ %global apache_user wwwrun %global apache_group www %global apache_pkg apache2 -%global documentroot /usr/share/susemanager/www/htdocs +%global documentroot /srv/www/htdocs %global m2crypto python3-M2Crypto %global sslrootcert %{_sysconfdir}/pki/trust/anchors/ %endif diff --git a/spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf b/spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf index 40744d379c75..beabb28d1c95 100644 --- a/spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf +++ b/spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf @@ -1,7 
+1,7 @@ Logformat "%t %h %{SSL_PROTOCOL}x %{SSL_CIPHER}x \ \"%r\" %b \"%{Referer}i\" \"%{User-Agent}i\" %>s T%{ms}T" ssl_combined - + Options Indexes FollowSymLinks AllowOverride All diff --git a/spacewalk/config/spacewalk-config.spec b/spacewalk/config/spacewalk-config.spec index 0aa8ffc9ba2f..bb8d9280ff82 100644 --- a/spacewalk/config/spacewalk-config.spec +++ b/spacewalk/config/spacewalk-config.spec @@ -17,10 +17,6 @@ # -%global susemanager_shared_path /usr/share/susemanager -%global wwwroot %{susemanager_shared_path}/www -%global wwwdocroot %{wwwroot}/htdocs - %if 0%{?suse_version} %define apacheconfdir %{_sysconfdir}/apache2 %define apachepkg apache2 @@ -82,7 +78,7 @@ mv usr $RPM_BUILD_ROOT/ %if 0%{?suse_version} export NO_BRP_STALE_LINK_ERROR=yes mv $RPM_BUILD_ROOT/etc/httpd $RPM_BUILD_ROOT%{apacheconfdir} -sed -i 's|/var/www/html|%{wwwdocroot}|g' $RPM_BUILD_ROOT%{apacheconfdir}/conf.d/zz-spacewalk-www.conf +sed -i 's|var/www/html|srv/www/htdocs|g' $RPM_BUILD_ROOT%{apacheconfdir}/conf.d/zz-spacewalk-www.conf %endif touch $RPM_BUILD_ROOT/%{_sysconfdir}/rhn/rhn.conf diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 22500a0081d7..1b0444c3bc62 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -416,7 +416,6 @@ fi setup_apache() { sed -i 's|DocumentRoot "/srv|DocumentRoot "/usr/share/susemanager|g' /etc/apache2/default-server.conf sed -i 's| Date: Thu, 21 Sep 2023 14:02:54 +0200 Subject: [PATCH 28/40] fix permission --- java/spacewalk-java.spec | 17 +++++++++-------- susemanager/bin/mgr-setup | 2 ++ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/java/spacewalk-java.spec b/java/spacewalk-java.spec index 1ba0126c4721..966761e78206 100644 --- a/java/spacewalk-java.spec +++ b/java/spacewalk-java.spec @@ -698,14 +698,15 @@ chown tomcat:%{apache_group} /var/log/rhn/gatherer.log %dir %{serverdir} %dir %{_localstatedir}/lib/spacewalk %defattr(644,tomcat,tomcat,775) -%attr(775, %{salt_user_group}, 
%{salt_user_group}) %dir %{serverdir}/susemanager/salt/salt_ssh -%attr(700, %{salt_user_group}, %{salt_user_group}) %dir %{serverdir}/susemanager/salt/salt_ssh/temp_bootstrap_keys -%dir %{serverdir}/susemanager -%dir %{serverdir}/susemanager/salt -%attr(775,tomcat,susemanager) %dir %{serverdir}/susemanager/pillar_data -%attr(775,tomcat,susemanager) %dir %{serverdir}/susemanager/pillar_data/images -%dir %{serverdir}/susemanager/formula_data -%attr(770, tomcat, %{salt_user_group}) %dir %{serverdir}/susemanager/tmp +%attr(775, %{salt_user_group}, %{salt_user_group}) %dir %{userserverdir}/susemanager/salt/salt_ssh +%attr(700, %{salt_user_group}, %{salt_user_group}) %dir %{userserverdir}/susemanager/salt/salt_ssh/temp_bootstrap_keys +%attr(775, tomcat, tomcat) %dir %{serverdir}/tomcat/webapps +%dir %{userserverdir}/susemanager +%dir %{userserverdir}/susemanager/salt +%attr(775,tomcat,susemanager) %dir %{userserverdir}/susemanager/pillar_data +%attr(775,tomcat,susemanager) %dir %{userserverdir}/susemanager/pillar_data/images +%dir %{userserverdir}/susemanager/formula_data +%attr(770, tomcat, %{salt_user_group}) %dir %{userserverdir}/susemanager/tmp %dir %{serverdir}/tomcat/webapps/rhn/ %{serverdir}/tomcat/webapps/rhn/apidoc/ %{serverdir}/tomcat/webapps/rhn/css/ diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 1b0444c3bc62..4d4ece8a4468 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -427,7 +427,9 @@ setup_permission() { mkdir -p /srv/susemanager/salt/custom chown tomcat:susemanager -R /srv/susemanager/salt/tmp chown tomcat:susemanager -R /srv/susemanager/salt/custom + chown tomcat:tomcat -R /usr/share/susemanager/www/htdocs chmod 777 -R /srv/susemanager/salt + chmod 777 -R /usr/share/susemanager/www/htdocs } setup_spacewalk() { From e4ee5be440e7c50279f671d3ffd4e0d59af555b4 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 21 Sep 2023 14:49:49 +0200 Subject: [PATCH 29/40] fix bootstrap folder --- 
python/spacewalk/spacewalk-backend.spec | 2 +- spacewalk/certs-tools/rhn_bootstrap.py | 3 +-- spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf | 2 +- susemanager/bin/mgr-setup | 1 + 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/python/spacewalk/spacewalk-backend.spec b/python/spacewalk/spacewalk-backend.spec index e77f8235272b..0f36dfa4d71a 100644 --- a/python/spacewalk/spacewalk-backend.spec +++ b/python/spacewalk/spacewalk-backend.spec @@ -41,7 +41,7 @@ %global apache_user wwwrun %global apache_group www %global apache_pkg apache2 -%global documentroot /srv/www/htdocs +%global documentroot /usr/share/susemanager/www/htdocs %global m2crypto python3-M2Crypto %global sslrootcert %{_sysconfdir}/pki/trust/anchors/ %endif diff --git a/spacewalk/certs-tools/rhn_bootstrap.py b/spacewalk/certs-tools/rhn_bootstrap.py index 8b098b186087..fa69c6775334 100755 --- a/spacewalk/certs-tools/rhn_bootstrap.py +++ b/spacewalk/certs-tools/rhn_bootstrap.py @@ -63,11 +63,10 @@ DEFAULT_CA_CERT_PATH = '/usr/share/rhn/'+CA_CRT_NAME initCFG('server') -DOC_ROOT = CFG.DOCUMENTROOT initCFG('java') -DEFAULT_APACHE_PUB_DIRECTORY = DOC_ROOT + '/pub' +DEFAULT_APACHE_PUB_DIRECTORY = '/srv/www/htdocs/pub' DEFAULT_OVERRIDES = 'client-config-overrides.txt' DEFAULT_SCRIPT = 'bootstrap.sh' diff --git a/spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf b/spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf index beabb28d1c95..40744d379c75 100644 --- a/spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf +++ b/spacewalk/config/etc/httpd/conf.d/zz-spacewalk-www.conf @@ -1,7 +1,7 @@ Logformat "%t %h %{SSL_PROTOCOL}x %{SSL_CIPHER}x \ \"%r\" %b \"%{Referer}i\" \"%{User-Agent}i\" %>s T%{ms}T" ssl_combined - + Options Indexes FollowSymLinks AllowOverride All diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 4d4ece8a4468..c87002289ab3 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -416,6 +416,7 @@ fi setup_apache() { sed -i 
's|DocumentRoot "/srv|DocumentRoot "/usr/share/susemanager|g' /etc/apache2/default-server.conf sed -i 's| Date: Thu, 21 Sep 2023 17:08:55 +0200 Subject: [PATCH 30/40] rerite rule for pub --- spacewalk/config/etc/httpd/conf.d/public.conf | 10 ++++++++++ spacewalk/config/spacewalk-config.spec | 1 + 2 files changed, 11 insertions(+) create mode 100644 spacewalk/config/etc/httpd/conf.d/public.conf diff --git a/spacewalk/config/etc/httpd/conf.d/public.conf b/spacewalk/config/etc/httpd/conf.d/public.conf new file mode 100644 index 000000000000..77d1a2c7f532 --- /dev/null +++ b/spacewalk/config/etc/httpd/conf.d/public.conf @@ -0,0 +1,10 @@ +Alias /public /srv/www/htdocs/pub + + SetEnv VIRTUALENV + Options Indexes + Require all granted + AllowOverride All + + +RewriteEngine On +RewriteRule ^/pub/(.+)$ /public/$1 [L,PT] diff --git a/spacewalk/config/spacewalk-config.spec b/spacewalk/config/spacewalk-config.spec index bb8d9280ff82..ad51f2ef3b51 100644 --- a/spacewalk/config/spacewalk-config.spec +++ b/spacewalk/config/spacewalk-config.spec @@ -91,6 +91,7 @@ mkdir -p $RPM_BUILD_ROOT/etc/pki/tls/private/ %attr(400,root,root) %config(noreplace) %{_sysconfdir}/rhn/spacewalk-repo-sync/uln.conf %config %{apacheconfdir}/conf.d/zz-spacewalk-www.conf %config %{apacheconfdir}/conf.d/os-images.conf +%config %{apacheconfdir}/conf.d/public.conf %attr(440,root,root) %config %{_sysconfdir}/sudoers.d/spacewalk %dir %{_var}/lib/cobbler/ %dir %{_var}/lib/cobbler/kickstarts/ From 4aaf53142c3052b8bc53b1b2f1ead13b46b84fc7 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 21 Sep 2023 18:06:01 +0200 Subject: [PATCH 31/40] fix salt path --- java/spacewalk-java.spec | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/java/spacewalk-java.spec b/java/spacewalk-java.spec index 966761e78206..b2f783261b20 100644 --- a/java/spacewalk-java.spec +++ b/java/spacewalk-java.spec @@ -29,6 +29,7 @@ %define omit_tests 1 %if 0%{?suse_version} +%define shareddir /usr/share 
%define susemanagershareddir /usr/share/susemanager %define serverdir %{susemanagershareddir}/www %define apache_group www @@ -532,14 +533,18 @@ install -d -m 755 $RPM_BUILD_ROOT%{cobdirsnippets} install -d -m 755 $RPM_BUILD_ROOT/%{_localstatedir}/lib/spacewalk/scc install -d -m 755 $RPM_BUILD_ROOT/%{_localstatedir}/lib/spacewalk/subscription-matcher +mkdir -p $RPM_BUILD_ROOT%{shareddir}/susemanager/salt/salt_ssh/temp_bootstrap_keys +mkdir -p $RPM_BUILD_ROOT%{shareddir}/susemanager/pillar_data/images +mkdir -p $RPM_BUILD_ROOT%{shareddir}/susemanager/formula_data +mkdir -p $RPM_BUILD_ROOT%{shareddir}/susemanager/tmp install -d -m 755 $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d -install -d $RPM_BUILD_ROOT%{serverdir}/susemanager/salt -install -d $RPM_BUILD_ROOT%{serverdir}/susemanager/salt/salt_ssh +install -d $RPM_BUILD_ROOT%{shareddir}/susemanager/salt +install -d $RPM_BUILD_ROOT%{shareddir}/susemanager/salt/salt_ssh install -d $RPM_BUILD_ROOT%{serverdir}/susemanager/salt/salt_ssh/temp_bootstrap_keys -install -d -m 775 $RPM_BUILD_ROOT%{serverdir}/susemanager/pillar_data -install -d -m 775 $RPM_BUILD_ROOT%{serverdir}/susemanager/pillar_data/images -install -d $RPM_BUILD_ROOT%{serverdir}/susemanager/formula_data -install -d $RPM_BUILD_ROOT%{serverdir}/susemanager/tmp +install -d -m 775 $RPM_BUILD_ROOT%{shareddir}/susemanager/pillar_data +install -d -m 775 $RPM_BUILD_ROOT%{shareddir}/susemanager/pillar_data/images +install -d $RPM_BUILD_ROOT%{shareddir}/susemanager/formula_data +install -d $RPM_BUILD_ROOT%{shareddir}/susemanager/tmp install -m 644 conf/default/rhn_hibernate.conf $RPM_BUILD_ROOT%{_prefix}/share/rhn/config-defaults/rhn_hibernate.conf install -m 644 conf/default/rhn_reporting_hibernate.conf $RPM_BUILD_ROOT%{_prefix}/share/rhn/config-defaults/rhn_reporting_hibernate.conf @@ -694,7 +699,6 @@ chown tomcat:%{apache_group} /var/log/rhn/gatherer.log %files %defattr(-,root,root) -%dir %{susemanagershareddir} %dir %{serverdir} %dir %{_localstatedir}/lib/spacewalk 
%defattr(644,tomcat,tomcat,775) @@ -800,7 +804,6 @@ chown tomcat:%{apache_group} /var/log/rhn/gatherer.log %defattr(644,root,root,755) %dir %{_prefix}/share/rhn/search %dir %{_prefix}/share/rhn/search/lib -%dir %{susemanagershareddir} %dir %{serverdir} %{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/postgresql-jdbc.jar %{_prefix}/share/rhn/search/lib/postgresql-jdbc.jar From 59195aab6d3bb15c939e29bd3e2e0b060d956321 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Fri, 22 Sep 2023 09:28:39 +0200 Subject: [PATCH 32/40] Fix broken cert link --- susemanager-utils/susemanager-sls/susemanager-sls.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/susemanager-utils/susemanager-sls/susemanager-sls.spec b/susemanager-utils/susemanager-sls/susemanager-sls.spec index c13777b48cc2..e200c6734dbc 100644 --- a/susemanager-utils/susemanager-sls/susemanager-sls.spec +++ b/susemanager-utils/susemanager-sls/susemanager-sls.spec @@ -129,7 +129,7 @@ py.test%{?rhel:-3} %post # HACK! Create broken link when it will be replaces with the real file -ln -sf %{wwwdocroot}/pub/RHN-ORG-TRUSTED-SSL-CERT \ +ln -sf /srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT \ /usr/share/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT 2>&1 ||: %posttrans From 03750be38838a636b211080e5605d3dcfe0d467d Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Mon, 2 Oct 2023 10:21:47 +0200 Subject: [PATCH 33/40] move pub apache rule file --- .../config/etc/httpd/conf.d/{public.conf => z-public.conf} | 0 spacewalk/config/spacewalk-config.spec | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename spacewalk/config/etc/httpd/conf.d/{public.conf => z-public.conf} (100%) diff --git a/spacewalk/config/etc/httpd/conf.d/public.conf b/spacewalk/config/etc/httpd/conf.d/z-public.conf similarity index 100% rename from spacewalk/config/etc/httpd/conf.d/public.conf rename to spacewalk/config/etc/httpd/conf.d/z-public.conf diff --git a/spacewalk/config/spacewalk-config.spec b/spacewalk/config/spacewalk-config.spec 
index ad51f2ef3b51..5cc2323ddb59 100644 --- a/spacewalk/config/spacewalk-config.spec +++ b/spacewalk/config/spacewalk-config.spec @@ -91,7 +91,7 @@ mkdir -p $RPM_BUILD_ROOT/etc/pki/tls/private/ %attr(400,root,root) %config(noreplace) %{_sysconfdir}/rhn/spacewalk-repo-sync/uln.conf %config %{apacheconfdir}/conf.d/zz-spacewalk-www.conf %config %{apacheconfdir}/conf.d/os-images.conf -%config %{apacheconfdir}/conf.d/public.conf +%config %{apacheconfdir}/conf.d/z-public.conf %attr(440,root,root) %config %{_sysconfdir}/sudoers.d/spacewalk %dir %{_var}/lib/cobbler/ %dir %{_var}/lib/cobbler/kickstarts/ From a5bc681e6c108c1ea25dd4e39089faae83bb5573 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Mon, 2 Oct 2023 12:49:56 +0200 Subject: [PATCH 34/40] move empty and empty-deb repo --- susemanager/empty-repo.conf | 20 ++++++++++++++++++-- susemanager/susemanager.spec | 4 +++- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/susemanager/empty-repo.conf b/susemanager/empty-repo.conf index 82b468721cff..2d5b4e5365ca 100644 --- a/susemanager/empty-repo.conf +++ b/susemanager/empty-repo.conf @@ -1,2 +1,18 @@ -RewriteRule ^/pub/repositories/empty/(.*)$ /gpg/repositories/empty/$1 [L,PT] -RewriteRule ^/pub/repositories/empty-deb/(.*)$ /gpg/repositories/empty-deb/$1 [L,PT] +Alias /empty /usr/share/susemanager/www/pub/repositories/empty +Alias /empty-deb /usr/share/susemanager/www/pub/repositories/empty-deb + + + + SetEnv VIRTUALENV + Options Indexes + Require all granted + + + + SetEnv VIRTUALENV + Options Indexes + Require all granted + + +RewriteRule ^/pub/repositories/empty/(.*)$ /empty/$1 [L,PT] +RewriteRule ^/pub/repositories/empty-deb/(.*)$ /empty-deb/$1 [L,PT] diff --git a/susemanager/susemanager.spec b/susemanager/susemanager.spec index b98bbc3c2ce0..45987080cb13 100644 --- a/susemanager/susemanager.spec +++ b/susemanager/susemanager.spec @@ -42,7 +42,8 @@ %global wwwroot %{serverdir}/www %endif -%global reporoot %{_datarootdir}/susemanager/gpg/ +%global wwwroot 
%{_datarootdir}/susemanager/www +%global reporoot %{wwwroot}/pub %global debug_package %{nil} @@ -294,6 +295,7 @@ sed -i '/You can access .* via https:\/\//d' /tmp/motd 2> /dev/null ||: %dir %{pythonsmroot}/susemanager %dir %{_prefix}/share/rhn/ %dir %{_datadir}/susemanager +%dir %{wwwroot} %dir %{reporoot} %dir %{reporoot}/repositories %dir %{reporoot}/repositories/empty From 8a2a41711eafea3ce1cabea7996b94fddfef7a83 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Tue, 3 Oct 2023 09:35:45 +0200 Subject: [PATCH 35/40] fix permission --- susemanager/bin/mgr-setup | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index c87002289ab3..38a2c7296ef9 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -425,12 +425,19 @@ setup_apache() { #TODO move this on the permission check function setup_permission() { mkdir -p /srv/susemanager/salt/tmp + mkdir -p /srv/susemanager/tmp mkdir -p /srv/susemanager/salt/custom + mkdir -p /srv/susemanager/salt/salt_ssh + mkdir -p /srv/susemanager/pillar_data/images chown tomcat:susemanager -R /srv/susemanager/salt/tmp chown tomcat:susemanager -R /srv/susemanager/salt/custom chown tomcat:tomcat -R /usr/share/susemanager/www/htdocs chmod 777 -R /srv/susemanager/salt + chmod 777 -R /srv/susemanager/tmp + chmod 777 -R /srv/susemanager/salt/custom chmod 777 -R /usr/share/susemanager/www/htdocs + chmod 777 -R /srv/susemanager/salt/salt_ssh + chmod 777 -R /srv/susemanager/pillar_data/images } setup_spacewalk() { From 88e82e95fed5e58f769200ac36f67cd9319ab8cb Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 4 Oct 2023 11:57:06 +0200 Subject: [PATCH 36/40] fix permission --- java/spacewalk-java.spec | 23 ++++++++++++----------- susemanager/bin/mgr-setup | 12 ------------ 2 files changed, 12 insertions(+), 23 deletions(-) diff --git a/java/spacewalk-java.spec b/java/spacewalk-java.spec index b2f783261b20..e6f44c6a4882 100644 --- a/java/spacewalk-java.spec +++ 
b/java/spacewalk-java.spec @@ -32,6 +32,7 @@ %define shareddir /usr/share %define susemanagershareddir /usr/share/susemanager %define serverdir %{susemanagershareddir}/www +%define userserverdir /srv %define apache_group www %define salt_user_group salt %define apache2 apache2 @@ -533,18 +534,18 @@ install -d -m 755 $RPM_BUILD_ROOT%{cobdirsnippets} install -d -m 755 $RPM_BUILD_ROOT/%{_localstatedir}/lib/spacewalk/scc install -d -m 755 $RPM_BUILD_ROOT/%{_localstatedir}/lib/spacewalk/subscription-matcher -mkdir -p $RPM_BUILD_ROOT%{shareddir}/susemanager/salt/salt_ssh/temp_bootstrap_keys -mkdir -p $RPM_BUILD_ROOT%{shareddir}/susemanager/pillar_data/images -mkdir -p $RPM_BUILD_ROOT%{shareddir}/susemanager/formula_data -mkdir -p $RPM_BUILD_ROOT%{shareddir}/susemanager/tmp +mkdir -p $RPM_BUILD_ROOT%{userserverdir}/susemanager/salt/salt_ssh/temp_bootstrap_keys +mkdir -p $RPM_BUILD_ROOT%{userserverdir}/susemanager/pillar_data/images +mkdir -p $RPM_BUILD_ROOT%{userserverdir}/susemanager/formula_data +mkdir -p $RPM_BUILD_ROOT%{userserverdir}/susemanager/tmp install -d -m 755 $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d -install -d $RPM_BUILD_ROOT%{shareddir}/susemanager/salt -install -d $RPM_BUILD_ROOT%{shareddir}/susemanager/salt/salt_ssh -install -d $RPM_BUILD_ROOT%{serverdir}/susemanager/salt/salt_ssh/temp_bootstrap_keys -install -d -m 775 $RPM_BUILD_ROOT%{shareddir}/susemanager/pillar_data -install -d -m 775 $RPM_BUILD_ROOT%{shareddir}/susemanager/pillar_data/images -install -d $RPM_BUILD_ROOT%{shareddir}/susemanager/formula_data -install -d $RPM_BUILD_ROOT%{shareddir}/susemanager/tmp +install -d $RPM_BUILD_ROOT%{userserverdir}/susemanager/salt +install -d $RPM_BUILD_ROOT%{userserverdir}/susemanager/salt/salt_ssh +install -d $RPM_BUILD_ROOT%{userserverdir}/susemanager/salt/salt_ssh/temp_bootstrap_keys +install -d -m 775 $RPM_BUILD_ROOT%{userserverdir}/susemanager/pillar_data +install -d -m 775 $RPM_BUILD_ROOT%{userserverdir}/susemanager/pillar_data/images +install -d 
$RPM_BUILD_ROOT%{userserverdir}/susemanager/formula_data +install -d $RPM_BUILD_ROOT%{userserverdir}/susemanager/tmp install -m 644 conf/default/rhn_hibernate.conf $RPM_BUILD_ROOT%{_prefix}/share/rhn/config-defaults/rhn_hibernate.conf install -m 644 conf/default/rhn_reporting_hibernate.conf $RPM_BUILD_ROOT%{_prefix}/share/rhn/config-defaults/rhn_reporting_hibernate.conf diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 38a2c7296ef9..5db3d4c48c25 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -424,20 +424,8 @@ setup_apache() { #TODO move this on the permission check function setup_permission() { - mkdir -p /srv/susemanager/salt/tmp - mkdir -p /srv/susemanager/tmp - mkdir -p /srv/susemanager/salt/custom - mkdir -p /srv/susemanager/salt/salt_ssh - mkdir -p /srv/susemanager/pillar_data/images - chown tomcat:susemanager -R /srv/susemanager/salt/tmp - chown tomcat:susemanager -R /srv/susemanager/salt/custom chown tomcat:tomcat -R /usr/share/susemanager/www/htdocs - chmod 777 -R /srv/susemanager/salt - chmod 777 -R /srv/susemanager/tmp - chmod 777 -R /srv/susemanager/salt/custom chmod 777 -R /usr/share/susemanager/www/htdocs - chmod 777 -R /srv/susemanager/salt/salt_ssh - chmod 777 -R /srv/susemanager/pillar_data/images } setup_spacewalk() { From a589fd8ec62f66f9f732ee329c73de37c55cd221 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 5 Oct 2023 11:35:02 +0200 Subject: [PATCH 37/40] fixup! 
fix permission --- susemanager/susemanager.spec | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/susemanager/susemanager.spec b/susemanager/susemanager.spec index 45987080cb13..fc909a411ff9 100644 --- a/susemanager/susemanager.spec +++ b/susemanager/susemanager.spec @@ -42,8 +42,7 @@ %global wwwroot %{serverdir}/www %endif -%global wwwroot %{_datarootdir}/susemanager/www -%global reporoot %{wwwroot}/pub +%global reporoot %{_datarootdir}/susemanager/www/pub %global debug_package %{nil} From 1fa2ea31050beeb60ab68f6983167ce8a744e5e5 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 5 Oct 2023 11:49:17 +0200 Subject: [PATCH 38/40] fixup! fixup! fix permission --- java/spacewalk-java.spec | 4 ---- 1 file changed, 4 deletions(-) diff --git a/java/spacewalk-java.spec b/java/spacewalk-java.spec index e6f44c6a4882..3184acd9f786 100644 --- a/java/spacewalk-java.spec +++ b/java/spacewalk-java.spec @@ -534,10 +534,6 @@ install -d -m 755 $RPM_BUILD_ROOT%{cobdirsnippets} install -d -m 755 $RPM_BUILD_ROOT/%{_localstatedir}/lib/spacewalk/scc install -d -m 755 $RPM_BUILD_ROOT/%{_localstatedir}/lib/spacewalk/subscription-matcher -mkdir -p $RPM_BUILD_ROOT%{userserverdir}/susemanager/salt/salt_ssh/temp_bootstrap_keys -mkdir -p $RPM_BUILD_ROOT%{userserverdir}/susemanager/pillar_data/images -mkdir -p $RPM_BUILD_ROOT%{userserverdir}/susemanager/formula_data -mkdir -p $RPM_BUILD_ROOT%{userserverdir}/susemanager/tmp install -d -m 755 $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d install -d $RPM_BUILD_ROOT%{userserverdir}/susemanager/salt install -d $RPM_BUILD_ROOT%{userserverdir}/susemanager/salt/salt_ssh From 35d480b3454dea37ccfa93ecf822c6b7b469751e Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 5 Oct 2023 12:24:24 +0200 Subject: [PATCH 39/40] fixup! fixup! fixup! 
fix permission --- java/spacewalk-java.spec | 2 ++ 1 file changed, 2 insertions(+) diff --git a/java/spacewalk-java.spec b/java/spacewalk-java.spec index 3184acd9f786..65b7e8ae8d5e 100644 --- a/java/spacewalk-java.spec +++ b/java/spacewalk-java.spec @@ -696,6 +696,7 @@ chown tomcat:%{apache_group} /var/log/rhn/gatherer.log %files %defattr(-,root,root) +%dir %{susemanagershareddir} %dir %{serverdir} %dir %{_localstatedir}/lib/spacewalk %defattr(644,tomcat,tomcat,775) @@ -802,6 +803,7 @@ chown tomcat:%{apache_group} /var/log/rhn/gatherer.log %dir %{_prefix}/share/rhn/search %dir %{_prefix}/share/rhn/search/lib %dir %{serverdir} +%dir %{susemanagershareddir} %{serverdir}/tomcat/webapps/rhn/WEB-INF/lib/postgresql-jdbc.jar %{_prefix}/share/rhn/search/lib/postgresql-jdbc.jar %defattr(644,tomcat,tomcat,775) From 0f424498b924061253a3e77b89c1d4ab77c7b18f Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 5 Oct 2023 12:26:50 +0200 Subject: [PATCH 40/40] fixup! fixup! fixup! fixup! fix permission --- susemanager/susemanager.spec | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/susemanager/susemanager.spec b/susemanager/susemanager.spec index fc909a411ff9..6e29fa985a4e 100644 --- a/susemanager/susemanager.spec +++ b/susemanager/susemanager.spec @@ -42,7 +42,8 @@ %global wwwroot %{serverdir}/www %endif -%global reporoot %{_datarootdir}/susemanager/www/pub +%global sharedwwwroot %{_datarootdir}/susemanager/www +%global reporoot %{sharedwwwroot}/pub %global debug_package %{nil} @@ -295,6 +296,7 @@ sed -i '/You can access .* via https:\/\//d' /tmp/motd 2> /dev/null ||: %dir %{_prefix}/share/rhn/ %dir %{_datadir}/susemanager %dir %{wwwroot} +%dir %{sharedwwwroot} %dir %{reporoot} %dir %{reporoot}/repositories %dir %{reporoot}/repositories/empty