From 3ece6473de48bc7a76c9eb6f806ee38ca09d4d7c Mon Sep 17 00:00:00 2001 From: Brian Mangoenpawiro Date: Thu, 12 Sep 2024 16:44:36 +0200 Subject: [PATCH 01/25] Add release workflow (#332) * Add release workflow Signed-off-by: bmangoen * Use github.actor vars Signed-off-by: bmangoen * Workflow in the right directory Signed-off-by: bmangoen --------- Signed-off-by: bmangoen --- .github/workflows/release.yaml | 89 ++++++++++++++++++++++++++++++ hack/operatorhub/publish-bundle.sh | 11 ++++ 2 files changed, 100 insertions(+) create mode 100644 .github/workflows/release.yaml diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 000000000..89a72465c --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,89 @@ +name: Release workflow + +on: + workflow_dispatch: + inputs: + release_version: + description: "Release version" + required: true + bundle_channels: + description: "Bundle channels" + required: true + default: "candidates" + is_draft_release: + description: "Draft release" + type: boolean + required: false + default: true + is_pre_release: + description: "Pre-release" + type: boolean + required: false + default: false + +run-name: Release ${{ inputs.release_version }} + +env: + GIT_USER: ${{ secrets.GIT_USER }} + GITHUB_TOKEN: ${{ secrets.GIT_TOKEN }} + VERSION: ${{ inputs.release_version }} + +jobs: + release: + runs-on: ubuntu-latest + + steps: + - name: Login to quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAY_USER }} + password: ${{ secrets.QUAY_PWD }} + + - uses: actions/checkout@v4 + + - name: Build and push operator image + run: | + make docker-buildx \ + -e TAG=$VERSION + + - name: Generate bundle metadata + run: | + make bundle \ + -e CHANNELS=$CHANNELS + env: + CHANNELS: ${{ inputs.bundle_channels }} + + - name: Publish bundle in operatorhub.io + run: | + make bundle-publish \ + -e GIT_CONFIG_USER_NAME="$GIT_CONFIG_USER_NAME" \ + -e GIT_CONFIG_USER_EMAIL="$GIT_CONFIG_USER_EMAIL" \ + -e OPERATOR_VERSION=$VERSION \ + -e OPERATOR_HUB=community-operators \ + -e OWNER=k8s-operatorhub \ + -e FORK=maistra + env: + GIT_CONFIG_USER_NAME: "${{ github.actor }}" + GIT_CONFIG_USER_EMAIL: "${{ github.actor_id }}+${{ github.actor }}@users.noreply.github.com" + + - name: Publish bundle in OpenShift OperatorHub + run: | + make bundle-publish \ + -e GIT_CONFIG_USER_NAME="$GIT_CONFIG_USER_NAME" \ + -e GIT_CONFIG_USER_EMAIL="$GIT_CONFIG_USER_EMAIL" \ + -e OPERATOR_VERSION=$VERSION \ + -e OWNER=redhat-openshift-ecosystem \ + -e FORK=maistra + env: + GIT_CONFIG_USER_NAME: "${{ github.actor }}" + GIT_CONFIG_USER_EMAIL: "${{ github.actor_id }}+${{ github.actor }}@users.noreply.github.com" + + - name: Create GitHub release + run: | + make create-gh-release \ + -e GH_PRE_RELEASE=$GH_PRE_RELEASE \ + -e GH_RELEASE_DRAFT=$GH_RELEASE_DRAFT + env: + GH_PRE_RELEASE: ${{ github.event.inputs.is_pre_release == 'true' }} + GH_RELEASE_DRAFT: ${{ github.event.inputs.is_draft_release == 'true' }} \ No newline at end of file diff --git a/hack/operatorhub/publish-bundle.sh b/hack/operatorhub/publish-bundle.sh index 59fbe2785..e72e12858 100755 --- a/hack/operatorhub/publish-bundle.sh +++ b/hack/operatorhub/publish-bundle.sh @@ -23,6 +23,9 @@ source "${CUR_DIR}"/../validate_semver.sh GITHUB_TOKEN="${GITHUB_TOKEN:-}" GIT_USER="${GIT_USER:-}" +GIT_CONFIG_USER_NAME="${GIT_CONFIG_USER_NAME:-}" +GIT_CONFIG_USER_EMAIL="${GIT_CONFIG_USER_EMAIL:-}" + # The OPERATOR_NAME is defined in Makefile : "${OPERATOR_NAME:?"Missing 
OPERATOR_NAME variable"}" : "${OPERATOR_VERSION:?"Missing OPERATOR_VERSION variable"}" @@ -96,6 +99,14 @@ BUNDLE_DIR="${CUR_DIR}"/../../bundle mkdir -p "${OPERATORS_DIR}" cp -a "${BUNDLE_DIR}"/. "${OPERATORS_DIR}" +if ! git config --global user.name; then + skipInDryRun git config --global user.name "${GIT_CONFIG_USER_NAME}" +fi + +if ! git config --global user.email; then + skipInDryRun git config --global user.email "${GIT_CONFIG_USER_EMAIL}" +fi + TITLE="operator ${OPERATOR_NAME} (${OPERATOR_VERSION})" skipInDryRun git add . skipInDryRun git commit -s -m"${TITLE}" From 468f457c70f68054bc1ad7a50847513c4ea9dd24 Mon Sep 17 00:00:00 2001 From: Sridhar Gaddam Date: Mon, 16 Sep 2024 20:12:15 +0530 Subject: [PATCH 02/25] Fix setup-multi-primary script (#340) Currently, running the script simply hangs and there was also a typo in the apiVersion. This PR fixes both the issues. Signed-off-by: Sridhar Gaddam --- docs/multicluster/setup-multi-primary.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/multicluster/setup-multi-primary.sh b/docs/multicluster/setup-multi-primary.sh index 4463c3719..53589089f 100755 --- a/docs/multicluster/setup-multi-primary.sh +++ b/docs/multicluster/setup-multi-primary.sh @@ -25,6 +25,7 @@ while [ $# -gt 0 ]; do exit 0 ;; esac + shift done set -euo pipefail @@ -96,7 +97,7 @@ kubectl get secret -n istio-system --context "${CTX_CLUSTER2}" cacerts || kubect # 4. Create Sail CR on east kubectl apply --context "${CTX_CLUSTER1}" -f - < Date: Tue, 17 Sep 2024 11:48:15 +0300 Subject: [PATCH 03/25] Add README file for Helm charts section (#338) - Add instructions on deploying the sail-operator by using the helm charts defined within the repository. - Rearrange the following guides to avoid duplication. Place them within 'docs/common' directory and reference them from the relevant docs guides. - create-and-configure-gateways.md - install-bookinfo-app.md - install-istioctl-tool.md - istio-addons-integrations.md Signed-off-by: Maxim Babushkin --- bundle/README.md | 262 +------------------ chart/README.md | 248 ++++++++++++++++++ docs/README.md | 4 +- docs/common/create-and-configure-gateways.md | 84 ++++++ docs/common/install-bookinfo-app.md | 30 +++ docs/common/install-istioctl-tool.md | 52 ++++ docs/common/istio-addons-integrations.md | 119 +++++++++ 7 files changed, 539 insertions(+), 260 deletions(-) create mode 100644 chart/README.md create mode 100644 docs/common/create-and-configure-gateways.md create mode 100644 docs/common/install-bookinfo-app.md create mode 100644 docs/common/install-istioctl-tool.md create mode 100644 docs/common/istio-addons-integrations.md diff --git a/bundle/README.md b/bundle/README.md index e2878edf1..50aaf7008 100644 --- a/bundle/README.md +++ b/bundle/README.md @@ -151,53 +151,7 @@ Alternatively, refer to [Istio's artifacthub chart documentation](https://artifa The `istioctl` tool is a configuration command line utility that allows service operators to debug and diagnose Istio service mesh deployments. - -### Prerequisites - -Use an `istioctl` version that is the same version as the Istio control plane -for the Service Mesh deployment. See [Istio Releases](https://github.com/istio/istio/releases) for a list of valid -releases, including Beta releases. - - -### Procedure - -1. Confirm if you have `istioctl` installed, and if so which version, by running -the following command at the terminal: - - ```sh - $ istioctl version - ``` - -1. 
Confirm the version of Istio you are using by running the following command -at the terminal: - - ```sh - $ oc -n istio-system get istio - ``` - -1. Install `istioctl` by running the following command at the terminal: - - ```sh - $ curl -sL https://istio.io/downloadIstioctl | ISTIO_VERSION= sh - - ``` - Replace `` with the version of Istio you are using. - -1. Put the `istioctl` directory on path by running the following command at the terminal: - - ```sh - $ export PATH=$HOME/.istioctl/bin:$PATH - ``` - -1. Confirm that the `istioctl` client version and the Istio control plane -version now match (or are within one version) by running the following command -at the terminal: - - ```sh - $ istioctl version - ``` - - -*Note*: `istioctl install` is not supported. The Sail Operator installs Istio. +For installation steps, refer to the following [link](../docs/common/install-istioctl-tool.md). ## Installing the Bookinfo Application @@ -205,30 +159,7 @@ You can use the `bookinfo` example application to explore service mesh features. Using the `bookinfo` application, you can easily confirm that requests from a web browser pass through the mesh and reach the application. -The `bookinfo` application displays information about a book, similar to a -single catalog entry of an online book store. The application displays a page -that describes the book, lists book details (ISBN, number of pages, and other -information), and book reviews. - -The `bookinfo` application is exposed through the mesh, and the mesh configuration -determines how the microservices comprising the application are used to serve -requests. The review information comes from one of three services: `reviews-v1`, -`reviews-v2` or `reviews-v3`. If you deploy the `bookinfo` application without -defining the `reviews` virtual service, then the mesh uses a round-robin rule to -route requests to a service. - -By deploying the `reviews` virtual service, you can specify a different behavior. -For example, you can specify that if a user logs into the `bookinfo` application, -then the mesh routes requests to the `reviews-v2` service, and the application -displays reviews with black stars. If a user does not log into the `bookinfo` -application, then the mesh routes requests to the `reviews-v3` service, and the -application displays reviews with red stars. - -For more information, see [Bookinfo Application](https://istio.io/latest/docs/examples/bookinfo/) in the upstream Istio documentation. - -After following the instructions for [Deploying the application](https://istio.io/latest/docs/examples/bookinfo/#start-the-application-services), **you -will need to create and configure a gateway** for the `bookinfo` application to -be accessible outside the cluster. +For installation steps, refer to the following [link](../docs/common/install-bookinfo-app.md). ## Creating and Configuring Gateways @@ -240,81 +171,7 @@ contains the control plane. You can deploy gateways using either the Gateway API or Gateway Injection methods. - -### Option 1: Istio Gateway Injection - -Gateway Injection uses the same mechanisms as Istio sidecar injection to create -a gateway from a `Deployment` resource that is paired with a `Service` resource -that can be made accessible from outside the cluster. For more information, see -[Installing Gateways](https://preliminary.istio.io/latest/docs/setup/additional-setup/gateway/#deploying-a-gateway). 
- -To configure gateway injection with the `bookinfo` application, we have provided -a [sample gateway configuration](../chart/samples/ingress-gateway.yaml?raw=1) that should be applied in the namespace -where the application is installed: - -1. Create the `istio-ingressgateway` deployment and service: - - ```sh - $ oc apply -f -n ingress-gateway.yaml - ``` - -2. Configure the `bookinfo` application with the new gateway: - - ```sh - $ oc apply -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/networking/bookinfo-gateway.yaml - ``` - -3. On OpenShift, you can use a [Route](https://docs.openshift.com/container-platform/4.13/networking/routes/route-configuration.html) to expose the gateway externally: - - ```sh - $ oc expose service istio-ingressgateway - ``` - -4. Finally, obtain the gateway host name and the URL of the product page: - - ```sh - $ HOST=$(oc get route istio-ingressgateway -o jsonpath='{.spec.host}') - $ echo http://$HOST/productpage - ``` - -Verify that the `productpage` is accessible from a web browser. - - -### Option 2: Kubernetes Gateway API - -Istio includes support for Kubernetes [Gateway API](https://gateway-api.sigs.k8s.io/) and intends to make it -the default API for [traffic management in the future](https://istio.io/latest/blog/2022/gateway-api-beta/). For more -information, see Istio's [Kubernetes Gateway API](https://istio.io/latest/docs/tasks/traffic-management/ingress/gateway-api/) page. - -As of Kubernetes 1.28 and OpenShift 4.14, the Kubernetes Gateway API CRDs are -not available by default and must be enabled to be used. This can be done with -the command: - -```sh -$ oc get crd gateways.gateway.networking.k8s.io &> /dev/null || { oc kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.0.0" | oc apply -f -; } -``` - -To configure `bookinfo` with a gateway using `Gateway API`: - -1. Create and configure a gateway using a `Gateway` and `HTTPRoute` resource: - - ```sh - $ oc apply -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/gateway-api/bookinfo-gateway.yaml - ``` - -2. Retrieve the host, port and gateway URL: - - ```sh - $ export INGRESS_HOST=$(oc get gtw bookinfo-gateway -o jsonpath='{.status.addresses[0].value}') - $ export INGRESS_PORT=$(oc get gtw bookinfo-gateway -o jsonpath='{.spec.listeners[?(@.name=="http")].port}') - $ export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT - ``` - -3. Obtain the `productpage` URL and check that you can visit it from a browser: - - ```sh - $ echo "http://${GATEWAY_URL}/productpage" - ``` +For installation steps, refer to the following [link](../docs/common/create-and-configure-gateways.md). ## Istio Addons Integrations @@ -324,118 +181,7 @@ Istio can be integrated with other software to provide additional functionality The following addons are for demonstration or development purposes only and should not be used in production environments: - -### Prometheus - -`Prometheus` is an open-source systems monitoring and alerting toolkit. You can -use `Prometheus` with the Sail Operator to keep an eye on how healthy Istio and -the apps in the service mesh are, for more information, see [Prometheus](https://istio.io/latest/docs/ops/integrations/prometheus/). - -To install Prometheus, perform the following steps: - -1. Deploy `Prometheus`: - - ```sh - $ oc apply -f https://raw.githubusercontent.com/istio/istio/master/samples/addons/prometheus.yaml - ``` -2. 
Access to `Prometheus`console: - - * Expose the `Prometheus` service externally: - - ```sh - $ oc expose service prometheus -n istio-system - ``` - * Get the route of the service and open the URL in the web browser - - ```sh - $ oc get route prometheus -o jsonpath='{.spec.host}' -n istio-system - ``` - - -### Grafana - -`Grafana` is an open-source platform for monitoring and observability. You can -use `Grafana` with the Sail Operator to configure dashboards for istio, see -[Grafana](https://istio.io/latest/docs/ops/integrations/grafana/) for more information. - -To install Grafana, perform the following steps: - -1. Deploy `Grafana`: - - ```sh - $ oc apply -f https://raw.githubusercontent.com/istio/istio/master/samples/addons/grafana.yaml - ``` - -2. Access to `Grafana`console: - - * Expose the `Grafana` service externally - - ```sh - $ oc expose service grafana -n istio-system - ``` - * Get the route of the service and open the URL in the web browser - - ```sh - $ oc get route grafana -o jsonpath='{.spec.host}' -n istio-system - ``` - - -### Jaeger - -`Jaeger` is an open-source end-to-end distributed tracing system. You can use -`Jaeger` with the Sail Operator to monitor and troubleshoot transactions in -complex distributed systems, see [Jaeger](https://istio.io/latest/docs/ops/integrations/jaeger/) for more information. - -To install Jaeger, perform the following steps: - -1. Deploy `Jaeger`: - - ```sh - $ oc apply -f https://raw.githubusercontent.com/istio/istio/master/samples/addons/jaeger.yaml - ``` -2. Access to `Jaeger` console: - - * Expose the `Jaeger` service externally: - - ```sh - $ oc expose svc/tracing -n istio-system - ``` - - * Get the route of the service and open the URL in the web browser - - ```sh - $ oc get route tracing -o jsonpath='{.spec.host}' -n istio-system - ``` -*Note*: if you want to see some traces you can refresh several times the product -page of bookinfo app to start generating traces. - - -### Kiali - -`Kiali` is an open-source project that provides a graphical user interface to -visualize the service mesh topology, see [Kiali](https://istio.io/latest/docs/ops/integrations/kiali/) for more information. - -To install Kiali, perform the following steps: - -1. Deploy `Kiali`: - - ```sh - $ oc apply -f https://raw.githubusercontent.com/istio/istio/master/samples/addons/kiali.yaml - ``` - -2. Access to `Kiali` console: - - * Expose the `Kiali` service externally: - - ```sh - $ oc expose service kiali -n istio-system - ``` - - * Get the route of the service and open the URL in the web browser - - ```sh - $ oc get route kiali -o jsonpath='{.spec.host}' -n istio-system - ``` +For installation steps, refer to the following [link](../docs/common/istio-addons-integrations.md). ## Undeploying Istio and the Sail Operator diff --git a/chart/README.md b/chart/README.md new file mode 100644 index 000000000..c2e4cf792 --- /dev/null +++ b/chart/README.md @@ -0,0 +1,248 @@ +# Deploy Sail Operator by using Helm charts + +Follow this guide to install and configure Sail Operator by using [Helm](https://helm.sh/docs/) + +## Prerequisites + +Kubernetes: +* You have deployed a cluster on Kubernetes platform 1.27 or later. +* You are logged in to the Kubernetes cluster with admin permissions level user. + +OpenShift: +* You have deployed a cluster on OpenShift Container Platform 4.14 or later. +* You are logged in to the OpenShift Container Platform web console as a user with the `cluster-admin` role. 
+ +[Install the Helm client](https://helm.sh/docs/intro/install/), version 3.6 or above. + +## Prepare the Helm charts + +**Note** - `Sail Operator` could be installed by downloading the release artifacts from the [release page](https://github.com/istio-ecosystem/sail-operator/releases). + +* Download the required release artifact +* Extract it locally. + + ```sh + $ tar -xvf /tmp/sail-operator-.tgz + ``` + +The extract command will create the `sail-operator` directory with the helm charts in it. + +## Installation steps + +This section describes the procedure to install `Sail Operator` using Helm. The general syntax for helm installation is: + + ```sh + helm install --create-namespace --namespace [--set ] + ``` + +The variables specified in the command are as follows: +* `` - A name to identify and manage the Helm chart once installed. +* `` - A path to a packaged chart, a path to an unpacked chart directory or a URL. +* `` - The namespace in which the chart is to be installed. + +Default configuration values can be changed using one or more `--set =` arguments. Alternatively, you can specify several parameters in a custom values file using the `--values ` argument. + +1. Create the namespace, `sail-operator`, for the Sail Operator components: + + ```sh + $ kubectl create namespace sail-operator + ``` + +**Note** - This step could be skipped by using the `--create-namespace` argument in step 2. + +2. Install the Sail Operator base charts which will manage all the Custom Resource Definitions(CRDs) to be able to deploy the Istio control plane: + +* Kubernetes + + ```sh + $ helm install sail-operator sail-operator/ --namespace sail-operator + ``` + +* OpenShift + + ```sh + $ helm install sail-operator sail-operator/ --namespace sail-operator --set platform=openshift + ``` + +3. Validate the CRD installation with the `helm ls` command: + + ```sh + $ helm ls -n sail-operator + + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + sail-operator sail-operator 1 2024-09-16 12:43:18.786846217 +0300 IDT deployed sail-operator-0.1.0-rc.1 0.1.0-rc.1 + ``` + +4. Get the status of the installed helm chart to ensure it is deployed: + + ```bash + $ helm status sail-operator -n sail-operator + + NAME: sail-operator + LAST DEPLOYED: Mon Sep 16 12:43:18 2024 + NAMESPACE: sail-operator + STATUS: deployed + REVISION: 1 + TEST SUITE: None + ``` + +5. Check `sail-operator` deployment is successfully installed and its pods are running: + + ```sh + $ kubectl -n sail-operator get deployment --output wide + + NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR + sail-operator 1/1 1 1 19m kube-rbac-proxy,manager gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0,quay.io/maistra-dev/sail-operator:0.1.0-rc.1 app.kubernetes.io/created-by=sailoperator,app.kubernetes.io/part-of=sailoperator,control-plane=sail-operator + + $ kubectl -n sail-operator get pods -o wide + + NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES + sail-operator-666f84b6f4-9hw4t 2/2 Running 0 43s 10.244.0.8 sail-control-plane + ``` + +## Deploying Istio + +To deploy Istio, you must create the following resources: +* `Istio`. +* If you are using OpenShift, the `IstioCNI` must also be created. + +The `Istio` resource deploys and configures the Istio Control Plane, whereas the `IstioCNI` resource (in OpenShift) deploys and configures the Istio CNI plugin. You should create these resources in separate projects. + +### Create a namespace for Istio project. 
+ +* Kubernetes + + ```sh + $ kubectl create namespace istio-system + ``` + +* OpenShift + + ```sh + $ kubectl create namespace istio-system + $ kubectl create namespace istio-cni + ``` + +### Create the Istio resource + +The `sail-operator` charts directory contains `samples` directory, which contains manifests that could be used for Istio deployment. + +* Kubernetes + + ```sh + $ kubectl apply -f sail-operator/samples/istio-sample-kubernetes.yaml + ``` + +* OpenShift + + ```sh + $ kubectl apply -f sail-operator/samples/istio-sample-openshift.yaml + $ kubectl apply -f sail-operator/samples/istiocni-sample.yaml + ``` + +**Note** - The version can be specified by modifying the `version` field within `Istio` and `IstioCNI` manifests. + +### Customizing Istio configuration + +The `spec.values` field of the `Istio` and `IstioCNI` resource can be used to customize Istio and Istio CNI plugin configuration using Istio's `Helm` configuration values. + +An example configuration: + + ```yaml + apiVersion: sailoperator.io/v1alpha1 + kind: Istio + metadata: + name: example + spec: + version: v1.23.0 + values: + global: + mtls: + enabled: true + trustDomainAliases: + - example.net + meshConfig: + trustDomain: example.com + trustDomainAliases: + - example.net + ``` + +For a list of available configuration for the `spec.values` field, run the following command: + + ```sh + $ kubectl explain istio.spec.values + ``` + +For the `IstioCNI` resource, replace `istio` with `istiocni` in the command above. + +Alternatively, refer to [Istio's artifacthub chart documentation](https://artifacthub.io/packages/search?org=istio&sort=relevance&page=1) for: + +- [Base parameters](https://artifacthub.io/packages/helm/istio-official/base?modal=values) +- [Istiod parameters](https://artifacthub.io/packages/helm/istio-official/istiod?modal=values) +- [Gateway parameters](https://artifacthub.io/packages/helm/istio-official/gateway?modal=values) +- [CNI parameters](https://artifacthub.io/packages/helm/istio-official/cni?modal=values) +- [ZTunnel parameters](https://artifacthub.io/packages/helm/istio-official/ztunnel?modal=values) + +## Installing the istioctl tool + +The `istioctl` tool is a configuration command line utility that allows service +operators to debug and diagnose Istio service mesh deployments. + +For installation steps, refer to the following [link](../docs/common/install-istioctl-tool.md). + +## Installing the Bookinfo Application + +You can use the `bookinfo` example application to explore service mesh features. +Using the `bookinfo` application, you can easily confirm that requests from a +web browser pass through the mesh and reach the application. + +For installation steps, refer to the following [link](../docs/common/install-bookinfo-app.md). + +## Creating and Configuring Gateways + +The Sail Operator does not deploy Ingress or Egress Gateways. Gateways are not +part of the control plane. As a security best-practice, Ingress and Egress +Gateways should be deployed in a different namespace than the namespace that +contains the control plane. + +You can deploy gateways using either the Gateway API or Gateway Injection methods. + +For installation steps, refer to the following [link](../docs/common/create-and-configure-gateways.md). + +## Istio Addons Integrations + +Istio can be integrated with other software to provide additional functionality +(More information can be found in: https://istio.io/latest/docs/ops/integrations/). 
+The following addons are for demonstration or development purposes only and +should not be used in production environments: + +For installation steps, refer to the following [link](../docs/common/istio-addons-integrations.md). + + +## Undeploying Istio and the Sail Operator + +### Deleting Istio + + ```sh + $ kubectl -n istio-system delete istio default + ``` + +### Deleting IstioCNI (in OpenShift cluster platform) + + ```sh + $ kubectl -n istio-cni delete istiocni default + ``` + +### Uninstall the Sail Operator using Helm + + ```sh + $ helm uninstall sail-operator --namespace sail-operator + ``` + +### Deleting the Project namespaces + + ```sh + $ kubectl delete namespace istio-system + $ kubectl delete namespace istio-cni + $ kubectl delete namespace sail-operator + ``` diff --git a/docs/README.md b/docs/README.md index 64bdbefdd..3a7a4069f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -260,7 +260,7 @@ When the `InPlace` strategy is used, the existing Istio control plane is replace Prerequisites: * Sail Operator is installed. -* `istioctl` is installed. +* `istioctl` is [installed](common/istio-addons-integrations.md). Steps: 1. Create the `istio-system` namespace. @@ -336,7 +336,7 @@ When the `RevisionBased` strategy is used, a new Istio control plane instance is Prerequisites: * Sail Operator is installed. -* `istioctl` is installed. +* `istioctl` is [installed](common/istio-addons-integrations.md). Steps: diff --git a/docs/common/create-and-configure-gateways.md b/docs/common/create-and-configure-gateways.md new file mode 100644 index 000000000..00fdca0ee --- /dev/null +++ b/docs/common/create-and-configure-gateways.md @@ -0,0 +1,84 @@ +## Creating and Configuring Gateways + +The Sail Operator does not deploy Ingress or Egress Gateways. Gateways are not +part of the control plane. As a security best-practice, Ingress and Egress +Gateways should be deployed in a different namespace than the namespace that +contains the control plane. + +You can deploy gateways using either the Gateway API or Gateway Injection methods. + + +### Option 1: Istio Gateway Injection + +Gateway Injection uses the same mechanisms as Istio sidecar injection to create +a gateway from a `Deployment` resource that is paired with a `Service` resource +that can be made accessible from outside the cluster. For more information, see +[Installing Gateways](https://preliminary.istio.io/latest/docs/setup/additional-setup/gateway/#deploying-a-gateway). + +To configure gateway injection with the `bookinfo` application, we have provided +a [sample gateway configuration](../chart/samples/ingress-gateway.yaml?raw=1) that should be applied in the namespace +where the application is installed: + +1. Create the `istio-ingressgateway` deployment and service: + + ```sh + $ oc apply -f -n ingress-gateway.yaml + ``` + +2. Configure the `bookinfo` application with the new gateway: + + ```sh + $ oc apply -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/networking/bookinfo-gateway.yaml + ``` + +3. On OpenShift, you can use a [Route](https://docs.openshift.com/container-platform/4.13/networking/routes/route-configuration.html) to expose the gateway externally: + + ```sh + $ oc expose service istio-ingressgateway + ``` + +4. Finally, obtain the gateway host name and the URL of the product page: + + ```sh + $ HOST=$(oc get route istio-ingressgateway -o jsonpath='{.spec.host}') + $ echo http://$HOST/productpage + ``` + +Verify that the `productpage` is accessible from a web browser. 
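+
+As a quick check from the terminal (a minimal sketch, assuming `curl` is available
+and `$HOST` is still set from the previous step), you can confirm the gateway
+responds before opening a browser:
+
+```sh
+# Expect an HTTP 200 status code from the productpage served through the ingress gateway
+$ curl -sS -o /dev/null -w "%{http_code}\n" "http://${HOST}/productpage"
+```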
+ + +### Option 2: Kubernetes Gateway API + +Istio includes support for Kubernetes [Gateway API](https://gateway-api.sigs.k8s.io/) and intends to make it +the default API for [traffic management in the future](https://istio.io/latest/blog/2022/gateway-api-beta/). For more +information, see Istio's [Kubernetes Gateway API](https://istio.io/latest/docs/tasks/traffic-management/ingress/gateway-api/) page. + +As of Kubernetes 1.28 and OpenShift 4.14, the Kubernetes Gateway API CRDs are +not available by default and must be enabled to be used. This can be done with +the command: + +```sh +$ oc get crd gateways.gateway.networking.k8s.io &> /dev/null || { oc kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.0.0" | oc apply -f -; } +``` + +To configure `bookinfo` with a gateway using `Gateway API`: + +1. Create and configure a gateway using a `Gateway` and `HTTPRoute` resource: + + ```sh + $ oc apply -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/gateway-api/bookinfo-gateway.yaml + ``` + +2. Retrieve the host, port and gateway URL: + + ```sh + $ export INGRESS_HOST=$(oc get gtw bookinfo-gateway -o jsonpath='{.status.addresses[0].value}') + $ export INGRESS_PORT=$(oc get gtw bookinfo-gateway -o jsonpath='{.spec.listeners[?(@.name=="http")].port}') + $ export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT + ``` + +3. Obtain the `productpage` URL and check that you can visit it from a browser: + + ```sh + $ echo "http://${GATEWAY_URL}/productpage" + ``` diff --git a/docs/common/install-bookinfo-app.md b/docs/common/install-bookinfo-app.md new file mode 100644 index 000000000..0026b4d8e --- /dev/null +++ b/docs/common/install-bookinfo-app.md @@ -0,0 +1,30 @@ +## Installing the Bookinfo Application + +You can use the `bookinfo` example application to explore service mesh features. +Using the `bookinfo` application, you can easily confirm that requests from a +web browser pass through the mesh and reach the application. + +The `bookinfo` application displays information about a book, similar to a +single catalog entry of an online book store. The application displays a page +that describes the book, lists book details (ISBN, number of pages, and other +information), and book reviews. + +The `bookinfo` application is exposed through the mesh, and the mesh configuration +determines how the microservices comprising the application are used to serve +requests. The review information comes from one of three services: `reviews-v1`, +`reviews-v2` or `reviews-v3`. If you deploy the `bookinfo` application without +defining the `reviews` virtual service, then the mesh uses a round-robin rule to +route requests to a service. + +By deploying the `reviews` virtual service, you can specify a different behavior. +For example, you can specify that if a user logs into the `bookinfo` application, +then the mesh routes requests to the `reviews-v2` service, and the application +displays reviews with black stars. If a user does not log into the `bookinfo` +application, then the mesh routes requests to the `reviews-v3` service, and the +application displays reviews with red stars. + +For more information, see [Bookinfo Application](https://istio.io/latest/docs/examples/bookinfo/) in the upstream Istio documentation. + +After following the instructions for [Deploying the application](https://istio.io/latest/docs/examples/bookinfo/#start-the-application-services), **you +will need to create and configure a gateway** for the `bookinfo` application to +be accessible outside the cluster. 
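+
+For reference, a minimal deployment sketch (the `bookinfo` namespace name and the
+`istio-injection` label below are illustrative assumptions; revision-based control
+planes use the `istio.io/rev=<revision>` label instead):
+
+```sh
+# Create the application namespace and opt it in to sidecar injection (assumed namespace name)
+$ oc create namespace bookinfo
+$ oc label namespace bookinfo istio-injection=enabled
+
+# Deploy the bookinfo services and deployments from the upstream Istio samples
+$ oc apply -n bookinfo -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/platform/kube/bookinfo.yaml
+
+# Each application pod should eventually report 2/2 ready containers once sidecars are injected
+$ oc get pods -n bookinfo
+```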
diff --git a/docs/common/install-istioctl-tool.md b/docs/common/install-istioctl-tool.md new file mode 100644 index 000000000..ec489105e --- /dev/null +++ b/docs/common/install-istioctl-tool.md @@ -0,0 +1,52 @@ +## Installing the istioctl tool + +The `istioctl` tool is a configuration command line utility that allows service +operators to debug and diagnose Istio service mesh deployments. + + +### Prerequisites + +Use an `istioctl` version that is the same version as the Istio control plane +for the Service Mesh deployment. See [Istio Releases](https://github.com/istio/istio/releases) for a list of valid +releases, including Beta releases. + + +### Procedure + +1. Confirm if you have `istioctl` installed, and if so which version, by running +the following command at the terminal: + + ```sh + $ istioctl version + ``` + +2. Confirm the version of Istio you are using by running the following command +at the terminal: + + ```sh + $ oc -n istio-system get istio + ``` + +3. Install `istioctl` by running the following command at the terminal: + + ```sh + $ curl -sL https://istio.io/downloadIstioctl | ISTIO_VERSION= sh - + ``` + Replace `` with the version of Istio you are using. + +4. Put the `istioctl` directory on path by running the following command at the terminal: + + ```sh + $ export PATH=$HOME/.istioctl/bin:$PATH + ``` + +5. Confirm that the `istioctl` client version and the Istio control plane +version now match (or are within one version) by running the following command +at the terminal: + + ```sh + $ istioctl version + ``` + + +*Note*: `istioctl install` is not supported. The Sail Operator installs Istio. diff --git a/docs/common/istio-addons-integrations.md b/docs/common/istio-addons-integrations.md new file mode 100644 index 000000000..36f0ee1da --- /dev/null +++ b/docs/common/istio-addons-integrations.md @@ -0,0 +1,119 @@ +## Istio Addons Integrations + +Istio can be integrated with other software to provide additional functionality +(More information can be found in: https://istio.io/latest/docs/ops/integrations/). +The following addons are for demonstration or development purposes only and +should not be used in production environments: + + +### Prometheus + +`Prometheus` is an open-source systems monitoring and alerting toolkit. You can +use `Prometheus` with the Sail Operator to keep an eye on how healthy Istio and +the apps in the service mesh are, for more information, see [Prometheus](https://istio.io/latest/docs/ops/integrations/prometheus/). + +To install Prometheus, perform the following steps: + +1. Deploy `Prometheus`: + + ```sh + $ oc apply -f https://raw.githubusercontent.com/istio/istio/master/samples/addons/prometheus.yaml + ``` +2. Access to `Prometheus`console: + + * Expose the `Prometheus` service externally: + + ```sh + $ oc expose service prometheus -n istio-system + ``` + * Get the route of the service and open the URL in the web browser + + ```sh + $ oc get route prometheus -o jsonpath='{.spec.host}' -n istio-system + ``` + + +### Grafana + +`Grafana` is an open-source platform for monitoring and observability. You can +use `Grafana` with the Sail Operator to configure dashboards for istio, see +[Grafana](https://istio.io/latest/docs/ops/integrations/grafana/) for more information. + +To install Grafana, perform the following steps: + +1. Deploy `Grafana`: + + ```sh + $ oc apply -f https://raw.githubusercontent.com/istio/istio/master/samples/addons/grafana.yaml + ``` + +2. 
Access to `Grafana`console: + + * Expose the `Grafana` service externally + + ```sh + $ oc expose service grafana -n istio-system + ``` + * Get the route of the service and open the URL in the web browser + + ```sh + $ oc get route grafana -o jsonpath='{.spec.host}' -n istio-system + ``` + + +### Jaeger + +`Jaeger` is an open-source end-to-end distributed tracing system. You can use +`Jaeger` with the Sail Operator to monitor and troubleshoot transactions in +complex distributed systems, see [Jaeger](https://istio.io/latest/docs/ops/integrations/jaeger/) for more information. + +To install Jaeger, perform the following steps: + +1. Deploy `Jaeger`: + + ```sh + $ oc apply -f https://raw.githubusercontent.com/istio/istio/master/samples/addons/jaeger.yaml + ``` +2. Access to `Jaeger` console: + + * Expose the `Jaeger` service externally: + + ```sh + $ oc expose svc/tracing -n istio-system + ``` + + * Get the route of the service and open the URL in the web browser + + ```sh + $ oc get route tracing -o jsonpath='{.spec.host}' -n istio-system + ``` +*Note*: if you want to see some traces you can refresh several times the product +page of bookinfo app to start generating traces. + + +### Kiali + +`Kiali` is an open-source project that provides a graphical user interface to +visualize the service mesh topology, see [Kiali](https://istio.io/latest/docs/ops/integrations/kiali/) for more information. + +To install Kiali, perform the following steps: + +1. Deploy `Kiali`: + + ```sh + $ oc apply -f https://raw.githubusercontent.com/istio/istio/master/samples/addons/kiali.yaml + ``` + +2. Access to `Kiali` console: + + * Expose the `Kiali` service externally: + + ```sh + $ oc expose service kiali -n istio-system + ``` + + * Get the route of the service and open the URL in the web browser + + ```sh + $ oc get route kiali -o jsonpath='{.spec.host}' -n istio-system + ``` From f896eb5c1e82e1a3a0db35b0afce0d259b578d97 Mon Sep 17 00:00:00 2001 From: Francisco Herrera Date: Wed, 18 Sep 2024 14:37:17 +0200 Subject: [PATCH 04/25] Increase run operator-sdk run bundle timeout in OLM deploy (#345) Signed-off-by: frherrer --- tests/e2e/common-operator-integ-suite.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e/common-operator-integ-suite.sh b/tests/e2e/common-operator-integ-suite.sh index dd5625bb8..d4fdedb1d 100755 --- a/tests/e2e/common-operator-integ-suite.sh +++ b/tests/e2e/common-operator-integ-suite.sh @@ -237,7 +237,7 @@ if [ "${SKIP_BUILD}" == "false" ]; then # Create operator namespace ${COMMAND} create ns "${NAMESPACE}" || echo "Creation of namespace ${NAMESPACE} failed with the message: $?" 
# Deploy the operator using OLM - ${OPERATOR_SDK} run bundle "${BUNDLE_IMG}" -n "${NAMESPACE}" --skip-tls + ${OPERATOR_SDK} run bundle "${BUNDLE_IMG}" -n "${NAMESPACE}" --skip-tls --timeout 5m # Wait for the operator to be ready ${COMMAND} wait --for=condition=available deployment/"${DEPLOYMENT_NAME}" -n "${NAMESPACE}" --timeout=5m From 5aea1349c473263bf8f4a3051826631f1617b57a Mon Sep 17 00:00:00 2001 From: Francisco Herrera Date: Thu, 19 Sep 2024 17:13:18 +0200 Subject: [PATCH 05/25] Update latest Istio version to latest commit upstream (#348) Signed-off-by: frherrer --- .../sailoperator.clusterserviceversion.yaml | 20 ++++---- chart/values.yaml | 2 +- resources/latest/charts/base/Chart.yaml | 4 +- .../latest/charts/base/crds/crd-all.gen.yaml | 6 --- .../profile-compatibility-version-1.21.yaml | 5 ++ .../profile-compatibility-version-1.22.yaml | 5 ++ .../profile-compatibility-version-1.23.yaml | 11 +++- .../base/files/profile-openshift-ambient.yaml | 28 ---------- .../base/files/profile-platform-k3d.yaml | 7 +++ .../base/files/profile-platform-k3s.yaml | 7 +++ .../base/files/profile-platform-microk8s.yaml | 7 +++ .../base/files/profile-platform-minikube.yaml | 6 +++ .../files/profile-platform-openshift.yaml} | 7 +-- .../latest/charts/base/templates/crds.yaml | 10 ++++ .../charts/base/templates/endpoints.yaml | 2 +- .../charts/base/templates/services.yaml | 2 +- .../base/templates/zzy_descope_legacy.yaml | 3 ++ .../charts/base/templates/zzz_profile.yaml | 7 +++ resources/latest/charts/base/values.yaml | 4 ++ resources/latest/charts/cni/Chart.yaml | 4 +- .../profile-compatibility-version-1.21.yaml | 5 ++ .../profile-compatibility-version-1.22.yaml | 5 ++ .../profile-compatibility-version-1.23.yaml | 11 +++- .../cni/files/profile-openshift-ambient.yaml | 28 ---------- .../cni/files/profile-platform-k3d.yaml | 7 +++ .../cni/files/profile-platform-k3s.yaml | 7 +++ .../cni/files/profile-platform-microk8s.yaml | 7 +++ .../cni/files/profile-platform-minikube.yaml | 6 +++ .../files/profile-platform-openshift.yaml} | 7 +-- .../charts/cni/templates/clusterrole.yaml | 2 +- .../charts/cni/templates/daemonset.yaml | 1 - .../charts/cni/templates/zzz_profile.yaml | 7 +++ resources/latest/charts/cni/values.yaml | 2 +- resources/latest/charts/gateway/Chart.yaml | 4 +- .../profile-compatibility-version-1.21.yaml | 5 ++ .../profile-compatibility-version-1.22.yaml | 5 ++ .../profile-compatibility-version-1.23.yaml | 11 +++- .../files/profile-openshift-ambient.yaml | 28 ---------- .../gateway/files/profile-platform-k3d.yaml | 7 +++ .../gateway/files/profile-platform-k3s.yaml | 7 +++ .../files/profile-platform-microk8s.yaml | 7 +++ .../files/profile-platform-minikube.yaml | 6 +++ .../files/profile-platform-openshift.yaml} | 7 +-- .../charts/gateway/templates/deployment.yaml | 13 ++++- .../charts/gateway/templates/zzz_profile.yaml | 7 +++ .../latest/charts/gateway/values.schema.json | 9 ++++ resources/latest/charts/gateway/values.yaml | 12 +++++ .../latest/charts/istiod-remote/Chart.yaml | 4 +- .../files/injection-template.yaml | 2 + .../profile-compatibility-version-1.21.yaml | 5 ++ .../profile-compatibility-version-1.22.yaml | 5 ++ .../profile-compatibility-version-1.23.yaml | 11 +++- .../files/profile-openshift-ambient.yaml | 28 ---------- .../files/profile-platform-k3d.yaml | 7 +++ .../files/profile-platform-k3s.yaml | 7 +++ .../files/profile-platform-microk8s.yaml | 7 +++ .../files/profile-platform-minikube.yaml | 6 +++ .../files/profile-platform-openshift.yaml} | 7 +-- 
.../istiod-remote/templates/clusterrole.yaml | 51 +++++++++++++------ .../istiod-remote/templates/endpoints.yaml | 2 +- .../templates/mutatingwebhook.yaml | 2 +- .../istiod-remote/templates/services.yaml | 2 +- .../istiod-remote/templates/zzz_profile.yaml | 7 +++ .../latest/charts/istiod-remote/values.yaml | 15 +++++- resources/latest/charts/istiod/Chart.yaml | 4 +- .../istiod/files/injection-template.yaml | 2 + .../profile-compatibility-version-1.21.yaml | 5 ++ .../profile-compatibility-version-1.22.yaml | 5 ++ .../profile-compatibility-version-1.23.yaml | 11 +++- .../files/profile-openshift-ambient.yaml | 28 ---------- .../istiod/files/profile-openshift.yaml | 20 -------- .../istiod/files/profile-platform-k3d.yaml | 7 +++ .../istiod/files/profile-platform-k3s.yaml | 7 +++ .../files/profile-platform-microk8s.yaml | 7 +++ .../files/profile-platform-minikube.yaml | 6 +++ .../files/profile-platform-openshift.yaml | 17 +++++++ .../latest/charts/istiod/files/waypoint.yaml | 16 ++++++ .../charts/istiod/templates/clusterrole.yaml | 51 +++++++++++++------ .../charts/istiod/templates/deployment.yaml | 14 ++++- .../istiod/templates/mutatingwebhook.yaml | 2 +- .../istiod/templates/revision-tags.yaml | 2 +- .../charts/istiod/templates/zzz_profile.yaml | 7 +++ resources/latest/charts/istiod/values.yaml | 20 +++++++- resources/latest/charts/ztunnel/Chart.yaml | 4 +- .../profile-compatibility-version-1.21.yaml | 5 ++ .../profile-compatibility-version-1.22.yaml | 5 ++ .../profile-compatibility-version-1.23.yaml | 11 +++- .../files/profile-openshift-ambient.yaml | 28 ---------- .../ztunnel/files/profile-openshift.yaml | 20 -------- .../ztunnel/files/profile-platform-k3d.yaml | 7 +++ .../ztunnel/files/profile-platform-k3s.yaml | 7 +++ .../files/profile-platform-microk8s.yaml | 7 +++ .../files/profile-platform-minikube.yaml | 6 +++ .../files/profile-platform-openshift.yaml | 17 +++++++ .../charts/ztunnel/templates/daemonset.yaml | 2 +- .../charts/ztunnel/templates/zzz_profile.yaml | 7 +++ resources/latest/charts/ztunnel/values.yaml | 2 +- versions.yaml | 14 ++--- 98 files changed, 593 insertions(+), 318 deletions(-) delete mode 100644 resources/latest/charts/base/files/profile-openshift-ambient.yaml create mode 100644 resources/latest/charts/base/files/profile-platform-k3d.yaml create mode 100644 resources/latest/charts/base/files/profile-platform-k3s.yaml create mode 100644 resources/latest/charts/base/files/profile-platform-microk8s.yaml create mode 100644 resources/latest/charts/base/files/profile-platform-minikube.yaml rename resources/latest/charts/{gateway/files/profile-openshift.yaml => base/files/profile-platform-openshift.yaml} (84%) create mode 100644 resources/latest/charts/base/templates/zzy_descope_legacy.yaml delete mode 100644 resources/latest/charts/cni/files/profile-openshift-ambient.yaml create mode 100644 resources/latest/charts/cni/files/profile-platform-k3d.yaml create mode 100644 resources/latest/charts/cni/files/profile-platform-k3s.yaml create mode 100644 resources/latest/charts/cni/files/profile-platform-microk8s.yaml create mode 100644 resources/latest/charts/cni/files/profile-platform-minikube.yaml rename resources/latest/charts/{base/files/profile-openshift.yaml => cni/files/profile-platform-openshift.yaml} (84%) delete mode 100644 resources/latest/charts/gateway/files/profile-openshift-ambient.yaml create mode 100644 resources/latest/charts/gateway/files/profile-platform-k3d.yaml create mode 100644 resources/latest/charts/gateway/files/profile-platform-k3s.yaml create mode 
100644 resources/latest/charts/gateway/files/profile-platform-microk8s.yaml create mode 100644 resources/latest/charts/gateway/files/profile-platform-minikube.yaml rename resources/latest/charts/{istiod-remote/files/profile-openshift.yaml => gateway/files/profile-platform-openshift.yaml} (84%) delete mode 100644 resources/latest/charts/istiod-remote/files/profile-openshift-ambient.yaml create mode 100644 resources/latest/charts/istiod-remote/files/profile-platform-k3d.yaml create mode 100644 resources/latest/charts/istiod-remote/files/profile-platform-k3s.yaml create mode 100644 resources/latest/charts/istiod-remote/files/profile-platform-microk8s.yaml create mode 100644 resources/latest/charts/istiod-remote/files/profile-platform-minikube.yaml rename resources/latest/charts/{cni/files/profile-openshift.yaml => istiod-remote/files/profile-platform-openshift.yaml} (84%) delete mode 100644 resources/latest/charts/istiod/files/profile-openshift-ambient.yaml delete mode 100644 resources/latest/charts/istiod/files/profile-openshift.yaml create mode 100644 resources/latest/charts/istiod/files/profile-platform-k3d.yaml create mode 100644 resources/latest/charts/istiod/files/profile-platform-k3s.yaml create mode 100644 resources/latest/charts/istiod/files/profile-platform-microk8s.yaml create mode 100644 resources/latest/charts/istiod/files/profile-platform-minikube.yaml create mode 100644 resources/latest/charts/istiod/files/profile-platform-openshift.yaml delete mode 100644 resources/latest/charts/ztunnel/files/profile-openshift-ambient.yaml delete mode 100644 resources/latest/charts/ztunnel/files/profile-openshift.yaml create mode 100644 resources/latest/charts/ztunnel/files/profile-platform-k3d.yaml create mode 100644 resources/latest/charts/ztunnel/files/profile-platform-k3s.yaml create mode 100644 resources/latest/charts/ztunnel/files/profile-platform-microk8s.yaml create mode 100644 resources/latest/charts/ztunnel/files/profile-platform-minikube.yaml create mode 100644 resources/latest/charts/ztunnel/files/profile-platform-openshift.yaml diff --git a/bundle/manifests/sailoperator.clusterserviceversion.yaml b/bundle/manifests/sailoperator.clusterserviceversion.yaml index c30478933..47e035fbc 100644 --- a/bundle/manifests/sailoperator.clusterserviceversion.yaml +++ b/bundle/manifests/sailoperator.clusterserviceversion.yaml @@ -34,7 +34,7 @@ metadata: capabilities: Seamless Upgrades categories: OpenShift Optional, Integration & Delivery, Networking, Security containerImage: quay.io/maistra-dev/sail-operator:0.2-latest - createdAt: "2024-08-21T08:58:14Z" + createdAt: "2024-09-19T14:41:31Z" description: Experimental operator for installing Istio service mesh features.operators.openshift.io/cnf: "false" features.operators.openshift.io/cni: "true" @@ -374,7 +374,7 @@ spec: - v1.23.0 - v1.22.3 - v1.21.5 - - latest (b28bdd77) + - latest (fe2a0468) [See this page](https://github.com/istio-ecosystem/sail-operator/blob/main/bundle/README.md) for instructions on how to use it. 
displayName: Sail Operator @@ -598,10 +598,10 @@ spec: template: metadata: annotations: - images.latest.cni: gcr.io/istio-testing/install-cni:1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 - images.latest.istiod: gcr.io/istio-testing/pilot:1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 - images.latest.proxy: gcr.io/istio-testing/proxyv2:1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 - images.latest.ztunnel: gcr.io/istio-testing/ztunnel:1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 + images.latest.cni: gcr.io/istio-testing/install-cni:1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe + images.latest.istiod: gcr.io/istio-testing/pilot:1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe + images.latest.proxy: gcr.io/istio-testing/proxyv2:1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe + images.latest.ztunnel: gcr.io/istio-testing/ztunnel:1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe images.v1_21_5.cni: docker.io/istio/install-cni:1.21.5 images.v1_21_5.istiod: docker.io/istio/pilot:1.21.5 images.v1_21_5.proxy: docker.io/istio/proxyv2:1.21.5 @@ -767,13 +767,13 @@ spec: provider: name: Red Hat, Inc. relatedImages: - - image: gcr.io/istio-testing/install-cni:1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 + - image: gcr.io/istio-testing/install-cni:1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe name: latest.cni - - image: gcr.io/istio-testing/pilot:1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 + - image: gcr.io/istio-testing/pilot:1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe name: latest.istiod - - image: gcr.io/istio-testing/proxyv2:1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 + - image: gcr.io/istio-testing/proxyv2:1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe name: latest.proxy - - image: gcr.io/istio-testing/ztunnel:1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 + - image: gcr.io/istio-testing/ztunnel:1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe name: latest.ztunnel - image: docker.io/istio/install-cni:1.21.5 name: v1_21_5.cni diff --git a/chart/values.yaml b/chart/values.yaml index de6eec47a..d86bac087 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -19,7 +19,7 @@ csv: - v1.23.0 - v1.22.3 - v1.21.5 - - latest (b28bdd77) + - latest (fe2a0468) [See this page](https://github.com/istio-ecosystem/sail-operator/blob/main/bundle/README.md) for instructions on how to use it. 
support: Community based diff --git a/resources/latest/charts/base/Chart.yaml b/resources/latest/charts/base/Chart.yaml index 7d589bb9e..7ad77f51a 100644 --- a/resources/latest/charts/base/Chart.yaml +++ b/resources/latest/charts/base/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 +appVersion: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe description: Helm chart for deploying Istio cluster resources and CRDs icon: https://istio.io/latest/favicons/android-192x192.png keywords: @@ -7,4 +7,4 @@ keywords: name: base sources: - https://github.com/istio/istio -version: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 +version: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe diff --git a/resources/latest/charts/base/crds/crd-all.gen.yaml b/resources/latest/charts/base/crds/crd-all.gen.yaml index 675d42a84..45c7e2b61 100644 --- a/resources/latest/charts/base/crds/crd-all.gen.yaml +++ b/resources/latest/charts/base/crds/crd-all.gen.yaml @@ -13329,8 +13329,6 @@ spec: type: integer type: object x-kubernetes-validations: - - message: Address is required - rule: has(self.address) || has(self.network) - message: UDS may not include ports rule: '(has(self.address) && self.address.startsWith(''unix://'')) ? !has(self.ports) : true' @@ -13595,8 +13593,6 @@ spec: type: integer type: object x-kubernetes-validations: - - message: Address is required - rule: has(self.address) || has(self.network) - message: UDS may not include ports rule: '(has(self.address) && self.address.startsWith(''unix://'')) ? !has(self.ports) : true' @@ -13861,8 +13857,6 @@ spec: type: integer type: object x-kubernetes-validations: - - message: Address is required - rule: has(self.address) || has(self.network) - message: UDS may not include ports rule: '(has(self.address) && self.address.startsWith(''unix://'')) ? 
!has(self.ports) : true' diff --git a/resources/latest/charts/base/files/profile-compatibility-version-1.21.yaml b/resources/latest/charts/base/files/profile-compatibility-version-1.21.yaml index 2b72bd93c..c8da4d2e1 100644 --- a/resources/latest/charts/base/files/profile-compatibility-version-1.21.yaml +++ b/resources/latest/charts/base/files/profile-compatibility-version-1.21.yaml @@ -15,6 +15,8 @@ pilot: ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" meshConfig: # 1.22 behavioral changes @@ -23,6 +25,9 @@ meshConfig: ISTIO_DELTA_XDS: "false" # 1.23 behavioral changes ENABLE_DELIMITED_STATS_TAG_REGEX: "false" + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" tracing: zipkin: address: zipkin.istio-system:9411 diff --git a/resources/latest/charts/base/files/profile-compatibility-version-1.22.yaml b/resources/latest/charts/base/files/profile-compatibility-version-1.22.yaml index 2badb70a5..70d8eb40c 100644 --- a/resources/latest/charts/base/files/profile-compatibility-version-1.22.yaml +++ b/resources/latest/charts/base/files/profile-compatibility-version-1.22.yaml @@ -11,6 +11,8 @@ pilot: ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" meshConfig: defaultConfig: @@ -19,3 +21,6 @@ meshConfig: ENABLE_DEFERRED_CLUSTER_CREATION: "false" # 1.23 behavioral changes ENABLE_DELIMITED_STATS_TAG_REGEX: "false" + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" diff --git a/resources/latest/charts/base/files/profile-compatibility-version-1.23.yaml b/resources/latest/charts/base/files/profile-compatibility-version-1.23.yaml index f855500b0..636bb6f15 100644 --- a/resources/latest/charts/base/files/profile-compatibility-version-1.23.yaml +++ b/resources/latest/charts/base/files/profile-compatibility-version-1.23.yaml @@ -7,4 +7,13 @@ pilot: # 1.24 behavioral changes ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" - PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" \ No newline at end of file + PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" + +meshConfig: + defaultConfig: + proxyMetadata: + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" diff --git a/resources/latest/charts/base/files/profile-openshift-ambient.yaml b/resources/latest/charts/base/files/profile-openshift-ambient.yaml deleted file mode 100644 index 444665932..000000000 --- a/resources/latest/charts/base/files/profile-openshift-ambient.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# WARNING: DO NOT EDIT, THIS FILE IS A COPY. -# The original version of this file is located at /manifests/helm-profiles directory. -# If you want to make a change in this file, edit the original one and run "make gen". 
- -meshConfig: - defaultConfig: - proxyMetadata: - ISTIO_META_ENABLE_HBONE: "true" -global: - platform: openshift - variant: distroless - seLinuxOptions: - type: spc_t -cni: - ambient: - enabled: true - cniBinDir: /var/lib/cni/bin - cniConfDir: /etc/cni/multus/net.d - chained: false - cniConfFileName: "istio-cni.conf" - logLevel: info - provider: "multus" -pilot: - cni: - enabled: true - provider: "multus" - env: - PILOT_ENABLE_AMBIENT: "true" \ No newline at end of file diff --git a/resources/latest/charts/base/files/profile-platform-k3d.yaml b/resources/latest/charts/base/files/profile-platform-k3d.yaml new file mode 100644 index 000000000..cd86d9ec5 --- /dev/null +++ b/resources/latest/charts/base/files/profile-platform-k3d.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + cniBinDir: /bin diff --git a/resources/latest/charts/base/files/profile-platform-k3s.yaml b/resources/latest/charts/base/files/profile-platform-k3s.yaml new file mode 100644 index 000000000..f3f2884aa --- /dev/null +++ b/resources/latest/charts/base/files/profile-platform-k3s.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + cniBinDir: /var/lib/rancher/k3s/data/current/bin/ diff --git a/resources/latest/charts/base/files/profile-platform-microk8s.yaml b/resources/latest/charts/base/files/profile-platform-microk8s.yaml new file mode 100644 index 000000000..57d7f5e3c --- /dev/null +++ b/resources/latest/charts/base/files/profile-platform-microk8s.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/snap/microk8s/current/args/cni-network + cniBinDir: /var/snap/microk8s/current/opt/cni/bin diff --git a/resources/latest/charts/base/files/profile-platform-minikube.yaml b/resources/latest/charts/base/files/profile-platform-minikube.yaml new file mode 100644 index 000000000..fa9992e20 --- /dev/null +++ b/resources/latest/charts/base/files/profile-platform-minikube.yaml @@ -0,0 +1,6 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniNetnsDir: /var/run/docker/netns diff --git a/resources/latest/charts/gateway/files/profile-openshift.yaml b/resources/latest/charts/base/files/profile-platform-openshift.yaml similarity index 84% rename from resources/latest/charts/gateway/files/profile-openshift.yaml rename to resources/latest/charts/base/files/profile-platform-openshift.yaml index 38357bd99..69eda2b1d 100644 --- a/resources/latest/charts/gateway/files/profile-openshift.yaml +++ b/resources/latest/charts/base/files/profile-platform-openshift.yaml @@ -3,18 +3,15 @@ # If you want to make a change in this file, edit the original one and run "make gen". 
# The OpenShift profile provides a basic set of settings to run Istio on OpenShift -# CNI must be installed. cni: cniBinDir: /var/lib/cni/bin cniConfDir: /etc/cni/multus/net.d chained: false cniConfFileName: "istio-cni.conf" - logLevel: info provider: "multus" -global: - platform: openshift pilot: cni: enabled: true provider: "multus" -platform: openshift \ No newline at end of file +seLinuxOptions: + type: spc_t diff --git a/resources/latest/charts/base/templates/crds.yaml b/resources/latest/charts/base/templates/crds.yaml index af9901c6e..7714ad8c1 100644 --- a/resources/latest/charts/base/templates/crds.yaml +++ b/resources/latest/charts/base/templates/crds.yaml @@ -1,3 +1,13 @@ {{- if .Values.base.enableCRDTemplates }} +{{- if .Values.base.excludedCRDs }} +{{- range $crd := .Files.Get "crds/crd-all.gen.yaml"|splitList "\n---\n"}} +{{- $name := (index ($crd |fromYaml) "metadata" "name") }} +{{- if not (has $name $.Values.base.excludedCRDs)}} +{{$crd}} +--- +{{- end }} +{{- end }} +{{- else }} {{ .Files.Get "crds/crd-all.gen.yaml" }} {{- end }} +{{- end }} diff --git a/resources/latest/charts/base/templates/endpoints.yaml b/resources/latest/charts/base/templates/endpoints.yaml index 1cc26dd78..1190dfa9b 100644 --- a/resources/latest/charts/base/templates/endpoints.yaml +++ b/resources/latest/charts/base/templates/endpoints.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: Endpoints metadata: - {{- if .Values.pilot.enabled }} + {{- if .Values.enabled }} name: istiod{{- if .Values.revision }}-{{ .Values.revision}}{{- end }}-remote {{- else }} name: istiod{{- if .Values.revision }}-{{ .Values.revision}}{{- end }} diff --git a/resources/latest/charts/base/templates/services.yaml b/resources/latest/charts/base/templates/services.yaml index 4290f2848..fe9b701aa 100644 --- a/resources/latest/charts/base/templates/services.yaml +++ b/resources/latest/charts/base/templates/services.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - {{- if .Values.pilot.enabled }} + {{- if .Values.enabled }} # when local istiod is enabled, we can't use istiod service name to reach the remote control plane name: istiod{{- if .Values.revision }}-{{ .Values.revision}}{{- end }}-remote {{- else }} diff --git a/resources/latest/charts/base/templates/zzy_descope_legacy.yaml b/resources/latest/charts/base/templates/zzy_descope_legacy.yaml new file mode 100644 index 000000000..ae8fced29 --- /dev/null +++ b/resources/latest/charts/base/templates/zzy_descope_legacy.yaml @@ -0,0 +1,3 @@ +{{/* Copy anything under `.pilot` to `.`, to avoid the need to specify a redundant prefix. +Due to the file naming, this always happens after zzz_profile.yaml */}} +{{- $_ := mustMergeOverwrite $.Values (index $.Values "pilot") }} \ No newline at end of file diff --git a/resources/latest/charts/base/templates/zzz_profile.yaml b/resources/latest/charts/base/templates/zzz_profile.yaml index b96dcafcb..4eea73812 100644 --- a/resources/latest/charts/base/templates/zzz_profile.yaml +++ b/resources/latest/charts/base/templates/zzz_profile.yaml @@ -33,6 +33,13 @@ Finally, we can set all of that under .Values so the chart behaves without aware {{ fail (cat "unknown compatibility version" $.Values.compatibilityVersion) }} {{- end }} {{- end }} +{{- if $globals.platform }} +{{- with $.Files.Get (printf "files/profile-platform-%s.yaml" $globals.platform) }} +{{- $ignore := mustMergeOverwrite $profile (. 
| fromYaml) }} +{{- else }} +{{ fail (cat "unknown platform" $globals.platform) }} +{{- end }} +{{- end }} {{- if $profile }} {{- $a := mustMergeOverwrite $defaults $profile }} {{- end }} diff --git a/resources/latest/charts/base/values.yaml b/resources/latest/charts/base/values.yaml index fae4e61e4..ef078c11d 100644 --- a/resources/latest/charts/base/values.yaml +++ b/resources/latest/charts/base/values.yaml @@ -25,6 +25,10 @@ defaults: ipFamilies: [] base: + # A list of CRDs to exclude. Requires `enableCRDTemplates` to be true. + # Example: `excludedCRDs: ["envoyfilters.networking.istio.io"]`. + # Note: when installing with `istioctl`, `enableIstioConfigCRDs=false` must also be set. + excludedCRDs: [] # Used for helm2 to add the CRDs to templates. enableCRDTemplates: false diff --git a/resources/latest/charts/cni/Chart.yaml b/resources/latest/charts/cni/Chart.yaml index 72b0cbb69..41e66e2c6 100644 --- a/resources/latest/charts/cni/Chart.yaml +++ b/resources/latest/charts/cni/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 +appVersion: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe description: Helm chart for istio-cni components icon: https://istio.io/latest/favicons/android-192x192.png keywords: @@ -8,4 +8,4 @@ keywords: name: cni sources: - https://github.com/istio/istio -version: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 +version: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe diff --git a/resources/latest/charts/cni/files/profile-compatibility-version-1.21.yaml b/resources/latest/charts/cni/files/profile-compatibility-version-1.21.yaml index 2b72bd93c..c8da4d2e1 100644 --- a/resources/latest/charts/cni/files/profile-compatibility-version-1.21.yaml +++ b/resources/latest/charts/cni/files/profile-compatibility-version-1.21.yaml @@ -15,6 +15,8 @@ pilot: ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" meshConfig: # 1.22 behavioral changes @@ -23,6 +25,9 @@ meshConfig: ISTIO_DELTA_XDS: "false" # 1.23 behavioral changes ENABLE_DELIMITED_STATS_TAG_REGEX: "false" + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" tracing: zipkin: address: zipkin.istio-system:9411 diff --git a/resources/latest/charts/cni/files/profile-compatibility-version-1.22.yaml b/resources/latest/charts/cni/files/profile-compatibility-version-1.22.yaml index 2badb70a5..70d8eb40c 100644 --- a/resources/latest/charts/cni/files/profile-compatibility-version-1.22.yaml +++ b/resources/latest/charts/cni/files/profile-compatibility-version-1.22.yaml @@ -11,6 +11,8 @@ pilot: ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" meshConfig: defaultConfig: @@ -19,3 +21,6 @@ meshConfig: ENABLE_DEFERRED_CLUSTER_CREATION: "false" # 1.23 behavioral changes ENABLE_DELIMITED_STATS_TAG_REGEX: "false" + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" diff --git a/resources/latest/charts/cni/files/profile-compatibility-version-1.23.yaml b/resources/latest/charts/cni/files/profile-compatibility-version-1.23.yaml index f855500b0..636bb6f15 100644 --- 
a/resources/latest/charts/cni/files/profile-compatibility-version-1.23.yaml +++ b/resources/latest/charts/cni/files/profile-compatibility-version-1.23.yaml @@ -7,4 +7,13 @@ pilot: # 1.24 behavioral changes ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" - PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" \ No newline at end of file + PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" + +meshConfig: + defaultConfig: + proxyMetadata: + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" diff --git a/resources/latest/charts/cni/files/profile-openshift-ambient.yaml b/resources/latest/charts/cni/files/profile-openshift-ambient.yaml deleted file mode 100644 index 444665932..000000000 --- a/resources/latest/charts/cni/files/profile-openshift-ambient.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# WARNING: DO NOT EDIT, THIS FILE IS A COPY. -# The original version of this file is located at /manifests/helm-profiles directory. -# If you want to make a change in this file, edit the original one and run "make gen". - -meshConfig: - defaultConfig: - proxyMetadata: - ISTIO_META_ENABLE_HBONE: "true" -global: - platform: openshift - variant: distroless - seLinuxOptions: - type: spc_t -cni: - ambient: - enabled: true - cniBinDir: /var/lib/cni/bin - cniConfDir: /etc/cni/multus/net.d - chained: false - cniConfFileName: "istio-cni.conf" - logLevel: info - provider: "multus" -pilot: - cni: - enabled: true - provider: "multus" - env: - PILOT_ENABLE_AMBIENT: "true" \ No newline at end of file diff --git a/resources/latest/charts/cni/files/profile-platform-k3d.yaml b/resources/latest/charts/cni/files/profile-platform-k3d.yaml new file mode 100644 index 000000000..cd86d9ec5 --- /dev/null +++ b/resources/latest/charts/cni/files/profile-platform-k3d.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + cniBinDir: /bin diff --git a/resources/latest/charts/cni/files/profile-platform-k3s.yaml b/resources/latest/charts/cni/files/profile-platform-k3s.yaml new file mode 100644 index 000000000..f3f2884aa --- /dev/null +++ b/resources/latest/charts/cni/files/profile-platform-k3s.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + cniBinDir: /var/lib/rancher/k3s/data/current/bin/ diff --git a/resources/latest/charts/cni/files/profile-platform-microk8s.yaml b/resources/latest/charts/cni/files/profile-platform-microk8s.yaml new file mode 100644 index 000000000..57d7f5e3c --- /dev/null +++ b/resources/latest/charts/cni/files/profile-platform-microk8s.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". 
+ +cni: + cniConfDir: /var/snap/microk8s/current/args/cni-network + cniBinDir: /var/snap/microk8s/current/opt/cni/bin diff --git a/resources/latest/charts/cni/files/profile-platform-minikube.yaml b/resources/latest/charts/cni/files/profile-platform-minikube.yaml new file mode 100644 index 000000000..fa9992e20 --- /dev/null +++ b/resources/latest/charts/cni/files/profile-platform-minikube.yaml @@ -0,0 +1,6 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniNetnsDir: /var/run/docker/netns diff --git a/resources/latest/charts/base/files/profile-openshift.yaml b/resources/latest/charts/cni/files/profile-platform-openshift.yaml similarity index 84% rename from resources/latest/charts/base/files/profile-openshift.yaml rename to resources/latest/charts/cni/files/profile-platform-openshift.yaml index 38357bd99..69eda2b1d 100644 --- a/resources/latest/charts/base/files/profile-openshift.yaml +++ b/resources/latest/charts/cni/files/profile-platform-openshift.yaml @@ -3,18 +3,15 @@ # If you want to make a change in this file, edit the original one and run "make gen". # The OpenShift profile provides a basic set of settings to run Istio on OpenShift -# CNI must be installed. cni: cniBinDir: /var/lib/cni/bin cniConfDir: /etc/cni/multus/net.d chained: false cniConfFileName: "istio-cni.conf" - logLevel: info provider: "multus" -global: - platform: openshift pilot: cni: enabled: true provider: "multus" -platform: openshift \ No newline at end of file +seLinuxOptions: + type: spc_t diff --git a/resources/latest/charts/cni/templates/clusterrole.yaml b/resources/latest/charts/cni/templates/clusterrole.yaml index a1640c5d4..30f159603 100644 --- a/resources/latest/charts/cni/templates/clusterrole.yaml +++ b/resources/latest/charts/cni/templates/clusterrole.yaml @@ -18,7 +18,7 @@ rules: - apiGroups: [""] resources: ["pods","nodes","namespaces"] verbs: ["get", "list", "watch"] -{{- if (eq .Values.platform "openshift") }} +{{- if (eq (coalesce .Values.platform .Values.global.platform) "openshift") }} - apiGroups: ["security.openshift.io"] resources: ["securitycontextconstraints"] resourceNames: ["privileged"] diff --git a/resources/latest/charts/cni/templates/daemonset.yaml b/resources/latest/charts/cni/templates/daemonset.yaml index fad5c3287..2ce4a0665 100644 --- a/resources/latest/charts/cni/templates/daemonset.yaml +++ b/resources/latest/charts/cni/templates/daemonset.yaml @@ -82,7 +82,6 @@ spec: {{- end }} ports: - containerPort: 15014 - hostPort: 15014 name: metrics protocol: TCP readinessProbe: diff --git a/resources/latest/charts/cni/templates/zzz_profile.yaml b/resources/latest/charts/cni/templates/zzz_profile.yaml index b96dcafcb..4eea73812 100644 --- a/resources/latest/charts/cni/templates/zzz_profile.yaml +++ b/resources/latest/charts/cni/templates/zzz_profile.yaml @@ -33,6 +33,13 @@ Finally, we can set all of that under .Values so the chart behaves without aware {{ fail (cat "unknown compatibility version" $.Values.compatibilityVersion) }} {{- end }} {{- end }} +{{- if $globals.platform }} +{{- with $.Files.Get (printf "files/profile-platform-%s.yaml" $globals.platform) }} +{{- $ignore := mustMergeOverwrite $profile (. 
| fromYaml) }} +{{- else }} +{{ fail (cat "unknown platform" $globals.platform) }} +{{- end }} +{{- end }} {{- if $profile }} {{- $a := mustMergeOverwrite $defaults $profile }} {{- end }} diff --git a/resources/latest/charts/cni/values.yaml b/resources/latest/charts/cni/values.yaml index ff80bed0a..3fdef82fe 100644 --- a/resources/latest/charts/cni/values.yaml +++ b/resources/latest/charts/cni/values.yaml @@ -112,7 +112,7 @@ defaults: hub: gcr.io/istio-testing # Default tag for Istio images. - tag: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 + tag: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe # Variant of the image to use. # Currently supported are: [debug, distroless] diff --git a/resources/latest/charts/gateway/Chart.yaml b/resources/latest/charts/gateway/Chart.yaml index 344196f94..aad800d17 100644 --- a/resources/latest/charts/gateway/Chart.yaml +++ b/resources/latest/charts/gateway/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 +appVersion: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe description: Helm chart for deploying Istio gateways icon: https://istio.io/latest/favicons/android-192x192.png keywords: @@ -9,4 +9,4 @@ name: gateway sources: - https://github.com/istio/istio type: application -version: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 +version: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe diff --git a/resources/latest/charts/gateway/files/profile-compatibility-version-1.21.yaml b/resources/latest/charts/gateway/files/profile-compatibility-version-1.21.yaml index 2b72bd93c..c8da4d2e1 100644 --- a/resources/latest/charts/gateway/files/profile-compatibility-version-1.21.yaml +++ b/resources/latest/charts/gateway/files/profile-compatibility-version-1.21.yaml @@ -15,6 +15,8 @@ pilot: ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" meshConfig: # 1.22 behavioral changes @@ -23,6 +25,9 @@ meshConfig: ISTIO_DELTA_XDS: "false" # 1.23 behavioral changes ENABLE_DELIMITED_STATS_TAG_REGEX: "false" + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" tracing: zipkin: address: zipkin.istio-system:9411 diff --git a/resources/latest/charts/gateway/files/profile-compatibility-version-1.22.yaml b/resources/latest/charts/gateway/files/profile-compatibility-version-1.22.yaml index 2badb70a5..70d8eb40c 100644 --- a/resources/latest/charts/gateway/files/profile-compatibility-version-1.22.yaml +++ b/resources/latest/charts/gateway/files/profile-compatibility-version-1.22.yaml @@ -11,6 +11,8 @@ pilot: ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" meshConfig: defaultConfig: @@ -19,3 +21,6 @@ meshConfig: ENABLE_DEFERRED_CLUSTER_CREATION: "false" # 1.23 behavioral changes ENABLE_DELIMITED_STATS_TAG_REGEX: "false" + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" diff --git a/resources/latest/charts/gateway/files/profile-compatibility-version-1.23.yaml b/resources/latest/charts/gateway/files/profile-compatibility-version-1.23.yaml index f855500b0..636bb6f15 100644 --- 
a/resources/latest/charts/gateway/files/profile-compatibility-version-1.23.yaml +++ b/resources/latest/charts/gateway/files/profile-compatibility-version-1.23.yaml @@ -7,4 +7,13 @@ pilot: # 1.24 behavioral changes ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" - PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" \ No newline at end of file + PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" + +meshConfig: + defaultConfig: + proxyMetadata: + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" diff --git a/resources/latest/charts/gateway/files/profile-openshift-ambient.yaml b/resources/latest/charts/gateway/files/profile-openshift-ambient.yaml deleted file mode 100644 index 444665932..000000000 --- a/resources/latest/charts/gateway/files/profile-openshift-ambient.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# WARNING: DO NOT EDIT, THIS FILE IS A COPY. -# The original version of this file is located at /manifests/helm-profiles directory. -# If you want to make a change in this file, edit the original one and run "make gen". - -meshConfig: - defaultConfig: - proxyMetadata: - ISTIO_META_ENABLE_HBONE: "true" -global: - platform: openshift - variant: distroless - seLinuxOptions: - type: spc_t -cni: - ambient: - enabled: true - cniBinDir: /var/lib/cni/bin - cniConfDir: /etc/cni/multus/net.d - chained: false - cniConfFileName: "istio-cni.conf" - logLevel: info - provider: "multus" -pilot: - cni: - enabled: true - provider: "multus" - env: - PILOT_ENABLE_AMBIENT: "true" \ No newline at end of file diff --git a/resources/latest/charts/gateway/files/profile-platform-k3d.yaml b/resources/latest/charts/gateway/files/profile-platform-k3d.yaml new file mode 100644 index 000000000..cd86d9ec5 --- /dev/null +++ b/resources/latest/charts/gateway/files/profile-platform-k3d.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + cniBinDir: /bin diff --git a/resources/latest/charts/gateway/files/profile-platform-k3s.yaml b/resources/latest/charts/gateway/files/profile-platform-k3s.yaml new file mode 100644 index 000000000..f3f2884aa --- /dev/null +++ b/resources/latest/charts/gateway/files/profile-platform-k3s.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + cniBinDir: /var/lib/rancher/k3s/data/current/bin/ diff --git a/resources/latest/charts/gateway/files/profile-platform-microk8s.yaml b/resources/latest/charts/gateway/files/profile-platform-microk8s.yaml new file mode 100644 index 000000000..57d7f5e3c --- /dev/null +++ b/resources/latest/charts/gateway/files/profile-platform-microk8s.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". 
+ +cni: + cniConfDir: /var/snap/microk8s/current/args/cni-network + cniBinDir: /var/snap/microk8s/current/opt/cni/bin diff --git a/resources/latest/charts/gateway/files/profile-platform-minikube.yaml b/resources/latest/charts/gateway/files/profile-platform-minikube.yaml new file mode 100644 index 000000000..fa9992e20 --- /dev/null +++ b/resources/latest/charts/gateway/files/profile-platform-minikube.yaml @@ -0,0 +1,6 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniNetnsDir: /var/run/docker/netns diff --git a/resources/latest/charts/istiod-remote/files/profile-openshift.yaml b/resources/latest/charts/gateway/files/profile-platform-openshift.yaml similarity index 84% rename from resources/latest/charts/istiod-remote/files/profile-openshift.yaml rename to resources/latest/charts/gateway/files/profile-platform-openshift.yaml index 38357bd99..69eda2b1d 100644 --- a/resources/latest/charts/istiod-remote/files/profile-openshift.yaml +++ b/resources/latest/charts/gateway/files/profile-platform-openshift.yaml @@ -3,18 +3,15 @@ # If you want to make a change in this file, edit the original one and run "make gen". # The OpenShift profile provides a basic set of settings to run Istio on OpenShift -# CNI must be installed. cni: cniBinDir: /var/lib/cni/bin cniConfDir: /etc/cni/multus/net.d chained: false cniConfFileName: "istio-cni.conf" - logLevel: info provider: "multus" -global: - platform: openshift pilot: cni: enabled: true provider: "multus" -platform: openshift \ No newline at end of file +seLinuxOptions: + type: spc_t diff --git a/resources/latest/charts/gateway/templates/deployment.yaml b/resources/latest/charts/gateway/templates/deployment.yaml index 73ecc1a73..e9bfbbd36 100644 --- a/resources/latest/charts/gateway/templates/deployment.yaml +++ b/resources/latest/charts/gateway/templates/deployment.yaml @@ -15,6 +15,13 @@ spec: replicas: {{ . }} {{- end }} {{- end }} + {{- with .Values.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.minReadySeconds }} + minReadySeconds: {{ . }} + {{- end }} selector: matchLabels: {{- include "gateway.selectorLabels" . | nindent 6 }} @@ -93,7 +100,11 @@ spec: {{- toYaml .Values.resources | nindent 12 }} {{- with .Values.volumeMounts }} volumeMounts: - {{ toYaml . | nindent 12 }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 12 }} {{- end }} {{- with .Values.nodeSelector }} nodeSelector: diff --git a/resources/latest/charts/gateway/templates/zzz_profile.yaml b/resources/latest/charts/gateway/templates/zzz_profile.yaml index b96dcafcb..4eea73812 100644 --- a/resources/latest/charts/gateway/templates/zzz_profile.yaml +++ b/resources/latest/charts/gateway/templates/zzz_profile.yaml @@ -33,6 +33,13 @@ Finally, we can set all of that under .Values so the chart behaves without aware {{ fail (cat "unknown compatibility version" $.Values.compatibilityVersion) }} {{- end }} {{- end }} +{{- if $globals.platform }} +{{- with $.Files.Get (printf "files/profile-platform-%s.yaml" $globals.platform) }} +{{- $ignore := mustMergeOverwrite $profile (. 
| fromYaml) }} +{{- else }} +{{ fail (cat "unknown platform" $globals.platform) }} +{{- end }} +{{- end }} {{- if $profile }} {{- $a := mustMergeOverwrite $defaults $profile }} {{- end }} diff --git a/resources/latest/charts/gateway/values.schema.json b/resources/latest/charts/gateway/values.schema.json index 4c4f0836d..3108259a9 100644 --- a/resources/latest/charts/gateway/values.schema.json +++ b/resources/latest/charts/gateway/values.schema.json @@ -60,6 +60,15 @@ "env": { "type": "object" }, + "strategy": { + "type": "object" + }, + "minReadySeconds": { + "type": [ "null", "integer" ] + }, + "readinessProbe": { + "type": [ "null", "object" ] + }, "labels": { "type": "object" }, diff --git a/resources/latest/charts/gateway/values.yaml b/resources/latest/charts/gateway/values.yaml index 72205b4a1..466b9d21c 100644 --- a/resources/latest/charts/gateway/values.yaml +++ b/resources/latest/charts/gateway/values.yaml @@ -84,6 +84,17 @@ defaults: # Pod environment variables env: {} + # Deployment Update strategy + strategy: {} + + # Sets the Deployment minReadySeconds value + minReadySeconds: + + # Optionally configure a custom readinessProbe. By default the control plane + # automatically injects the readinessProbe. If you wish to override that + # behavior, you may define your own readinessProbe here. + readinessProbe: {} + # Labels to apply to all resources labels: {} @@ -137,6 +148,7 @@ defaults: # podDisruptionBudget: {} + # Sets the per-pod terminationGracePeriodSeconds setting. terminationGracePeriodSeconds: 30 # A list of `Volumes` added into the Gateway Pods. See diff --git a/resources/latest/charts/istiod-remote/Chart.yaml b/resources/latest/charts/istiod-remote/Chart.yaml index 1f72b8275..6957bd426 100644 --- a/resources/latest/charts/istiod-remote/Chart.yaml +++ b/resources/latest/charts/istiod-remote/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 +appVersion: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe description: Helm chart for a remote cluster using an external istio control plane icon: https://istio.io/latest/favicons/android-192x192.png keywords: @@ -8,4 +8,4 @@ keywords: name: istiod-remote sources: - https://github.com/istio/istio -version: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 +version: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe diff --git a/resources/latest/charts/istiod-remote/files/injection-template.yaml b/resources/latest/charts/istiod-remote/files/injection-template.yaml index 63bc0e734..f41122f9b 100644 --- a/resources/latest/charts/istiod-remote/files/injection-template.yaml +++ b/resources/latest/charts/istiod-remote/files/injection-template.yaml @@ -125,6 +125,8 @@ spec: {{ if .Values.pilot.cni.enabled -}} - "--run-validation" - "--skip-rule-apply" + {{ else if .Values.global.proxy_init.forceApplyIptables -}} + - "--force-apply" {{ end -}} {{with .Values.global.imagePullPolicy }}imagePullPolicy: "{{.}}"{{end}} {{- if .ProxyConfig.ProxyMetadata }} diff --git a/resources/latest/charts/istiod-remote/files/profile-compatibility-version-1.21.yaml b/resources/latest/charts/istiod-remote/files/profile-compatibility-version-1.21.yaml index 2b72bd93c..c8da4d2e1 100644 --- a/resources/latest/charts/istiod-remote/files/profile-compatibility-version-1.21.yaml +++ b/resources/latest/charts/istiod-remote/files/profile-compatibility-version-1.21.yaml @@ -15,6 +15,8 @@ pilot: ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" 
PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" meshConfig: # 1.22 behavioral changes @@ -23,6 +25,9 @@ meshConfig: ISTIO_DELTA_XDS: "false" # 1.23 behavioral changes ENABLE_DELIMITED_STATS_TAG_REGEX: "false" + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" tracing: zipkin: address: zipkin.istio-system:9411 diff --git a/resources/latest/charts/istiod-remote/files/profile-compatibility-version-1.22.yaml b/resources/latest/charts/istiod-remote/files/profile-compatibility-version-1.22.yaml index 2badb70a5..70d8eb40c 100644 --- a/resources/latest/charts/istiod-remote/files/profile-compatibility-version-1.22.yaml +++ b/resources/latest/charts/istiod-remote/files/profile-compatibility-version-1.22.yaml @@ -11,6 +11,8 @@ pilot: ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" meshConfig: defaultConfig: @@ -19,3 +21,6 @@ meshConfig: ENABLE_DEFERRED_CLUSTER_CREATION: "false" # 1.23 behavioral changes ENABLE_DELIMITED_STATS_TAG_REGEX: "false" + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" diff --git a/resources/latest/charts/istiod-remote/files/profile-compatibility-version-1.23.yaml b/resources/latest/charts/istiod-remote/files/profile-compatibility-version-1.23.yaml index f855500b0..636bb6f15 100644 --- a/resources/latest/charts/istiod-remote/files/profile-compatibility-version-1.23.yaml +++ b/resources/latest/charts/istiod-remote/files/profile-compatibility-version-1.23.yaml @@ -7,4 +7,13 @@ pilot: # 1.24 behavioral changes ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" - PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" \ No newline at end of file + PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" + +meshConfig: + defaultConfig: + proxyMetadata: + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" diff --git a/resources/latest/charts/istiod-remote/files/profile-openshift-ambient.yaml b/resources/latest/charts/istiod-remote/files/profile-openshift-ambient.yaml deleted file mode 100644 index 444665932..000000000 --- a/resources/latest/charts/istiod-remote/files/profile-openshift-ambient.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# WARNING: DO NOT EDIT, THIS FILE IS A COPY. -# The original version of this file is located at /manifests/helm-profiles directory. -# If you want to make a change in this file, edit the original one and run "make gen". 
- -meshConfig: - defaultConfig: - proxyMetadata: - ISTIO_META_ENABLE_HBONE: "true" -global: - platform: openshift - variant: distroless - seLinuxOptions: - type: spc_t -cni: - ambient: - enabled: true - cniBinDir: /var/lib/cni/bin - cniConfDir: /etc/cni/multus/net.d - chained: false - cniConfFileName: "istio-cni.conf" - logLevel: info - provider: "multus" -pilot: - cni: - enabled: true - provider: "multus" - env: - PILOT_ENABLE_AMBIENT: "true" \ No newline at end of file diff --git a/resources/latest/charts/istiod-remote/files/profile-platform-k3d.yaml b/resources/latest/charts/istiod-remote/files/profile-platform-k3d.yaml new file mode 100644 index 000000000..cd86d9ec5 --- /dev/null +++ b/resources/latest/charts/istiod-remote/files/profile-platform-k3d.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + cniBinDir: /bin diff --git a/resources/latest/charts/istiod-remote/files/profile-platform-k3s.yaml b/resources/latest/charts/istiod-remote/files/profile-platform-k3s.yaml new file mode 100644 index 000000000..f3f2884aa --- /dev/null +++ b/resources/latest/charts/istiod-remote/files/profile-platform-k3s.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + cniBinDir: /var/lib/rancher/k3s/data/current/bin/ diff --git a/resources/latest/charts/istiod-remote/files/profile-platform-microk8s.yaml b/resources/latest/charts/istiod-remote/files/profile-platform-microk8s.yaml new file mode 100644 index 000000000..57d7f5e3c --- /dev/null +++ b/resources/latest/charts/istiod-remote/files/profile-platform-microk8s.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/snap/microk8s/current/args/cni-network + cniBinDir: /var/snap/microk8s/current/opt/cni/bin diff --git a/resources/latest/charts/istiod-remote/files/profile-platform-minikube.yaml b/resources/latest/charts/istiod-remote/files/profile-platform-minikube.yaml new file mode 100644 index 000000000..fa9992e20 --- /dev/null +++ b/resources/latest/charts/istiod-remote/files/profile-platform-minikube.yaml @@ -0,0 +1,6 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". 
+ +cni: + cniNetnsDir: /var/run/docker/netns diff --git a/resources/latest/charts/cni/files/profile-openshift.yaml b/resources/latest/charts/istiod-remote/files/profile-platform-openshift.yaml similarity index 84% rename from resources/latest/charts/cni/files/profile-openshift.yaml rename to resources/latest/charts/istiod-remote/files/profile-platform-openshift.yaml index 38357bd99..69eda2b1d 100644 --- a/resources/latest/charts/cni/files/profile-openshift.yaml +++ b/resources/latest/charts/istiod-remote/files/profile-platform-openshift.yaml @@ -3,18 +3,15 @@ # If you want to make a change in this file, edit the original one and run "make gen". # The OpenShift profile provides a basic set of settings to run Istio on OpenShift -# CNI must be installed. cni: cniBinDir: /var/lib/cni/bin cniConfDir: /etc/cni/multus/net.d chained: false cniConfFileName: "istio-cni.conf" - logLevel: info provider: "multus" -global: - platform: openshift pilot: cni: enabled: true provider: "multus" -platform: openshift \ No newline at end of file +seLinuxOptions: + type: spc_t diff --git a/resources/latest/charts/istiod-remote/templates/clusterrole.yaml b/resources/latest/charts/istiod-remote/templates/clusterrole.yaml index b2eeb92cc..8c25cbf72 100644 --- a/resources/latest/charts/istiod-remote/templates/clusterrole.yaml +++ b/resources/latest/charts/istiod-remote/templates/clusterrole.yaml @@ -28,25 +28,35 @@ rules: resources: ["*"] {{- if .Values.global.istiod.enableAnalysis }} - apiGroups: ["config.istio.io", "security.istio.io", "networking.istio.io", "authentication.istio.io", "rbac.istio.io", "telemetry.istio.io", "extensions.istio.io"] - verbs: ["update"] - # TODO: should be on just */status but wildcard is not supported - resources: ["*"] - - # Needed because status reporter sets the config map owner reference to the istiod pod - - apiGroups: [""] - verbs: ["update"] - resources: ["pods/finalizers"] + verbs: ["update", "patch"] + resources: + - authorizationpolicies/status + - destinationrules/status + - envoyfilters/status + - gateways/status + - peerauthentications/status + - proxyconfigs/status + - requestauthentications/status + - serviceentries/status + - sidecars/status + - telemetries/status + - virtualservices/status + - wasmplugins/status + - workloadentries/status + - workloadgroups/status {{- end }} - apiGroups: ["networking.istio.io"] verbs: [ "get", "watch", "list", "update", "patch", "create", "delete" ] resources: [ "workloadentries" ] - apiGroups: ["networking.istio.io"] verbs: [ "get", "watch", "list", "update", "patch", "create", "delete" ] - resources: [ "workloadentries/status" ] - - - apiGroups: ["networking.istio.io"] - verbs: [ "get", "watch", "list", "update", "patch" ] - resources: [ "serviceentries/status" ] + resources: [ "workloadentries/status", "serviceentries/status" ] + - apiGroups: ["security.istio.io"] + verbs: [ "get", "watch", "list", "update", "patch", "create", "delete" ] + resources: [ "authorizationpolicies/status" ] + - apiGroups: [""] + verbs: [ "get", "watch", "list", "update", "patch", "create", "delete" ] + resources: [ "services/status" ] # auto-detect installed CRD definitions - apiGroups: ["apiextensions.k8s.io"] @@ -118,11 +128,20 @@ rules: verbs: ["create"] # Use for Kubernetes Service APIs - - apiGroups: ["networking.x-k8s.io", "gateway.networking.k8s.io"] + - apiGroups: ["gateway.networking.k8s.io"] resources: ["*"] verbs: ["get", "watch", "list"] - - apiGroups: ["networking.x-k8s.io", "gateway.networking.k8s.io"] - resources: ["*"] # TODO: should be on 
just */status but wildcard is not supported + - apiGroups: ["gateway.networking.k8s.io"] + resources: + - backendtlspolicies/status + - gatewayclasses/status + - gateways/status + - grpcroutes/status + - httproutes/status + - referencegrants/status + - tcproutes/status + - tlsroutes/status + - udproutes/status verbs: ["update", "patch"] - apiGroups: ["gateway.networking.k8s.io"] resources: ["gatewayclasses"] diff --git a/resources/latest/charts/istiod-remote/templates/endpoints.yaml b/resources/latest/charts/istiod-remote/templates/endpoints.yaml index 1cc26dd78..1190dfa9b 100644 --- a/resources/latest/charts/istiod-remote/templates/endpoints.yaml +++ b/resources/latest/charts/istiod-remote/templates/endpoints.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: Endpoints metadata: - {{- if .Values.pilot.enabled }} + {{- if .Values.enabled }} name: istiod{{- if .Values.revision }}-{{ .Values.revision}}{{- end }}-remote {{- else }} name: istiod{{- if .Values.revision }}-{{ .Values.revision}}{{- end }} diff --git a/resources/latest/charts/istiod-remote/templates/mutatingwebhook.yaml b/resources/latest/charts/istiod-remote/templates/mutatingwebhook.yaml index 5b7e734e4..fae4ed87a 100644 --- a/resources/latest/charts/istiod-remote/templates/mutatingwebhook.yaml +++ b/resources/latest/charts/istiod-remote/templates/mutatingwebhook.yaml @@ -50,7 +50,7 @@ metadata: operator.istio.io/component: "Pilot" app: sidecar-injector release: {{ .Release.Name }} - app.kubernetes.io/name: "sidecar-injector" + app.kubernetes.io/name: "istiod" {{- include "istio.labels" . | nindent 4 }} webhooks: {{- /* Set up the selectors. First section is for revision, rest is for "default" revision */}} diff --git a/resources/latest/charts/istiod-remote/templates/services.yaml b/resources/latest/charts/istiod-remote/templates/services.yaml index 4290f2848..fe9b701aa 100644 --- a/resources/latest/charts/istiod-remote/templates/services.yaml +++ b/resources/latest/charts/istiod-remote/templates/services.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - {{- if .Values.pilot.enabled }} + {{- if .Values.enabled }} # when local istiod is enabled, we can't use istiod service name to reach the remote control plane name: istiod{{- if .Values.revision }}-{{ .Values.revision}}{{- end }}-remote {{- else }} diff --git a/resources/latest/charts/istiod-remote/templates/zzz_profile.yaml b/resources/latest/charts/istiod-remote/templates/zzz_profile.yaml index b96dcafcb..4eea73812 100644 --- a/resources/latest/charts/istiod-remote/templates/zzz_profile.yaml +++ b/resources/latest/charts/istiod-remote/templates/zzz_profile.yaml @@ -33,6 +33,13 @@ Finally, we can set all of that under .Values so the chart behaves without aware {{ fail (cat "unknown compatibility version" $.Values.compatibilityVersion) }} {{- end }} {{- end }} +{{- if $globals.platform }} +{{- with $.Files.Get (printf "files/profile-platform-%s.yaml" $globals.platform) }} +{{- $ignore := mustMergeOverwrite $profile (. 
| fromYaml) }} +{{- else }} +{{ fail (cat "unknown platform" $globals.platform) }} +{{- end }} +{{- end }} {{- if $profile }} {{- $a := mustMergeOverwrite $defaults $profile }} {{- end }} diff --git a/resources/latest/charts/istiod-remote/values.yaml b/resources/latest/charts/istiod-remote/values.yaml index 3d511673d..54c4ee7a8 100644 --- a/resources/latest/charts/istiod-remote/values.yaml +++ b/resources/latest/charts/istiod-remote/values.yaml @@ -47,6 +47,8 @@ defaults: volumeMounts: [] # Additional volumes to the istiod pod volumes: [] + # Inject initContainers into the istiod pod + initContainers: [] nodeSelector: {} podAnnotations: {} serviceAnnotations: {} @@ -197,7 +199,7 @@ defaults: # Dev builds from prow are on gcr.io hub: gcr.io/istio-testing # Default tag for Istio images. - tag: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 + tag: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe # Variant of the image to use. # Currently supported are: [debug, distroless] variant: "" @@ -315,6 +317,9 @@ defaults: proxy_init: # Base name for the proxy_init container, used to configure iptables. image: proxyv2 + # Bypasses iptables idempotency handling, and attempts to apply iptables rules regardless of table state, which may cause unrecoverable failures. + # Do not use unless you need to work around an issue of the idempotency handling. This flag will be removed in future releases. + forceApplyIptables: false # configure remote pilot and istiod service and endpoint remotePilotAddress: "" ############################################################################################## @@ -428,6 +433,14 @@ defaults: limits: cpu: "2" memory: 1Gi + # If specified, affinity defines the scheduling constraints of waypoint pods. + affinity: {} + # Topology Spread Constraints for the waypoint proxy. + topologySpreadConstraints: [] + # Node labels for the waypoint proxy. + nodeSelector: {} + # Tolerations for the waypoint proxy. 
+ tolerations: [] base: # For istioctl usage to disable istio config crds in base enableIstioConfigCRDs: true diff --git a/resources/latest/charts/istiod/Chart.yaml b/resources/latest/charts/istiod/Chart.yaml index 5dfee6540..e4e4b0de8 100644 --- a/resources/latest/charts/istiod/Chart.yaml +++ b/resources/latest/charts/istiod/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 +appVersion: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe description: Helm chart for istio control plane icon: https://istio.io/latest/favicons/android-192x192.png keywords: @@ -9,4 +9,4 @@ keywords: name: istiod sources: - https://github.com/istio/istio -version: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 +version: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe diff --git a/resources/latest/charts/istiod/files/injection-template.yaml b/resources/latest/charts/istiod/files/injection-template.yaml index 63bc0e734..f41122f9b 100644 --- a/resources/latest/charts/istiod/files/injection-template.yaml +++ b/resources/latest/charts/istiod/files/injection-template.yaml @@ -125,6 +125,8 @@ spec: {{ if .Values.pilot.cni.enabled -}} - "--run-validation" - "--skip-rule-apply" + {{ else if .Values.global.proxy_init.forceApplyIptables -}} + - "--force-apply" {{ end -}} {{with .Values.global.imagePullPolicy }}imagePullPolicy: "{{.}}"{{end}} {{- if .ProxyConfig.ProxyMetadata }} diff --git a/resources/latest/charts/istiod/files/profile-compatibility-version-1.21.yaml b/resources/latest/charts/istiod/files/profile-compatibility-version-1.21.yaml index 2b72bd93c..c8da4d2e1 100644 --- a/resources/latest/charts/istiod/files/profile-compatibility-version-1.21.yaml +++ b/resources/latest/charts/istiod/files/profile-compatibility-version-1.21.yaml @@ -15,6 +15,8 @@ pilot: ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" meshConfig: # 1.22 behavioral changes @@ -23,6 +25,9 @@ meshConfig: ISTIO_DELTA_XDS: "false" # 1.23 behavioral changes ENABLE_DELIMITED_STATS_TAG_REGEX: "false" + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" tracing: zipkin: address: zipkin.istio-system:9411 diff --git a/resources/latest/charts/istiod/files/profile-compatibility-version-1.22.yaml b/resources/latest/charts/istiod/files/profile-compatibility-version-1.22.yaml index 2badb70a5..70d8eb40c 100644 --- a/resources/latest/charts/istiod/files/profile-compatibility-version-1.22.yaml +++ b/resources/latest/charts/istiod/files/profile-compatibility-version-1.22.yaml @@ -11,6 +11,8 @@ pilot: ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" meshConfig: defaultConfig: @@ -19,3 +21,6 @@ meshConfig: ENABLE_DEFERRED_CLUSTER_CREATION: "false" # 1.23 behavioral changes ENABLE_DELIMITED_STATS_TAG_REGEX: "false" + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" diff --git a/resources/latest/charts/istiod/files/profile-compatibility-version-1.23.yaml b/resources/latest/charts/istiod/files/profile-compatibility-version-1.23.yaml index f855500b0..636bb6f15 100644 --- 
a/resources/latest/charts/istiod/files/profile-compatibility-version-1.23.yaml +++ b/resources/latest/charts/istiod/files/profile-compatibility-version-1.23.yaml @@ -7,4 +7,13 @@ pilot: # 1.24 behavioral changes ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" - PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" \ No newline at end of file + PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" + +meshConfig: + defaultConfig: + proxyMetadata: + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" diff --git a/resources/latest/charts/istiod/files/profile-openshift-ambient.yaml b/resources/latest/charts/istiod/files/profile-openshift-ambient.yaml deleted file mode 100644 index 444665932..000000000 --- a/resources/latest/charts/istiod/files/profile-openshift-ambient.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# WARNING: DO NOT EDIT, THIS FILE IS A COPY. -# The original version of this file is located at /manifests/helm-profiles directory. -# If you want to make a change in this file, edit the original one and run "make gen". - -meshConfig: - defaultConfig: - proxyMetadata: - ISTIO_META_ENABLE_HBONE: "true" -global: - platform: openshift - variant: distroless - seLinuxOptions: - type: spc_t -cni: - ambient: - enabled: true - cniBinDir: /var/lib/cni/bin - cniConfDir: /etc/cni/multus/net.d - chained: false - cniConfFileName: "istio-cni.conf" - logLevel: info - provider: "multus" -pilot: - cni: - enabled: true - provider: "multus" - env: - PILOT_ENABLE_AMBIENT: "true" \ No newline at end of file diff --git a/resources/latest/charts/istiod/files/profile-openshift.yaml b/resources/latest/charts/istiod/files/profile-openshift.yaml deleted file mode 100644 index 38357bd99..000000000 --- a/resources/latest/charts/istiod/files/profile-openshift.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# WARNING: DO NOT EDIT, THIS FILE IS A COPY. -# The original version of this file is located at /manifests/helm-profiles directory. -# If you want to make a change in this file, edit the original one and run "make gen". - -# The OpenShift profile provides a basic set of settings to run Istio on OpenShift -# CNI must be installed. -cni: - cniBinDir: /var/lib/cni/bin - cniConfDir: /etc/cni/multus/net.d - chained: false - cniConfFileName: "istio-cni.conf" - logLevel: info - provider: "multus" -global: - platform: openshift -pilot: - cni: - enabled: true - provider: "multus" -platform: openshift \ No newline at end of file diff --git a/resources/latest/charts/istiod/files/profile-platform-k3d.yaml b/resources/latest/charts/istiod/files/profile-platform-k3d.yaml new file mode 100644 index 000000000..cd86d9ec5 --- /dev/null +++ b/resources/latest/charts/istiod/files/profile-platform-k3d.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + cniBinDir: /bin diff --git a/resources/latest/charts/istiod/files/profile-platform-k3s.yaml b/resources/latest/charts/istiod/files/profile-platform-k3s.yaml new file mode 100644 index 000000000..f3f2884aa --- /dev/null +++ b/resources/latest/charts/istiod/files/profile-platform-k3s.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. 
+# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + cniBinDir: /var/lib/rancher/k3s/data/current/bin/ diff --git a/resources/latest/charts/istiod/files/profile-platform-microk8s.yaml b/resources/latest/charts/istiod/files/profile-platform-microk8s.yaml new file mode 100644 index 000000000..57d7f5e3c --- /dev/null +++ b/resources/latest/charts/istiod/files/profile-platform-microk8s.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/snap/microk8s/current/args/cni-network + cniBinDir: /var/snap/microk8s/current/opt/cni/bin diff --git a/resources/latest/charts/istiod/files/profile-platform-minikube.yaml b/resources/latest/charts/istiod/files/profile-platform-minikube.yaml new file mode 100644 index 000000000..fa9992e20 --- /dev/null +++ b/resources/latest/charts/istiod/files/profile-platform-minikube.yaml @@ -0,0 +1,6 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniNetnsDir: /var/run/docker/netns diff --git a/resources/latest/charts/istiod/files/profile-platform-openshift.yaml b/resources/latest/charts/istiod/files/profile-platform-openshift.yaml new file mode 100644 index 000000000..69eda2b1d --- /dev/null +++ b/resources/latest/charts/istiod/files/profile-platform-openshift.yaml @@ -0,0 +1,17 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". 
+ +# The OpenShift profile provides a basic set of settings to run Istio on OpenShift +cni: + cniBinDir: /var/lib/cni/bin + cniConfDir: /etc/cni/multus/net.d + chained: false + cniConfFileName: "istio-cni.conf" + provider: "multus" +pilot: + cni: + enabled: true + provider: "multus" +seLinuxOptions: + type: spc_t diff --git a/resources/latest/charts/istiod/files/waypoint.yaml b/resources/latest/charts/istiod/files/waypoint.yaml index e01409503..ed3d59397 100644 --- a/resources/latest/charts/istiod/files/waypoint.yaml +++ b/resources/latest/charts/istiod/files/waypoint.yaml @@ -68,6 +68,22 @@ spec: "gateway.istio.io/managed" "istio.io-mesh-controller" ) | nindent 8}} spec: + {{- if .Values.global.waypoint.affinity }} + affinity: + {{- toYaml .Values.global.waypoint.affinity | nindent 8 }} + {{- end }} + {{- if .Values.global.waypoint.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml .Values.global.waypoint.topologySpreadConstraints | nindent 8 }} + {{- end }} + {{- if .Values.global.waypoint.nodeSelector }} + nodeSelector: + {{- toYaml .Values.global.waypoint.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.global.waypoint.tolerations }} + tolerations: + {{- toYaml .Values.global.waypoint.tolerations | nindent 8 }} + {{- end }} terminationGracePeriodSeconds: 2 serviceAccountName: {{.ServiceAccount | quote}} containers: diff --git a/resources/latest/charts/istiod/templates/clusterrole.yaml b/resources/latest/charts/istiod/templates/clusterrole.yaml index ac86e28b6..fb5dd398a 100644 --- a/resources/latest/charts/istiod/templates/clusterrole.yaml +++ b/resources/latest/charts/istiod/templates/clusterrole.yaml @@ -27,25 +27,35 @@ rules: resources: ["*"] {{- if .Values.global.istiod.enableAnalysis }} - apiGroups: ["config.istio.io", "security.istio.io", "networking.istio.io", "authentication.istio.io", "rbac.istio.io", "telemetry.istio.io", "extensions.istio.io"] - verbs: ["update"] - # TODO: should be on just */status but wildcard is not supported - resources: ["*"] - - # Needed because status reporter sets the config map owner reference to the istiod pod - - apiGroups: [""] - verbs: ["update"] - resources: ["pods/finalizers"] + verbs: ["update", "patch"] + resources: + - authorizationpolicies/status + - destinationrules/status + - envoyfilters/status + - gateways/status + - peerauthentications/status + - proxyconfigs/status + - requestauthentications/status + - serviceentries/status + - sidecars/status + - telemetries/status + - virtualservices/status + - wasmplugins/status + - workloadentries/status + - workloadgroups/status {{- end }} - apiGroups: ["networking.istio.io"] verbs: [ "get", "watch", "list", "update", "patch", "create", "delete" ] resources: [ "workloadentries" ] - apiGroups: ["networking.istio.io"] verbs: [ "get", "watch", "list", "update", "patch", "create", "delete" ] - resources: [ "workloadentries/status" ] - - - apiGroups: ["networking.istio.io"] - verbs: [ "get", "watch", "list", "update", "patch" ] - resources: [ "serviceentries/status" ] + resources: [ "workloadentries/status", "serviceentries/status" ] + - apiGroups: ["security.istio.io"] + verbs: [ "get", "watch", "list", "update", "patch", "create", "delete" ] + resources: [ "authorizationpolicies/status" ] + - apiGroups: [""] + verbs: [ "get", "watch", "list", "update", "patch", "create", "delete" ] + resources: [ "services/status" ] # auto-detect installed CRD definitions - apiGroups: ["apiextensions.k8s.io"] @@ -117,11 +127,20 @@ rules: verbs: ["create"] # Use for Kubernetes Service 
APIs - - apiGroups: ["networking.x-k8s.io", "gateway.networking.k8s.io"] + - apiGroups: ["gateway.networking.k8s.io"] resources: ["*"] verbs: ["get", "watch", "list"] - - apiGroups: ["networking.x-k8s.io", "gateway.networking.k8s.io"] - resources: ["*"] # TODO: should be on just */status but wildcard is not supported + - apiGroups: ["gateway.networking.k8s.io"] + resources: + - backendtlspolicies/status + - gatewayclasses/status + - gateways/status + - grpcroutes/status + - httproutes/status + - referencegrants/status + - tcproutes/status + - tlsroutes/status + - udproutes/status verbs: ["update", "patch"] - apiGroups: ["gateway.networking.k8s.io"] resources: ["gatewayclasses"] diff --git a/resources/latest/charts/istiod/templates/deployment.yaml b/resources/latest/charts/istiod/templates/deployment.yaml index 3b8ea75d9..987db2a71 100644 --- a/resources/latest/charts/istiod/templates/deployment.yaml +++ b/resources/latest/charts/istiod/templates/deployment.yaml @@ -81,6 +81,10 @@ spec: serviceAccountName: istiod{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} {{- if .Values.global.priorityClassName }} priorityClassName: "{{ .Values.global.priorityClassName }}" +{{- end }} +{{- with .Values.initContainers }} + initContainers: + {{- tpl (toYaml .) $ | nindent 8 }} {{- end }} containers: - name: discovery @@ -116,12 +120,19 @@ spec: ports: - containerPort: 8080 protocol: TCP + name: http-debug - containerPort: 15010 protocol: TCP + name: grpc-xds + - containerPort: 15012 + protocol: TCP + name: tls-xds - containerPort: 15017 protocol: TCP + name: https-webhooks - containerPort: 15014 protocol: TCP + name: http-monitoring readinessProbe: httpGet: path: /ready @@ -189,8 +200,9 @@ spec: valueFrom: resourceFieldRef: resource: limits.cpu + divisor: "1" - name: PLATFORM - value: "{{ .Values.global.platform }}" + value: "{{ coalesce .Values.global.platform .Values.platform }}" resources: {{- if .Values.resources }} {{ toYaml .Values.resources | trim | indent 12 }} diff --git a/resources/latest/charts/istiod/templates/mutatingwebhook.yaml b/resources/latest/charts/istiod/templates/mutatingwebhook.yaml index 5b7e734e4..fae4ed87a 100644 --- a/resources/latest/charts/istiod/templates/mutatingwebhook.yaml +++ b/resources/latest/charts/istiod/templates/mutatingwebhook.yaml @@ -50,7 +50,7 @@ metadata: operator.istio.io/component: "Pilot" app: sidecar-injector release: {{ .Release.Name }} - app.kubernetes.io/name: "sidecar-injector" + app.kubernetes.io/name: "istiod" {{- include "istio.labels" . | nindent 4 }} webhooks: {{- /* Set up the selectors. 
First section is for revision, rest is for "default" revision */}} diff --git a/resources/latest/charts/istiod/templates/revision-tags.yaml b/resources/latest/charts/istiod/templates/revision-tags.yaml index 1d13d62d5..9c1d2784a 100644 --- a/resources/latest/charts/istiod/templates/revision-tags.yaml +++ b/resources/latest/charts/istiod/templates/revision-tags.yaml @@ -46,7 +46,7 @@ metadata: operator.istio.io/component: "Pilot" app: sidecar-injector release: {{ $.Release.Name }} - app.kubernetes.io/name: "sidecar-injector" + app.kubernetes.io/name: "istiod" {{- include "istio.labels" $ | nindent 4 }} webhooks: {{- include "core" (mergeOverwrite (deepCopy $whv) (dict "Prefix" "rev.namespace.") ) }} diff --git a/resources/latest/charts/istiod/templates/zzz_profile.yaml b/resources/latest/charts/istiod/templates/zzz_profile.yaml index b96dcafcb..4eea73812 100644 --- a/resources/latest/charts/istiod/templates/zzz_profile.yaml +++ b/resources/latest/charts/istiod/templates/zzz_profile.yaml @@ -33,6 +33,13 @@ Finally, we can set all of that under .Values so the chart behaves without aware {{ fail (cat "unknown compatibility version" $.Values.compatibilityVersion) }} {{- end }} {{- end }} +{{- if $globals.platform }} +{{- with $.Files.Get (printf "files/profile-platform-%s.yaml" $globals.platform) }} +{{- $ignore := mustMergeOverwrite $profile (. | fromYaml) }} +{{- else }} +{{ fail (cat "unknown platform" $globals.platform) }} +{{- end }} +{{- end }} {{- if $profile }} {{- $a := mustMergeOverwrite $defaults $profile }} {{- end }} diff --git a/resources/latest/charts/istiod/values.yaml b/resources/latest/charts/istiod/values.yaml index bc83baf91..93ebefbd8 100644 --- a/resources/latest/charts/istiod/values.yaml +++ b/resources/latest/charts/istiod/values.yaml @@ -60,6 +60,9 @@ defaults: # Additional volumes to the istiod pod volumes: [] + # Inject initContainers into the istiod pod + initContainers: [] + nodeSelector: {} podAnnotations: {} serviceAnnotations: {} @@ -234,7 +237,7 @@ defaults: # Dev builds from prow are on gcr.io hub: gcr.io/istio-testing # Default tag for Istio images. - tag: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 + tag: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe # Variant of the image to use. # Currently supported are: [debug, distroless] variant: "" @@ -377,6 +380,9 @@ defaults: proxy_init: # Base name for the proxy_init container, used to configure iptables. image: proxyv2 + # Bypasses iptables idempotency handling, and attempts to apply iptables rules regardless of table state, which may cause unrecoverable failures. + # Do not use unless you need to work around an issue of the idempotency handling. This flag will be removed in future releases. + forceApplyIptables: false # configure remote pilot and istiod service and endpoint remotePilotAddress: "" @@ -506,6 +512,18 @@ defaults: cpu: "2" memory: 1Gi + # If specified, affinity defines the scheduling constraints of waypoint pods. + affinity: {} + + # Topology Spread Constraints for the waypoint proxy. + topologySpreadConstraints: [] + + # Node labels for the waypoint proxy. + nodeSelector: {} + + # Tolerations for the waypoint proxy. 
+ tolerations: [] + base: # For istioctl usage to disable istio config crds in base enableIstioConfigCRDs: true diff --git a/resources/latest/charts/ztunnel/Chart.yaml b/resources/latest/charts/ztunnel/Chart.yaml index 4eb63bbed..c2d07c68c 100644 --- a/resources/latest/charts/ztunnel/Chart.yaml +++ b/resources/latest/charts/ztunnel/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 +appVersion: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe description: Helm chart for istio ztunnel components icon: https://istio.io/latest/favicons/android-192x192.png keywords: @@ -8,4 +8,4 @@ keywords: name: ztunnel sources: - https://github.com/istio/istio -version: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 +version: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe diff --git a/resources/latest/charts/ztunnel/files/profile-compatibility-version-1.21.yaml b/resources/latest/charts/ztunnel/files/profile-compatibility-version-1.21.yaml index 2b72bd93c..c8da4d2e1 100644 --- a/resources/latest/charts/ztunnel/files/profile-compatibility-version-1.21.yaml +++ b/resources/latest/charts/ztunnel/files/profile-compatibility-version-1.21.yaml @@ -15,6 +15,8 @@ pilot: ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" meshConfig: # 1.22 behavioral changes @@ -23,6 +25,9 @@ meshConfig: ISTIO_DELTA_XDS: "false" # 1.23 behavioral changes ENABLE_DELIMITED_STATS_TAG_REGEX: "false" + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" tracing: zipkin: address: zipkin.istio-system:9411 diff --git a/resources/latest/charts/ztunnel/files/profile-compatibility-version-1.22.yaml b/resources/latest/charts/ztunnel/files/profile-compatibility-version-1.22.yaml index 2badb70a5..70d8eb40c 100644 --- a/resources/latest/charts/ztunnel/files/profile-compatibility-version-1.22.yaml +++ b/resources/latest/charts/ztunnel/files/profile-compatibility-version-1.22.yaml @@ -11,6 +11,8 @@ pilot: ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" meshConfig: defaultConfig: @@ -19,3 +21,6 @@ meshConfig: ENABLE_DEFERRED_CLUSTER_CREATION: "false" # 1.23 behavioral changes ENABLE_DELIMITED_STATS_TAG_REGEX: "false" + # 1.24 behaviour changes + ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" diff --git a/resources/latest/charts/ztunnel/files/profile-compatibility-version-1.23.yaml b/resources/latest/charts/ztunnel/files/profile-compatibility-version-1.23.yaml index f855500b0..636bb6f15 100644 --- a/resources/latest/charts/ztunnel/files/profile-compatibility-version-1.23.yaml +++ b/resources/latest/charts/ztunnel/files/profile-compatibility-version-1.23.yaml @@ -7,4 +7,13 @@ pilot: # 1.24 behavioral changes ENABLE_INBOUND_RETRY_POLICY: "false" EXCLUDE_UNSAFE_503_FROM_DEFAULT_RETRY: "false" - PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" \ No newline at end of file + PREFER_DESTINATIONRULE_TLS_FOR_EXTERNAL_SERVICES: "false" + ENABLE_ENHANCED_DESTINATIONRULE_MERGE: "false" + PILOT_UNIFIED_SIDECAR_SCOPE: "false" + +meshConfig: + defaultConfig: + proxyMetadata: + # 1.24 behaviour changes + 
ENABLE_DEFERRED_STATS_CREATION: "false" + BYPASS_OVERLOAD_MANAGER_FOR_STATIC_LISTENERS: "false" diff --git a/resources/latest/charts/ztunnel/files/profile-openshift-ambient.yaml b/resources/latest/charts/ztunnel/files/profile-openshift-ambient.yaml deleted file mode 100644 index 444665932..000000000 --- a/resources/latest/charts/ztunnel/files/profile-openshift-ambient.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# WARNING: DO NOT EDIT, THIS FILE IS A COPY. -# The original version of this file is located at /manifests/helm-profiles directory. -# If you want to make a change in this file, edit the original one and run "make gen". - -meshConfig: - defaultConfig: - proxyMetadata: - ISTIO_META_ENABLE_HBONE: "true" -global: - platform: openshift - variant: distroless - seLinuxOptions: - type: spc_t -cni: - ambient: - enabled: true - cniBinDir: /var/lib/cni/bin - cniConfDir: /etc/cni/multus/net.d - chained: false - cniConfFileName: "istio-cni.conf" - logLevel: info - provider: "multus" -pilot: - cni: - enabled: true - provider: "multus" - env: - PILOT_ENABLE_AMBIENT: "true" \ No newline at end of file diff --git a/resources/latest/charts/ztunnel/files/profile-openshift.yaml b/resources/latest/charts/ztunnel/files/profile-openshift.yaml deleted file mode 100644 index 38357bd99..000000000 --- a/resources/latest/charts/ztunnel/files/profile-openshift.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# WARNING: DO NOT EDIT, THIS FILE IS A COPY. -# The original version of this file is located at /manifests/helm-profiles directory. -# If you want to make a change in this file, edit the original one and run "make gen". - -# The OpenShift profile provides a basic set of settings to run Istio on OpenShift -# CNI must be installed. -cni: - cniBinDir: /var/lib/cni/bin - cniConfDir: /etc/cni/multus/net.d - chained: false - cniConfFileName: "istio-cni.conf" - logLevel: info - provider: "multus" -global: - platform: openshift -pilot: - cni: - enabled: true - provider: "multus" -platform: openshift \ No newline at end of file diff --git a/resources/latest/charts/ztunnel/files/profile-platform-k3d.yaml b/resources/latest/charts/ztunnel/files/profile-platform-k3d.yaml new file mode 100644 index 000000000..cd86d9ec5 --- /dev/null +++ b/resources/latest/charts/ztunnel/files/profile-platform-k3d.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + cniBinDir: /bin diff --git a/resources/latest/charts/ztunnel/files/profile-platform-k3s.yaml b/resources/latest/charts/ztunnel/files/profile-platform-k3s.yaml new file mode 100644 index 000000000..f3f2884aa --- /dev/null +++ b/resources/latest/charts/ztunnel/files/profile-platform-k3s.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". 
+ +cni: + cniConfDir: /var/lib/rancher/k3s/agent/etc/cni/net.d + cniBinDir: /var/lib/rancher/k3s/data/current/bin/ diff --git a/resources/latest/charts/ztunnel/files/profile-platform-microk8s.yaml b/resources/latest/charts/ztunnel/files/profile-platform-microk8s.yaml new file mode 100644 index 000000000..57d7f5e3c --- /dev/null +++ b/resources/latest/charts/ztunnel/files/profile-platform-microk8s.yaml @@ -0,0 +1,7 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniConfDir: /var/snap/microk8s/current/args/cni-network + cniBinDir: /var/snap/microk8s/current/opt/cni/bin diff --git a/resources/latest/charts/ztunnel/files/profile-platform-minikube.yaml b/resources/latest/charts/ztunnel/files/profile-platform-minikube.yaml new file mode 100644 index 000000000..fa9992e20 --- /dev/null +++ b/resources/latest/charts/ztunnel/files/profile-platform-minikube.yaml @@ -0,0 +1,6 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +cni: + cniNetnsDir: /var/run/docker/netns diff --git a/resources/latest/charts/ztunnel/files/profile-platform-openshift.yaml b/resources/latest/charts/ztunnel/files/profile-platform-openshift.yaml new file mode 100644 index 000000000..69eda2b1d --- /dev/null +++ b/resources/latest/charts/ztunnel/files/profile-platform-openshift.yaml @@ -0,0 +1,17 @@ +# WARNING: DO NOT EDIT, THIS FILE IS A COPY. +# The original version of this file is located at /manifests/helm-profiles directory. +# If you want to make a change in this file, edit the original one and run "make gen". + +# The OpenShift profile provides a basic set of settings to run Istio on OpenShift +cni: + cniBinDir: /var/lib/cni/bin + cniConfDir: /etc/cni/multus/net.d + chained: false + cniConfFileName: "istio-cni.conf" + provider: "multus" +pilot: + cni: + enabled: true + provider: "multus" +seLinuxOptions: + type: spc_t diff --git a/resources/latest/charts/ztunnel/templates/daemonset.yaml b/resources/latest/charts/ztunnel/templates/daemonset.yaml index 6025871e9..c6e2daf48 100644 --- a/resources/latest/charts/ztunnel/templates/daemonset.yaml +++ b/resources/latest/charts/ztunnel/templates/daemonset.yaml @@ -188,7 +188,7 @@ spec: - name: cni-ztunnel-sock-dir hostPath: path: /var/run/ztunnel - type: DirectoryOrCreate # ideally this would be a socket, but ztunnel may not have started yet. + type: DirectoryOrCreate # ideally this would be a socket, but istio-cni may not have started yet. # pprof needs a writable /tmp, and we don't have that thanks to `readOnlyRootFilesystem: true`, so mount one - name: tmp emptyDir: {} diff --git a/resources/latest/charts/ztunnel/templates/zzz_profile.yaml b/resources/latest/charts/ztunnel/templates/zzz_profile.yaml index 68a66eec6..680b3d5c3 100644 --- a/resources/latest/charts/ztunnel/templates/zzz_profile.yaml +++ b/resources/latest/charts/ztunnel/templates/zzz_profile.yaml @@ -33,6 +33,13 @@ Finally, we can set all of that under .Values so the chart behaves without aware {{ fail (cat "unknown compatibility version" $.Values.compatibilityVersion) }} {{- end }} {{- end }} +{{- if $globals.platform }} +{{- with $.Files.Get (printf "files/profile-platform-%s.yaml" $globals.platform) }} +{{- $ignore := mustMergeOverwrite $profile (. 
| fromYaml) }} +{{- else }} +{{ fail (cat "unknown platform" $globals.platform) }} +{{- end }} +{{- end }} {{- if $profile }} {{- $a := mustMergeOverwrite $defaults $profile }} {{- end }} diff --git a/resources/latest/charts/ztunnel/values.yaml b/resources/latest/charts/ztunnel/values.yaml index 955528f5b..c5e3ebe7b 100644 --- a/resources/latest/charts/ztunnel/values.yaml +++ b/resources/latest/charts/ztunnel/values.yaml @@ -4,7 +4,7 @@ defaults: # Hub to pull from. Image will be `Hub/Image:Tag-Variant` hub: gcr.io/istio-testing # Tag to pull from. Image will be `Hub/Image:Tag-Variant` - tag: 1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289 + tag: 1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe # Variant to pull. Options are "debug" or "distroless". Unset will use the default for the given version. variant: "" diff --git a/versions.yaml b/versions.yaml index 116ad15ac..c579186aa 100644 --- a/versions.yaml +++ b/versions.yaml @@ -46,11 +46,11 @@ versions: version: 1.24-alpha repo: https://github.com/istio/istio branch: master - commit: b28bdd77da4c7f0f4f3631db514f1c4f79a90289 + commit: fe2a04689d3b7abf7630dc5646bf825e0c0592fe charts: - - https://storage.googleapis.com/istio-build/dev/1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289/helm/base-1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289.tgz - - https://storage.googleapis.com/istio-build/dev/1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289/helm/cni-1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289.tgz - - https://storage.googleapis.com/istio-build/dev/1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289/helm/gateway-1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289.tgz - - https://storage.googleapis.com/istio-build/dev/1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289/helm/istiod-1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289.tgz - - https://storage.googleapis.com/istio-build/dev/1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289/helm/istiod-remote-1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289.tgz - - https://storage.googleapis.com/istio-build/dev/1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289/helm/ztunnel-1.24-alpha.b28bdd77da4c7f0f4f3631db514f1c4f79a90289.tgz + - https://storage.googleapis.com/istio-build/dev/1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe/helm/base-1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe.tgz + - https://storage.googleapis.com/istio-build/dev/1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe/helm/cni-1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe.tgz + - https://storage.googleapis.com/istio-build/dev/1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe/helm/gateway-1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe.tgz + - https://storage.googleapis.com/istio-build/dev/1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe/helm/istiod-1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe.tgz + - https://storage.googleapis.com/istio-build/dev/1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe/helm/istiod-remote-1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe.tgz + - https://storage.googleapis.com/istio-build/dev/1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe/helm/ztunnel-1.24-alpha.fe2a04689d3b7abf7630dc5646bf825e0c0592fe.tgz From 9d12b6f8e7a9d1c5999451a308fa946bf24fe85b Mon Sep 17 00:00:00 2001 From: Travis White Date: Fri, 20 Sep 2024 02:44:19 -0500 Subject: [PATCH 06/25] added helm overrides for proxy image as is available for operator (#339) * added helm overrides for proxy image as is available for operator Signed-off-by: Travis White * `make gen` 
results Signed-off-by: Travis White --------- Signed-off-by: Travis White Co-authored-by: Daniel Grimm --- bundle/manifests/sailoperator.clusterserviceversion.yaml | 1 + chart/templates/deployment.yaml | 3 ++- chart/values.yaml | 3 +++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/bundle/manifests/sailoperator.clusterserviceversion.yaml b/bundle/manifests/sailoperator.clusterserviceversion.yaml index 47e035fbc..de1cd4691 100644 --- a/bundle/manifests/sailoperator.clusterserviceversion.yaml +++ b/bundle/manifests/sailoperator.clusterserviceversion.yaml @@ -643,6 +643,7 @@ spec: - --logtostderr=true - --v=0 image: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 + imagePullPolicy: Always name: kube-rbac-proxy ports: - containerPort: 8443 diff --git a/chart/templates/deployment.yaml b/chart/templates/deployment.yaml index 0067d9eaa..02c4c4646 100644 --- a/chart/templates/deployment.yaml +++ b/chart/templates/deployment.yaml @@ -53,7 +53,8 @@ spec: - --upstream=http://127.0.0.1:8080/ - --logtostderr=true - --v=0 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 + image: {{ .Values.proxy.image }} + imagePullPolicy: {{ .Values.proxy.imagePullPolicy }} name: kube-rbac-proxy ports: - containerPort: 8443 diff --git a/chart/values.yaml b/chart/values.yaml index d86bac087..75d8d8799 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -44,6 +44,9 @@ csv: features.operators.openshift.io/csi: "false" image: quay.io/maistra-dev/sail-operator:0.2-latest imagePullPolicy: Always +proxy: + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 + imagePullPolicy: Always # setting this to true will add resources required to generate the bundle using operator-sdk bundleGeneration: false From 1dff7d97be92323dfcd6e6f21795011f67be5fe3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20Luk=C5=A1a?= Date: Fri, 20 Sep 2024 12:03:19 +0200 Subject: [PATCH 07/25] Show name of active revision in Istio/RemoteIstio status (#350) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This helps users know the name of the currently active revision. Previously, the users had to just know that for the InPlace strategy, the name of the revision is the same as the name of the Istio/RemoteIstio resource, and for the RevisionBased strategy, the revision name is the Istio/RemoteIstio name + version. Signed-off-by: Marko Lukša --- api/v1alpha1/istio_types.go | 6 +++++- api/v1alpha1/remoteistio_types.go | 6 +++++- bundle/manifests/sailoperator.io_istios.yaml | 9 ++++++++- bundle/manifests/sailoperator.io_remoteistios.yaml | 9 ++++++++- chart/crds/sailoperator.io_istios.yaml | 9 ++++++++- chart/crds/sailoperator.io_remoteistios.yaml | 9 ++++++++- controllers/istio/istio_controller.go | 1 + controllers/istio/istio_controller_test.go | 10 +++++++++- controllers/remoteistio/remoteistio_controller.go | 1 + controllers/remoteistio/remoteistio_controller_test.go | 10 +++++++++- docs/api-reference/sailoperator.io.md | 2 ++ 11 files changed, 64 insertions(+), 8 deletions(-) diff --git a/api/v1alpha1/istio_types.go b/api/v1alpha1/istio_types.go index dfeb27df3..ba39fdc06 100644 --- a/api/v1alpha1/istio_types.go +++ b/api/v1alpha1/istio_types.go @@ -114,6 +114,9 @@ type IstioStatus struct { // Reports the current state of the object. State IstioConditionReason `json:"state,omitempty"` + // The name of the active revision. + ActiveRevisionName string `json:"activeRevisionName,omitempty"` + // Reports information about the underlying IstioRevisions. 
Revisions RevisionSummary `json:"revisions,omitempty"` } @@ -238,7 +241,8 @@ const ( // +kubebuilder:printcolumn:name="Revisions",type="string",JSONPath=".status.revisions.total",description="Total number of IstioRevision objects currently associated with this object." // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.revisions.ready",description="Number of revisions that are ready." // +kubebuilder:printcolumn:name="In use",type="string",JSONPath=".status.revisions.inUse",description="Number of revisions that are currently being used by workloads." -// +kubebuilder:printcolumn:name="Active Revision",type="string",JSONPath=".status.state",description="The current state of the active revision." +// +kubebuilder:printcolumn:name="Active Revision",type="string",JSONPath=".status.activeRevisionName",description="The name of the currently active revision." +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.state",description="The current state of the active revision." // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of the control plane installation." // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the object" diff --git a/api/v1alpha1/remoteistio_types.go b/api/v1alpha1/remoteistio_types.go index 1494a9bc7..dbfe3707c 100644 --- a/api/v1alpha1/remoteistio_types.go +++ b/api/v1alpha1/remoteistio_types.go @@ -71,6 +71,9 @@ type RemoteIstioStatus struct { // Reports the current state of the object. State RemoteIstioConditionReason `json:"state,omitempty"` + // The name of the active revision. + ActiveRevisionName string `json:"activeRevisionName,omitempty"` + // Reports information about the underlying IstioRevisions. Revisions RevisionSummary `json:"revisions,omitempty"` } @@ -183,7 +186,8 @@ const ( // +kubebuilder:printcolumn:name="Revisions",type="string",JSONPath=".status.revisions.total",description="Total number of IstioRevision objects currently associated with this object." // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.revisions.ready",description="Number of revisions that are ready." // +kubebuilder:printcolumn:name="In use",type="string",JSONPath=".status.revisions.inUse",description="Number of revisions that are currently being used by workloads." -// +kubebuilder:printcolumn:name="Active Revision",type="string",JSONPath=".status.state",description="The current state of the active revision." +// +kubebuilder:printcolumn:name="Active Revision",type="string",JSONPath=".status.activeRevisionName",description="The name of the currently active revision." +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.state",description="The current state of the active revision." // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of the control plane installation." // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the object" diff --git a/bundle/manifests/sailoperator.io_istios.yaml b/bundle/manifests/sailoperator.io_istios.yaml index 3ba422ba5..4057df688 100644 --- a/bundle/manifests/sailoperator.io_istios.yaml +++ b/bundle/manifests/sailoperator.io_istios.yaml @@ -30,9 +30,13 @@ spec: jsonPath: .status.revisions.inUse name: In use type: string + - description: The name of the currently active revision. 
+ jsonPath: .status.activeRevisionName + name: Active Revision + type: string - description: The current state of the active revision. jsonPath: .status.state - name: Active Revision + name: Status type: string - description: The version of the control plane installation. jsonPath: .spec.version @@ -8001,6 +8005,9 @@ spec: status: description: IstioStatus defines the observed state of Istio properties: + activeRevisionName: + description: The name of the active revision. + type: string conditions: description: Represents the latest available observations of the object's current state. diff --git a/bundle/manifests/sailoperator.io_remoteistios.yaml b/bundle/manifests/sailoperator.io_remoteistios.yaml index f77d7cc7a..19c17f37d 100644 --- a/bundle/manifests/sailoperator.io_remoteistios.yaml +++ b/bundle/manifests/sailoperator.io_remoteistios.yaml @@ -30,9 +30,13 @@ spec: jsonPath: .status.revisions.inUse name: In use type: string + - description: The name of the currently active revision. + jsonPath: .status.activeRevisionName + name: Active Revision + type: string - description: The current state of the active revision. jsonPath: .status.state - name: Active Revision + name: Status type: string - description: The version of the control plane installation. jsonPath: .spec.version @@ -7996,6 +8000,9 @@ spec: status: description: RemoteIstioStatus defines the observed state of RemoteIstio properties: + activeRevisionName: + description: The name of the active revision. + type: string conditions: description: Represents the latest available observations of the object's current state. diff --git a/chart/crds/sailoperator.io_istios.yaml b/chart/crds/sailoperator.io_istios.yaml index b9a5f37fa..8ccbb7dcc 100644 --- a/chart/crds/sailoperator.io_istios.yaml +++ b/chart/crds/sailoperator.io_istios.yaml @@ -30,9 +30,13 @@ spec: jsonPath: .status.revisions.inUse name: In use type: string + - description: The name of the currently active revision. + jsonPath: .status.activeRevisionName + name: Active Revision + type: string - description: The current state of the active revision. jsonPath: .status.state - name: Active Revision + name: Status type: string - description: The version of the control plane installation. jsonPath: .spec.version @@ -8001,6 +8005,9 @@ spec: status: description: IstioStatus defines the observed state of Istio properties: + activeRevisionName: + description: The name of the active revision. + type: string conditions: description: Represents the latest available observations of the object's current state. diff --git a/chart/crds/sailoperator.io_remoteistios.yaml b/chart/crds/sailoperator.io_remoteistios.yaml index 75f001835..edc2c67d5 100644 --- a/chart/crds/sailoperator.io_remoteistios.yaml +++ b/chart/crds/sailoperator.io_remoteistios.yaml @@ -30,9 +30,13 @@ spec: jsonPath: .status.revisions.inUse name: In use type: string + - description: The name of the currently active revision. + jsonPath: .status.activeRevisionName + name: Active Revision + type: string - description: The current state of the active revision. jsonPath: .status.state - name: Active Revision + name: Status type: string - description: The version of the control plane installation. jsonPath: .spec.version @@ -7996,6 +8000,9 @@ spec: status: description: RemoteIstioStatus defines the observed state of RemoteIstio properties: + activeRevisionName: + description: The name of the active revision. + type: string conditions: description: Represents the latest available observations of the object's current state. 
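The new `activeRevisionName` status field and the reworked print columns introduced by this patch can be inspected directly from `kubectl`. The following is only a minimal sketch: it assumes an `Istio` resource named `default` already exists in the cluster, and the exact column output depends on which CRD version is installed.

```sh
# Read the new status field added by this patch (field path taken from the CRD above)
kubectl get istio default -o jsonpath='{.status.activeRevisionName}'

# The list view now shows the revision name under "Active Revision"
# and the reconciliation state under the new "Status" column
kubectl get istios
```
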
diff --git a/controllers/istio/istio_controller.go b/controllers/istio/istio_controller.go index f7644e0d8..b46119796 100644 --- a/controllers/istio/istio_controller.go +++ b/controllers/istio/istio_controller.go @@ -207,6 +207,7 @@ func (r *Reconciler) determineStatus(ctx context.Context, istio *v1alpha1.Istio, }) status.State = v1alpha1.IstioReasonReconcileError } else { + status.ActiveRevisionName = getActiveRevisionName(istio) rev, err := r.getActiveRevision(ctx, istio) if apierrors.IsNotFound(err) { revisionNotFound := func(conditionType v1alpha1.IstioConditionType) v1alpha1.IstioCondition { diff --git a/controllers/istio/istio_controller_test.go b/controllers/istio/istio_controller_test.go index 020529823..1a5150a53 100644 --- a/controllers/istio/istio_controller_test.go +++ b/controllers/istio/istio_controller_test.go @@ -367,6 +367,7 @@ func TestDetermineStatus(t *testing.T) { Message: "ready message", }, }, + ActiveRevisionName: istioKey.Name, Revisions: v1alpha1.RevisionSummary{ Total: 2, Ready: 1, @@ -398,6 +399,7 @@ func TestDetermineStatus(t *testing.T) { Status: metav1.ConditionTrue, }, }, + ActiveRevisionName: istioKey.Name, Revisions: v1alpha1.RevisionSummary{ Total: 3, Ready: 2, @@ -425,6 +427,7 @@ func TestDetermineStatus(t *testing.T) { Message: "active IstioRevision not found", }, }, + ActiveRevisionName: istioKey.Name, }, }, { @@ -455,7 +458,8 @@ func TestDetermineStatus(t *testing.T) { Message: "failed to get active IstioRevision: get failed: simulated error", }, }, - Revisions: v1alpha1.RevisionSummary{}, + ActiveRevisionName: istioKey.Name, + Revisions: v1alpha1.RevisionSummary{}, }, }, { @@ -486,6 +490,7 @@ func TestDetermineStatus(t *testing.T) { Message: "active IstioRevision not found", }, }, + ActiveRevisionName: istioKey.Name, Revisions: v1alpha1.RevisionSummary{ Total: -1, Ready: -1, @@ -587,6 +592,7 @@ func TestUpdateStatus(t *testing.T) { Message: "active IstioRevision not found", }, }, + ActiveRevisionName: istioKey.Name, Revisions: v1alpha1.RevisionSummary{ Total: -1, Ready: -1, @@ -625,6 +631,7 @@ func TestUpdateStatus(t *testing.T) { LastTransitionTime: *oneMinuteAgo, }, }, + ActiveRevisionName: istioKey.Name, }, }, revisions: []v1alpha1.IstioRevision{ @@ -673,6 +680,7 @@ func TestUpdateStatus(t *testing.T) { Message: "ready message", }, }, + ActiveRevisionName: istioKey.Name, }, disallowWrites: true, wantErr: false, diff --git a/controllers/remoteistio/remoteistio_controller.go b/controllers/remoteistio/remoteistio_controller.go index 32b2ac028..31c70a51c 100644 --- a/controllers/remoteistio/remoteistio_controller.go +++ b/controllers/remoteistio/remoteistio_controller.go @@ -206,6 +206,7 @@ func (r *Reconciler) determineStatus(ctx context.Context, istio *v1alpha1.Remote }) status.State = v1alpha1.RemoteIstioReasonReconcileError } else { + status.ActiveRevisionName = getActiveRevisionName(istio) rev, err := r.getActiveRevision(ctx, istio) if apierrors.IsNotFound(err) { revisionNotFound := func(conditionType v1alpha1.RemoteIstioConditionType) v1alpha1.RemoteIstioCondition { diff --git a/controllers/remoteistio/remoteistio_controller_test.go b/controllers/remoteistio/remoteistio_controller_test.go index 9e4f3732c..c9ff33f0a 100644 --- a/controllers/remoteistio/remoteistio_controller_test.go +++ b/controllers/remoteistio/remoteistio_controller_test.go @@ -367,6 +367,7 @@ func TestDetermineStatus(t *testing.T) { Message: "ready message", }, }, + ActiveRevisionName: istioKey.Name, Revisions: v1alpha1.RevisionSummary{ Total: 2, Ready: 1, @@ -398,6 +399,7 @@ 
func TestDetermineStatus(t *testing.T) { Status: metav1.ConditionTrue, }, }, + ActiveRevisionName: istioKey.Name, Revisions: v1alpha1.RevisionSummary{ Total: 3, Ready: 2, @@ -425,6 +427,7 @@ func TestDetermineStatus(t *testing.T) { Message: "active IstioRevision not found", }, }, + ActiveRevisionName: istioKey.Name, }, }, { @@ -455,7 +458,8 @@ func TestDetermineStatus(t *testing.T) { Message: "failed to get active IstioRevision: get failed: simulated error", }, }, - Revisions: v1alpha1.RevisionSummary{}, + ActiveRevisionName: istioKey.Name, + Revisions: v1alpha1.RevisionSummary{}, }, }, { @@ -486,6 +490,7 @@ func TestDetermineStatus(t *testing.T) { Message: "active IstioRevision not found", }, }, + ActiveRevisionName: istioKey.Name, Revisions: v1alpha1.RevisionSummary{ Total: -1, Ready: -1, @@ -587,6 +592,7 @@ func TestUpdateStatus(t *testing.T) { Message: "active IstioRevision not found", }, }, + ActiveRevisionName: istioKey.Name, Revisions: v1alpha1.RevisionSummary{ Total: -1, Ready: -1, @@ -625,6 +631,7 @@ func TestUpdateStatus(t *testing.T) { LastTransitionTime: *oneMinuteAgo, }, }, + ActiveRevisionName: istioKey.Name, }, }, revisions: []v1alpha1.IstioRevision{ @@ -673,6 +680,7 @@ func TestUpdateStatus(t *testing.T) { Message: "ready message", }, }, + ActiveRevisionName: istioKey.Name, }, disallowWrites: true, wantErr: false, diff --git a/docs/api-reference/sailoperator.io.md b/docs/api-reference/sailoperator.io.md index d8e479b79..ce6e08862 100644 --- a/docs/api-reference/sailoperator.io.md +++ b/docs/api-reference/sailoperator.io.md @@ -928,6 +928,7 @@ _Appears in:_ | `observedGeneration` _integer_ | ObservedGeneration is the most recent generation observed for this Istio object. It corresponds to the object's generation, which is updated on mutation by the API Server. The information in the status pertains to this particular generation of the object. | | | | `conditions` _[IstioCondition](#istiocondition) array_ | Represents the latest available observations of the object's current state. | | | | `state` _[IstioConditionReason](#istioconditionreason)_ | Reports the current state of the object. | | | +| `activeRevisionName` _string_ | The name of the active revision. | | | | `revisions` _[RevisionSummary](#revisionsummary)_ | Reports information about the underlying IstioRevisions. | | | @@ -2533,6 +2534,7 @@ _Appears in:_ | `observedGeneration` _integer_ | ObservedGeneration is the most recent generation observed for this RemoteIstio object. It corresponds to the object's generation, which is updated on mutation by the API Server. The information in the status pertains to this particular generation of the object. | | | | `conditions` _[RemoteIstioCondition](#remoteistiocondition) array_ | Represents the latest available observations of the object's current state. | | | | `state` _[RemoteIstioConditionReason](#remoteistioconditionreason)_ | Reports the current state of the object. | | | +| `activeRevisionName` _string_ | The name of the active revision. | | | | `revisions` _[RevisionSummary](#revisionsummary)_ | Reports information about the underlying IstioRevisions. 
| | | From c5e80218c2ab898b361f4a949f0e6eeb53ab14ad Mon Sep 17 00:00:00 2001 From: Nick Fox Date: Fri, 20 Sep 2024 11:04:21 -0400 Subject: [PATCH 08/25] Add External Controlplane to user docs (#335) * Add external controlplane to docs Signed-off-by: Nick Fox * Remove extra fields from examples and update some descriptions Signed-off-by: Nick Fox * Caps consistently and fix copy/pasta errors Signed-off-by: Nick Fox * Apply suggestions from code review Co-authored-by: Sridhar Gaddam Signed-off-by: Nick Fox --------- Signed-off-by: Nick Fox Co-authored-by: Sridhar Gaddam --- docs/README.md | 290 +++++++++++++++- docs/multicluster/controlplane-gateway.yaml | 355 ++++++++++++++++++++ 2 files changed, 641 insertions(+), 4 deletions(-) create mode 100644 docs/multicluster/controlplane-gateway.yaml diff --git a/docs/README.md b/docs/README.md index 3a7a4069f..a950d7ac6 100644 --- a/docs/README.md +++ b/docs/README.md @@ -22,6 +22,11 @@ - [RevisionBased](#revisionbased) - [Example using the RevisionBased strategy](#example-using-the-revisionbased-strategy) - [Multi-cluster](#multi-cluster) + - [Prerequisites](#prerequisites) + - [Common Setup](#common-setup) + - [Multi-Primary](#multi-primary---multi-network) + - [Primary-Remote](#primary-remote---multi-network) + - [External Control Plane](#external-controlplane) - [Addons](#addons) - [Deploy Prometheus and Jaeger addons](#deploy-prometheus-and-jaeger-addons) - [Deploy Kiali addon](#deploy-kiali-addon) @@ -472,13 +477,12 @@ Steps: You can use the Sail Operator and the Sail CRDs to manage a multi-cluster Istio deployment. The following instructions are adapted from the [Istio multi-cluster documentation](https://istio.io/latest/docs/setup/install/multicluster/) to demonstrate how you can setup the various deployment models with Sail. Please familiarize yourself with the different [deployment models](https://istio.io/latest/docs/ops/deployment/deployment-models/) before starting. -*Prerequisites* - -Each deployment model requires you to install the Sail Operator and the Sail CRDs to every cluster that is part of the mesh. +### Prerequisites - Install [istioctl](https://istio.io/latest/docs/setup/install/istioctl) and have it included in your `$PATH`. - Two kubernetes clusters with external lb support. (If using kind, `cloud-provider-kind` is running in the background) - kubeconfig file with a context for each cluster. +- Install the Sail Operator and the Sail CRDs to every cluster. ### Common Setup @@ -502,7 +506,7 @@ These steps are common to every multi-cluster deployment and should be completed kubectl get ns istio-system --context "${CTX_CLUSTER2}" || kubectl create namespace istio-system --context "${CTX_CLUSTER2}" ``` -2. Create shared trust and add intermediate CAs to each cluster. +3. Create shared trust and add intermediate CAs to each cluster. If you already have a [shared trust](https://istio.io/latest/docs/setup/install/multicluster/before-you-begin/#configure-trust) for each cluster you can skip this. Otherwise, you can use the instructions below to create a shared trust and push the intermediate CAs into your clusters. @@ -868,6 +872,284 @@ In this setup there is a Primary cluster (`cluster1`) and a Remote cluster (`clu kubectl delete ns sample --context="${CTX_CLUSTER2}" ``` +### External Control Plane + +These instructions install an [external control plane](https://istio.io/latest/docs/setup/install/external-controlplane/) Istio deployment using the Sail Operator and Sail CRDs. 
**Before you begin**, ensure you meet the requirements of the [common setup](#common-setup) and complete **only** the "Setup env vars" step. Unlike other Multi-Cluster deployments, you won't be creating a common CA in this setup. + +These installation instructions are adapted from [Istio's external control plane documentation](https://istio.io/latest/docs/setup/install/external-controlplane/) and are intended to be run in a development environment, such as `kind`, rather than in production. + +In this setup there is an external control plane cluster (`cluster1`) and a remote cluster (`cluster2`) which are on separate networks. + +1. Create an `Istio` resource on `cluster1` to manage the ingress gateways for the external control plane. + + ```sh + kubectl create namespace istio-system --context "${CTX_CLUSTER1}" + kubectl apply --context "${CTX_CLUSTER1}" -f - < /dev/null || \ + { kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.1.0" | kubectl apply -f - --context="${CTX_CLUSTER2}"; } + ``` + + Expose `helloworld` through the ingress gateway. + ```sh + kubectl apply -f https://raw.githubusercontent.com/istio/istio/${ISTIO_VERSION}/samples/helloworld/gateway-api/helloworld-gateway.yaml -n sample --context="${CTX_CLUSTER2}" + kubectl -n sample --context="${CTX_CLUSTER2}" wait --for=condition=programmed gtw helloworld-gateway + ``` + + Confirm you can access the `helloworld` application through the ingress gateway created in the Remote cluster. + ```sh + curl -s "http://$(kubectl -n sample --context="${CTX_CLUSTER2}" get gtw helloworld-gateway -o jsonpath='{.status.addresses[0].value}'):80/hello" + ``` + You should see a response from the `helloworld` application: + ```sh + Hello version: v1, instance: helloworld-v1-6d65866976-jb6qc + ``` + +15. Cleanup + + ```sh + kubectl delete istios default --context="${CTX_CLUSTER1}" + kubectl delete ns istio-system --context="${CTX_CLUSTER1}" + kubectl delete istios external-istiod --context="${CTX_CLUSTER1}" + kubectl delete ns external-istiod --context="${CTX_CLUSTER1}" + kubectl delete remoteistios external-istiod --context="${CTX_CLUSTER2}" + kubectl delete ns external-istiod --context="${CTX_CLUSTER2}" + kubectl delete ns sample --context="${CTX_CLUSTER2}" + ``` + ## Addons Addons are managed separately from the Sail Operator. You can follow the [istio documentation](https://istio.io/latest/docs/ops/integrations/) for how to install addons. Below is an example of how to install some addons for Istio. 
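As a quick sanity check of the external control plane wiring described in the steps above, you can confirm that both custom resources become healthy before deploying the sample application. This is only a sketch: the resource name `external-istiod` and the kube contexts are taken from the steps above, and the exact status strings shown depend on the operator version in use.

```sh
# On the external control plane cluster: the Istio resource that runs istiod for the remote mesh
kubectl get istio external-istiod --context="${CTX_CLUSTER1}"

# On the remote cluster: the RemoteIstio resource that wires workloads to the external istiod
kubectl get remoteistio external-istiod --context="${CTX_CLUSTER2}"
```

Both resources should report a ready/healthy state before you continue with the `helloworld` deployment and the ingress gateway test.
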
diff --git a/docs/multicluster/controlplane-gateway.yaml b/docs/multicluster/controlplane-gateway.yaml new file mode 100644 index 000000000..33689f854 --- /dev/null +++ b/docs/multicluster/controlplane-gateway.yaml @@ -0,0 +1,355 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: istio-ingressgateway + install.operator.istio.io/owning-resource: unknown + istio: ingressgateway + istio.io/rev: default + operator.istio.io/component: IngressGateways + release: istio + name: istio-ingressgateway-service-account + namespace: istio-system + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: istio-ingressgateway + install.operator.istio.io/owning-resource: unknown + istio: ingressgateway + istio.io/rev: default + operator.istio.io/component: IngressGateways + release: istio + name: istio-ingressgateway + namespace: istio-system +spec: + selector: + matchLabels: + app: istio-ingressgateway + istio: ingressgateway + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + annotations: + istio.io/rev: default + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + sidecar.istio.io/inject: "false" + labels: + app: istio-ingressgateway + chart: gateways + heritage: Tiller + install.operator.istio.io/owning-resource: unknown + istio: ingressgateway + istio.io/rev: default + operator.istio.io/component: IngressGateways + release: istio + service.istio.io/canonical-name: istio-ingressgateway + service.istio.io/canonical-revision: latest + sidecar.istio.io/inject: "false" + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: null + requiredDuringSchedulingIgnoredDuringExecution: null + containers: + - args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --proxyLogLevel=warning + - --proxyComponentLogLevel=misc:error + - --log_output_level=default:info + env: + - name: PILOT_CERT_PROVIDER + value: istiod + - name: CA_ADDR + value: istiod.istio-system.svc:15012 + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: ISTIO_CPU_LIMIT + valueFrom: + resourceFieldRef: + resource: limits.cpu + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_WORKLOAD_NAME + value: istio-ingressgateway + - name: ISTIO_META_OWNER + value: kubernetes://apis/apps/v1/namespaces/istio-system/deployments/istio-ingressgateway + - name: ISTIO_META_MESH_ID + value: cluster.local + - name: TRUST_DOMAIN + value: cluster.local + - name: ISTIO_META_UNPRIVILEGED_POD + value: "true" + - name: ISTIO_META_CLUSTER_ID + value: Kubernetes + - name: ISTIO_META_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: docker.io/istio/proxyv2:1.22.1 + name: istio-proxy + ports: + - containerPort: 15021 + protocol: TCP + - containerPort: 15012 + protocol: TCP + - containerPort: 15017 + protocol: TCP + - containerPort: 15090 + name: http-envoy-prom + protocol: TCP + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15021 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 
2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /var/run/secrets/workload-spiffe-uds + name: workload-socket + - mountPath: /var/run/secrets/credential-uds + name: credential-socket + - mountPath: /var/run/secrets/workload-spiffe-credentials + name: workload-certs + - mountPath: /etc/istio/proxy + name: istio-envoy + - mountPath: /etc/istio/config + name: config-volume + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/run/secrets/tokens + name: istio-token + readOnly: true + - mountPath: /var/lib/istio/data + name: istio-data + - mountPath: /etc/istio/pod + name: podinfo + - mountPath: /etc/istio/ingressgateway-certs + name: ingressgateway-certs + readOnly: true + - mountPath: /etc/istio/ingressgateway-ca-certs + name: ingressgateway-ca-certs + readOnly: true + securityContext: + runAsGroup: 1337 + runAsNonRoot: true + runAsUser: 1337 + serviceAccountName: istio-ingressgateway-service-account + volumes: + - emptyDir: {} + name: workload-socket + - emptyDir: {} + name: credential-socket + - emptyDir: {} + name: workload-certs + - configMap: + name: istio-ca-root-cert + name: istiod-ca-cert + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.labels + path: labels + - fieldRef: + fieldPath: metadata.annotations + path: annotations + name: podinfo + - emptyDir: {} + name: istio-envoy + - emptyDir: {} + name: istio-data + - name: istio-token + projected: + sources: + - serviceAccountToken: + audience: istio-ca + expirationSeconds: 43200 + path: istio-token + - configMap: + name: istio + optional: true + name: config-volume + - name: ingressgateway-certs + secret: + optional: true + secretName: istio-ingressgateway-certs + - name: ingressgateway-ca-certs + secret: + optional: true + secretName: istio-ingressgateway-ca-certs + +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + labels: + app: istio-ingressgateway + install.operator.istio.io/owning-resource: unknown + istio: ingressgateway + istio.io/rev: default + operator.istio.io/component: IngressGateways + release: istio + name: istio-ingressgateway + namespace: istio-system +spec: + minAvailable: 1 + selector: + matchLabels: + app: istio-ingressgateway + istio: ingressgateway + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + install.operator.istio.io/owning-resource: unknown + istio.io/rev: default + operator.istio.io/component: IngressGateways + release: istio + name: istio-ingressgateway-sds + namespace: istio-system +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - watch + - list + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + install.operator.istio.io/owning-resource: unknown + istio.io/rev: default + operator.istio.io/component: IngressGateways + release: istio + name: istio-ingressgateway-sds + namespace: istio-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: istio-ingressgateway-sds +subjects: +- kind: ServiceAccount + name: istio-ingressgateway-service-account + +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + labels: + app: istio-ingressgateway + install.operator.istio.io/owning-resource: unknown + istio: ingressgateway + istio.io/rev: default + 
operator.istio.io/component: IngressGateways + release: istio + name: istio-ingressgateway + namespace: istio-system +spec: + maxReplicas: 5 + metrics: + - resource: + name: cpu + target: + averageUtilization: 80 + type: Utilization + type: Resource + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-ingressgateway + +--- +apiVersion: v1 +kind: Service +metadata: + annotations: null + labels: + app: istio-ingressgateway + install.operator.istio.io/owning-resource: unknown + istio: ingressgateway + istio.io/rev: default + operator.istio.io/component: IngressGateways + release: istio + name: istio-ingressgateway + namespace: istio-system +spec: + ports: + - name: status-port + port: 15021 + protocol: TCP + targetPort: 15021 + - name: tls-xds + port: 15012 + protocol: TCP + targetPort: 15012 + - name: tls-webhook + port: 15017 + protocol: TCP + targetPort: 15017 + selector: + app: istio-ingressgateway + istio: ingressgateway + type: LoadBalancer + +--- From bd6e0482446decb56d55a7344296a29408dee4b4 Mon Sep 17 00:00:00 2001 From: Daniel Grimm Date: Mon, 23 Sep 2024 08:36:22 +0200 Subject: [PATCH 09/25] Use Kubernetes defaults for imagePullPolicy (#352) Signed-off-by: Daniel Grimm --- bundle/manifests/sailoperator.clusterserviceversion.yaml | 4 +--- chart/templates/deployment.yaml | 4 ++++ chart/values.yaml | 6 ++++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/bundle/manifests/sailoperator.clusterserviceversion.yaml b/bundle/manifests/sailoperator.clusterserviceversion.yaml index de1cd4691..69e8b3d42 100644 --- a/bundle/manifests/sailoperator.clusterserviceversion.yaml +++ b/bundle/manifests/sailoperator.clusterserviceversion.yaml @@ -34,7 +34,7 @@ metadata: capabilities: Seamless Upgrades categories: OpenShift Optional, Integration & Delivery, Networking, Security containerImage: quay.io/maistra-dev/sail-operator:0.2-latest - createdAt: "2024-09-19T14:41:31Z" + createdAt: "2024-09-22T10:00:56Z" description: Experimental operator for installing Istio service mesh features.operators.openshift.io/cnf: "false" features.operators.openshift.io/cni: "true" @@ -643,7 +643,6 @@ spec: - --logtostderr=true - --v=0 image: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 - imagePullPolicy: Always name: kube-rbac-proxy ports: - containerPort: 8443 @@ -668,7 +667,6 @@ spec: command: - /manager image: quay.io/maistra-dev/sail-operator:0.2-latest - imagePullPolicy: Always livenessProbe: httpGet: path: /healthz diff --git a/chart/templates/deployment.yaml b/chart/templates/deployment.yaml index 02c4c4646..d9fdcfd85 100644 --- a/chart/templates/deployment.yaml +++ b/chart/templates/deployment.yaml @@ -54,7 +54,9 @@ spec: - --logtostderr=true - --v=0 image: {{ .Values.proxy.image }} +{{- if .Values.proxy.imagePullPolicy }} imagePullPolicy: {{ .Values.proxy.imagePullPolicy }} +{{- end }} name: kube-rbac-proxy ports: - containerPort: 8443 @@ -81,7 +83,9 @@ spec: command: - /manager image: {{ .Values.image }} +{{- if .Values.proxy.imagePullPolicy }} imagePullPolicy: {{ .Values.imagePullPolicy }} +{{- end }} livenessProbe: httpGet: path: /healthz diff --git a/chart/values.yaml b/chart/values.yaml index 75d8d8799..9dae758ca 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -43,10 +43,12 @@ csv: features.operators.openshift.io/cni: "true" features.operators.openshift.io/csi: "false" image: quay.io/maistra-dev/sail-operator:0.2-latest -imagePullPolicy: Always +# We're commenting out the imagePullPolicy to use k8s defaults +# imagePullPolicy: Always proxy: image: 
gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 - imagePullPolicy: Always + # We're commenting out the imagePullPolicy to use k8s defaults + # imagePullPolicy: IfNotPresent # setting this to true will add resources required to generate the bundle using operator-sdk bundleGeneration: false From ab57cdf683aba2beb08b15f5a03b4267af76efc4 Mon Sep 17 00:00:00 2001 From: Francisco Herrera Date: Mon, 23 Sep 2024 19:46:23 +0200 Subject: [PATCH 10/25] Adding E2e multicluster test (#342) * Adding E2e multicluster test Signed-off-by: frherrer * Add istioctl util go build Signed-off-by: frherrer * Delete kind config breaking the test Signed-off-by: frherrer * Removing references to raw.githubusercontent on multicluster related YAML Signed-off-by: frherrer * Updates from review: improve cleanup, minor fixes Signed-off-by: frherrer --------- Signed-off-by: frherrer --- .devcontainer/devcontainer.json | 2 +- Makefile.core.mk | 28 +- common/scripts/kind_provisioner.sh | 13 +- common/scripts/setup_env.sh | 2 +- .../util/supportedversion/supportedversion.go | 27 ++ tests/e2e/common-operator-integ-suite.sh | 14 +- tests/e2e/config/default.yaml | 7 +- tests/e2e/config/multicluster.json | 14 + .../controlplane/control_plane_suite_test.go | 6 +- tests/e2e/controlplane/control_plane_test.go | 22 +- tests/e2e/integ-suite-kind.sh | 50 ++- .../multicluster_multiprimary_test.go | 344 +++++++++++++++++ .../multicluster_primaryremote_test.go | 346 ++++++++++++++++++ .../multicluster/multicluster_suite_test.go | 95 +++++ tests/e2e/operator/operator_suite_test.go | 6 +- tests/e2e/util/certs/certs.go | 280 ++++++++++++++ tests/e2e/util/client/client.go | 20 +- tests/e2e/util/common/e2e_utils.go | 57 ++- tests/e2e/util/istioctl/istioctl.go | 55 +++ tests/e2e/util/kubectl/kubectl.go | 102 +++++- 20 files changed, 1416 insertions(+), 74 deletions(-) create mode 100644 tests/e2e/config/multicluster.json create mode 100644 tests/e2e/multicluster/multicluster_multiprimary_test.go create mode 100644 tests/e2e/multicluster/multicluster_primaryremote_test.go create mode 100644 tests/e2e/multicluster/multicluster_suite_test.go create mode 100644 tests/e2e/util/certs/certs.go create mode 100644 tests/e2e/util/istioctl/istioctl.go diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 0b1d2b6d5..591b24ffa 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,6 +1,6 @@ { "name": "istio build-tools", - "image": "gcr.io/istio-testing/build-tools:master-0aa2afb4bac9a4fd1bfe50a929c077a643066b3a", + "image": "gcr.io/istio-testing/build-tools:master-8584ca511549c1cd96d9cb8b900297de83f4cb64", "privileged": true, "remoteEnv": { "USE_GKE_GCLOUD_AUTH_PLUGIN": "True", diff --git a/Makefile.core.mk b/Makefile.core.mk index 3a3cd3d7f..07a3ce318 100644 --- a/Makefile.core.mk +++ b/Makefile.core.mk @@ -165,8 +165,8 @@ test.e2e.ocp: ## Run the end-to-end tests against an existing OCP cluster. GINKGO_FLAGS="$(GINKGO_FLAGS)" ${SOURCE_DIR}/tests/e2e/integ-suite-ocp.sh .PHONY: test.e2e.kind -test.e2e.kind: ## Deploy a KinD cluster and run the end-to-end tests against it. - GINKGO_FLAGS="$(GINKGO_FLAGS)" ${SOURCE_DIR}/tests/e2e/integ-suite-kind.sh +test.e2e.kind: istioctl ## Deploy a KinD cluster and run the end-to-end tests against it. 
+ GINKGO_FLAGS="$(GINKGO_FLAGS)" ISTIOCTL="$(ISTIOCTL)" ${SOURCE_DIR}/tests/e2e/integ-suite-kind.sh .PHONY: test.e2e.describe test.e2e.describe: ## Runs ginkgo outline -format indent over the e2e test to show in BDD style the steps and test structure @@ -450,6 +450,7 @@ CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen ENVTEST ?= $(LOCALBIN)/setup-envtest GITLEAKS ?= $(LOCALBIN)/gitleaks OPM ?= $(LOCALBIN)/opm +ISTIOCTL ?= $(LOCALBIN)/istioctl ## Tool Versions OPERATOR_SDK_VERSION ?= v1.36.1 @@ -457,6 +458,7 @@ HELM_VERSION ?= v3.15.3 CONTROLLER_TOOLS_VERSION ?= v0.16.0 OPM_VERSION ?= v1.45.0 GITLEAKS_VERSION ?= v8.18.4 +ISTIOCTL_VERSION ?= 1.23.0 # GENERATE_RELATED_IMAGES defines whether `spec.relatedImages` is going to be generated or not # To disable set flag to false @@ -483,6 +485,28 @@ $(OPERATOR_SDK): $(LOCALBIN) curl -sSLfo $(LOCALBIN)/operator-sdk https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$(OS)_$(ARCH) && \ chmod +x $(LOCALBIN)/operator-sdk; +.PHONY: istioctl $(ISTIOCTL) +istioctl: $(ISTIOCTL) ## Download istioctl to bin directory. +istioctl: TARGET_OS=$(shell go env GOOS) +istioctl: TARGET_ARCH=$(shell go env GOARCH) +$(ISTIOCTL): $(LOCALBIN) + @test -s $(LOCALBIN)/istioctl || { \ + OSEXT=$(if $(filter $(TARGET_OS),Darwin),osx,linux); \ + URL="https://github.com/istio/istio/releases/download/$(ISTIOCTL_VERSION)/istioctl-$(ISTIOCTL_VERSION)-$$OSEXT-$(TARGET_ARCH).tar.gz"; \ + echo "Fetching istioctl from $$URL"; \ + curl -fsL $$URL -o /tmp/istioctl.tar.gz || { \ + echo "Download failed! Please check the URL and ISTIO_VERSION."; \ + exit 1; \ + }; \ + tar -xzf /tmp/istioctl.tar.gz -C /tmp || { \ + echo "Extraction failed!"; \ + exit 1; \ + }; \ + mv /tmp/istioctl $(LOCALBIN)/istioctl; \ + rm -f /tmp/istioctl.tar.gz; \ + echo "istioctl has been downloaded and placed in $(LOCALBIN)"; \ + } + .PHONY: controller-gen controller-gen: $(LOCALBIN) ## Download controller-gen to bin directory. If wrong version is installed, it will be overwritten. @test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \ diff --git a/common/scripts/kind_provisioner.sh b/common/scripts/kind_provisioner.sh index 9c372b9ca..a2a5691f5 100644 --- a/common/scripts/kind_provisioner.sh +++ b/common/scripts/kind_provisioner.sh @@ -34,6 +34,9 @@ set -x # DEFAULT_KIND_IMAGE is used to set the Kubernetes version for KinD unless overridden in params to setup_kind_cluster(s) DEFAULT_KIND_IMAGE="gcr.io/istio-testing/kind-node:v1.28.4" +# the default kind cluster should be ipv4 if not otherwise specified +IP_FAMILY="${IP_FAMILY:-ipv4}" + # COMMON_SCRIPTS contains the directory this file is in. COMMON_SCRIPTS=$(dirname "${BASH_SOURCE:-$0}") @@ -174,11 +177,6 @@ function setup_kind_cluster() { CONFIG=${DEFAULT_CLUSTER_YAML} fi - # Configure the ipFamily of the cluster - if [ -n "${IP_FAMILY}" ]; then - yq eval ".networking.ipFamily = \"${IP_FAMILY}\"" -i "${CONFIG}" - fi - KIND_WAIT_FLAG="--wait=180s" KIND_DISABLE_CNI="false" if [[ -n "${KUBERNETES_CNI:-}" ]]; then @@ -187,7 +185,8 @@ function setup_kind_cluster() { fi # Create KinD cluster - if ! (yq eval "${CONFIG}" --expression ".networking.disableDefaultCNI = ${KIND_DISABLE_CNI}" | \ + if ! 
(yq eval "${CONFIG}" --expression ".networking.disableDefaultCNI = ${KIND_DISABLE_CNI}" \ + --expression ".networking.ipFamily = \"${IP_FAMILY}\"" | \ kind create cluster --name="${NAME}" -v4 --retain --image "${IMAGE}" ${KIND_WAIT_FLAG:+"$KIND_WAIT_FLAG"} --config -); then echo "Could not setup KinD environment. Something wrong with KinD setup. Exporting logs." return 9 @@ -478,4 +477,4 @@ function ips_to_cidrs() { from ipaddress import summarize_address_range, IPv4Address [ print(n.compressed) for n in summarize_address_range(IPv4Address(u'$IP_RANGE_START'), IPv4Address(u'$IP_RANGE_END')) ] EOF -} +} \ No newline at end of file diff --git a/common/scripts/setup_env.sh b/common/scripts/setup_env.sh index ee932a946..e2c5b9211 100755 --- a/common/scripts/setup_env.sh +++ b/common/scripts/setup_env.sh @@ -75,7 +75,7 @@ fi TOOLS_REGISTRY_PROVIDER=${TOOLS_REGISTRY_PROVIDER:-gcr.io} PROJECT_ID=${PROJECT_ID:-istio-testing} if [[ "${IMAGE_VERSION:-}" == "" ]]; then - IMAGE_VERSION=master-0aa2afb4bac9a4fd1bfe50a929c077a643066b3a + IMAGE_VERSION=master-8584ca511549c1cd96d9cb8b900297de83f4cb64 fi if [[ "${IMAGE_NAME:-}" == "" ]]; then IMAGE_NAME=build-tools diff --git a/pkg/test/util/supportedversion/supportedversion.go b/pkg/test/util/supportedversion/supportedversion.go index 59cd44e56..58778d956 100644 --- a/pkg/test/util/supportedversion/supportedversion.go +++ b/pkg/test/util/supportedversion/supportedversion.go @@ -17,6 +17,8 @@ package supportedversion import ( "os" "path/filepath" + "regexp" + "strconv" "github.com/istio-ecosystem/sail-operator/pkg/test/project" "gopkg.in/yaml.v3" @@ -47,6 +49,12 @@ func init() { panic(err) } + // Major, Minor and Patch needs to be set from parsing the version string + for i := range versions.Versions { + v := &versions.Versions[i] + v.Major, v.Minor, v.Patch = parseVersion(v.Version) + } + List = versions.Versions Default = List[0].Name if len(List) > 1 { @@ -55,6 +63,22 @@ func init() { New = List[0].Name } +func parseVersion(version string) (int, int, int) { + // The version can have this formats: "1.22.2", "1.23.0-rc.1", "1.24-alpha" + re := regexp.MustCompile(`^(\d+)\.(\d+)\.?(\d*)`) + + matches := re.FindStringSubmatch(version) + if len(matches) < 4 { + return 0, 0, 0 + } + + major, _ := strconv.Atoi(matches[1]) + minor, _ := strconv.Atoi(matches[2]) + patch, _ := strconv.Atoi(matches[3]) + + return major, minor, patch +} + type Versions struct { Versions []VersionInfo `json:"versions"` } @@ -62,6 +86,9 @@ type Versions struct { type VersionInfo struct { Name string `json:"name"` Version string `json:"version"` + Major int `json:"major"` + Minor int `json:"minor"` + Patch int `json:"patch"` Repo string `json:"repo"` Branch string `json:"branch,omitempty"` Commit string `json:"commit"` diff --git a/tests/e2e/common-operator-integ-suite.sh b/tests/e2e/common-operator-integ-suite.sh index d4fdedb1d..935cd1a3e 100755 --- a/tests/e2e/common-operator-integ-suite.sh +++ b/tests/e2e/common-operator-integ-suite.sh @@ -29,6 +29,7 @@ parse_flags() { SKIP_DEPLOY=${SKIP_DEPLOY:-false} OLM=${OLM:-false} DESCRIBE=false + MULTICLUSTER=false while [ $# -gt 0 ]; do case "$1" in --ocp) @@ -39,6 +40,10 @@ parse_flags() { shift OCP=false ;; + --multicluster) + shift + MULTICLUSTER=true + ;; --skip-build) shift SKIP_BUILD=true @@ -80,6 +85,10 @@ parse_flags() { echo "Running on kind" fi + if [ "${MULTICLUSTER}" == "true" ]; then + echo "Running on multicluster" + fi + if [ "${SKIP_BUILD}" == "true" ]; then echo "Skipping build" fi @@ -108,6 +117,7 @@ initialize_variables() 
{ COMMAND="kubectl" ARTIFACTS="${ARTIFACTS:-$(mktemp -d)}" KUBECONFIG="${KUBECONFIG:-"${ARTIFACTS}/config"}" + ISTIOCTL="${ISTIOCTL:-"istioctl"}" LOCALBIN="${LOCALBIN:-${HOME}/bin}" OPERATOR_SDK=${LOCALBIN}/operator-sdk @@ -258,6 +268,6 @@ fi # Run the go test passing the env variables defined that are going to be used in the operator tests # shellcheck disable=SC2086 IMAGE="${HUB}/${IMAGE_BASE}:${TAG}" SKIP_DEPLOY="${SKIP_DEPLOY}" OCP="${OCP}" ISTIO_MANIFEST="${ISTIO_MANIFEST}" \ -NAMESPACE="${NAMESPACE}" CONTROL_PLANE_NS="${CONTROL_PLANE_NS}" DEPLOYMENT_NAME="${DEPLOYMENT_NAME}" \ -ISTIO_NAME="${ISTIO_NAME}" COMMAND="${COMMAND}" VERSIONS_YAML_FILE="${VERSIONS_YAML_FILE}" KUBECONFIG="${KUBECONFIG}" \ +NAMESPACE="${NAMESPACE}" CONTROL_PLANE_NS="${CONTROL_PLANE_NS}" DEPLOYMENT_NAME="${DEPLOYMENT_NAME}" MULTICLUSTER="${MULTICLUSTER}" ARTIFACTS="${ARTIFACTS}" \ +ISTIO_NAME="${ISTIO_NAME}" COMMAND="${COMMAND}" VERSIONS_YAML_FILE="${VERSIONS_YAML_FILE}" KUBECONFIG="${KUBECONFIG}" ISTIOCTL_PATH="${ISTIOCTL}" \ go run github.com/onsi/ginkgo/v2/ginkgo -tags e2e --timeout 30m --junit-report=report.xml ${GINKGO_FLAGS} "${WD}"/... diff --git a/tests/e2e/config/default.yaml b/tests/e2e/config/default.yaml index 58c947c9e..9f160497d 100644 --- a/tests/e2e/config/default.yaml +++ b/tests/e2e/config/default.yaml @@ -27,9 +27,4 @@ containerdConfigPatches: - |- [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"] endpoint = ["http://kind-registry:5000"] -networking: - # MAISTRA specific: - # our prow cluster uses serviceSubnet 10.96.0.0/12, so the kind cluster must use other subnet to correctly route traffic; - # in this case, address 10.224.0.0 is chosen randomly from available set of subnets. - serviceSubnet: "10.224.0.0/12" - ipFamily: ipv4 + diff --git a/tests/e2e/config/multicluster.json b/tests/e2e/config/multicluster.json new file mode 100644 index 000000000..247824a37 --- /dev/null +++ b/tests/e2e/config/multicluster.json @@ -0,0 +1,14 @@ +[ + { + "cluster_name": "primary", + "pod_subnet": "10.10.0.0/16", + "svc_subnet": "10.255.10.0/24", + "network_id": "0" + }, + { + "cluster_name": "remote", + "pod_subnet": "10.20.0.0/16", + "svc_subnet": "10.255.20.0/24", + "network_id": "1" + } +] \ No newline at end of file diff --git a/tests/e2e/controlplane/control_plane_suite_test.go b/tests/e2e/controlplane/control_plane_suite_test.go index 1603b380e..872a0578a 100644 --- a/tests/e2e/controlplane/control_plane_suite_test.go +++ b/tests/e2e/controlplane/control_plane_suite_test.go @@ -40,9 +40,13 @@ var ( skipDeploy = env.GetBool("SKIP_DEPLOY", false) expectedRegistry = env.Get("EXPECTED_REGISTRY", "^docker\\.io|^gcr\\.io") bookinfoNamespace = env.Get("BOOKINFO_NAMESPACE", "bookinfo") + multicluster = env.GetBool("MULTICLUSTER", false) ) func TestInstall(t *testing.T) { + if multicluster { + t.Skip("Skipping test for multicluster") + } RegisterFailHandler(Fail) setup() RunSpecs(t, "Control Plane Suite") @@ -52,6 +56,6 @@ func setup() { GinkgoWriter.Println("************ Running Setup ************") GinkgoWriter.Println("Initializing k8s client") - cl, err = k8sclient.InitK8sClient() + cl, err = k8sclient.InitK8sClient("") Expect(err).NotTo(HaveOccurred()) } diff --git a/tests/e2e/controlplane/control_plane_test.go b/tests/e2e/controlplane/control_plane_test.go index ab3007e2f..ec7a6574a 100644 --- a/tests/e2e/controlplane/control_plane_test.go +++ b/tests/e2e/controlplane/control_plane_test.go @@ -19,7 +19,6 @@ package controlplane import ( "fmt" "path/filepath" - "regexp" "strings" 
"time" @@ -43,12 +42,6 @@ import ( "istio.io/istio/pkg/ptr" ) -// version can have one of the following formats: -// - 1.22.2 -// - 1.23.0-rc.1 -// - 1.24-alpha -var istiodVersionRegex = regexp.MustCompile(`Version:"(\d+\.\d+(\.\d+)?(-\w+(\.\d+)?)?)`) - var _ = Describe("Control Plane Installation", Ordered, func() { SetDefaultEventuallyTimeout(180 * time.Second) SetDefaultEventuallyPollingInterval(time.Second) @@ -219,7 +212,7 @@ spec: It("deploys istiod", func(ctx SpecContext) { Eventually(common.GetObject).WithArguments(ctx, cl, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Istiod is not Available; unexpected Condition") - Expect(getVersionFromIstiod()).To(Equal(version.Version), "Unexpected istiod version") + Expect(common.GetVersionFromIstiod()).To(Equal(version.Version), "Unexpected istiod version") Success("Istiod is deployed in the namespace and Running") }) @@ -356,19 +349,6 @@ func ImageFromRegistry(regexp string) types.GomegaMatcher { return HaveField("Image", MatchRegexp(regexp)) } -func getVersionFromIstiod() (string, error) { - output, err := kubectl.Exec(controlPlaneNamespace, "deploy/istiod", "", "pilot-discovery version") - if err != nil { - return "", fmt.Errorf("error getting version from istiod: %w", err) - } - - matches := istiodVersionRegex.FindStringSubmatch(output) - if len(matches) > 1 && matches[1] != "" { - return matches[1], nil - } - return "", fmt.Errorf("error getting version from istiod: version not found in output: %s", output) -} - func indent(level int, str string) string { indent := strings.Repeat(" ", level) return indent + strings.ReplaceAll(str, "\n", "\n"+indent) diff --git a/tests/e2e/integ-suite-kind.sh b/tests/e2e/integ-suite-kind.sh index 39a5b415e..90a939851 100755 --- a/tests/e2e/integ-suite-kind.sh +++ b/tests/e2e/integ-suite-kind.sh @@ -29,9 +29,19 @@ export KIND_REGISTRY="localhost:${KIND_REGISTRY_PORT}" export DEFAULT_CLUSTER_YAML="${SCRIPTPATH}/config/default.yaml" export IP_FAMILY="${IP_FAMILY:-ipv4}" export ARTIFACTS="${ARTIFACTS:-$(mktemp -d)}" +export MULTICLUSTER="${MULTICLUSTER:-false}" +# Set variable to exclude kind clusters from kubectl annotations. +# You need to set kind clusters names separated by comma +export KIND_EXCLUDE_CLUSTERS="${KIND_EXCLUDE_CLUSTERS:-}" +export ISTIOCTL="${ISTIOCTL:-${ROOT}/bin/istioctl}" + # Set variable for cluster kind name export KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-operator-integration-tests}" +if [ "${MULTICLUSTER}" == "true" ]; then + export KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME}-1" + export KIND_CLUSTER_NAME_2="${KIND_CLUSTER_NAME}-2" +fi # Use the local registry instead of the default HUB export HUB="${KIND_REGISTRY}" @@ -52,16 +62,44 @@ function setup_kind_registry() { fi # https://docs.tilt.dev/choosing_clusters.html#discovering-the-registry - # TODO get context/config from existing variables - kind export kubeconfig --name="${KIND_CLUSTER_NAME}" - for node in $(kind get nodes --name="${KIND_CLUSTER_NAME}"); do - kubectl annotate node "${node}" "kind.x-k8s.io/registry=localhost:${KIND_REGISTRY_PORT}" --overwrite; + for cluster in $(kind get clusters); do + # TODO get context/config from existing variables + # Avoid adding the registry to excluded clusters. Use when you have multiple clusters running. 
+ if [[ "${KIND_EXCLUDE_CLUSTERS}" == *"${cluster}"* ]]; then + continue + fi + + kind export kubeconfig --name="${cluster}" + for node in $(kind get nodes --name="${cluster}"); do + kubectl annotate node "${node}" "kind.x-k8s.io/registry=localhost:${KIND_REGISTRY_PORT}" --overwrite; + done done } -KUBECONFIG="${ARTIFACTS}/config" setup_kind_cluster "${KIND_CLUSTER_NAME}" "" "" "true" "true" -setup_kind_registry +if [ "${MULTICLUSTER}" == "true" ]; then + CLUSTER_TOPOLOGY_CONFIG_FILE="${SCRIPTPATH}/config/multicluster.json" + load_cluster_topology "${CLUSTER_TOPOLOGY_CONFIG_FILE}" + setup_kind_clusters "" "" + setup_kind_registry + + export KUBECONFIG="${KUBECONFIGS[0]}" + export KUBECONFIG2="${KUBECONFIGS[1]}" +else + KUBECONFIG="${ARTIFACTS}/config" setup_kind_cluster "${KIND_CLUSTER_NAME}" "" "" "true" "true" + setup_kind_registry +fi + + +# Check that istioctl is present using ${ISTIOCTL} +if ! command -v "${ISTIOCTL}" &> /dev/null; then + echo "istioctl not found. Please set the ISTIOCTL environment variable to the path of the istioctl binary" + exit 1 +fi # Run the integration tests echo "Running integration tests" +if [ "${MULTICLUSTER}" == "true" ]; then + ARTIFACTS="${ARTIFACTS}" ISTIOCTL="${ISTIOCTL}" ./tests/e2e/common-operator-integ-suite.sh --kind --multicluster +else ARTIFACTS="${ARTIFACTS}" ./tests/e2e/common-operator-integ-suite.sh --kind +fi \ No newline at end of file diff --git a/tests/e2e/multicluster/multicluster_multiprimary_test.go b/tests/e2e/multicluster/multicluster_multiprimary_test.go new file mode 100644 index 000000000..97c404c52 --- /dev/null +++ b/tests/e2e/multicluster/multicluster_multiprimary_test.go @@ -0,0 +1,344 @@ +//go:build e2e + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package multicluster + +import ( + "context" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/istio-ecosystem/sail-operator/api/v1alpha1" + "github.com/istio-ecosystem/sail-operator/pkg/kube" + "github.com/istio-ecosystem/sail-operator/pkg/test/project" + . "github.com/istio-ecosystem/sail-operator/pkg/test/util/ginkgo" + "github.com/istio-ecosystem/sail-operator/pkg/test/util/supportedversion" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/certs" + common "github.com/istio-ecosystem/sail-operator/tests/e2e/util/common" + . "github.com/istio-ecosystem/sail-operator/tests/e2e/util/gomega" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/helm" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/istioctl" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Multicluster deployment models", Ordered, func() { + SetDefaultEventuallyTimeout(180 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + BeforeAll(func(ctx SpecContext) { + if !skipDeploy { + // Deploy the Sail Operator on both clusters + Expect(kubectl.CreateNamespace(namespace, kubeconfig)).To(Succeed(), "Namespace failed to be created on Cluster #1") + Expect(kubectl.CreateNamespace(namespace, kubeconfig2)).To(Succeed(), "Namespace failed to be created on Cluster #2") + + Expect(helm.Install("sail-operator", filepath.Join(project.RootDir, "chart"), "--namespace "+namespace, "--set=image="+image, "--kubeconfig "+kubeconfig)). + To(Succeed(), "Operator failed to be deployed in Cluster #1") + + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key(deploymentName, namespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Error getting Istio CRD") + Success("Operator is deployed in the Cluster #1 namespace and Running") + + Expect(helm.Install("sail-operator", filepath.Join(project.RootDir, "chart"), "--namespace "+namespace, "--set=image="+image, "--kubeconfig "+kubeconfig2)). + To(Succeed(), "Operator failed to be deployed in Cluster #2") + + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key(deploymentName, namespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Error getting Istio CRD") + Success("Operator is deployed in the Cluster #2 namespace and Running") + } + }) + + Describe("Multi-Primary Multi-Network configuration", func() { + // Test the Multi-Primary Multi-Network configuration for each supported Istio version + for _, version := range supportedversion.List { + Context("Istio version is: "+version.Version, func() { + When("Istio resources are created in both clusters with multicluster configuration", func() { + BeforeAll(func(ctx SpecContext) { + Expect(kubectl.CreateNamespace(controlPlaneNamespace, kubeconfig)).To(Succeed(), "Namespace failed to be created") + Expect(kubectl.CreateNamespace(controlPlaneNamespace, kubeconfig2)).To(Succeed(), "Namespace failed to be created") + + // Push the intermediate CA to both clusters + certs.PushIntermediateCA(controlPlaneNamespace, kubeconfig, "east", "network1", artifacts, clPrimary) + certs.PushIntermediateCA(controlPlaneNamespace, kubeconfig2, "west", "network2", artifacts, clRemote) + + // Wait for the secret to be created in both clusters + Eventually(func() error { + _, err := common.GetObject(context.Background(), clPrimary, kube.Key("cacerts", controlPlaneNamespace), &corev1.Secret{}) + return err + }).ShouldNot(HaveOccurred(), "Secret is not created on Cluster #1") + + Eventually(func() error { + _, err := common.GetObject(context.Background(), clRemote, kube.Key("cacerts", controlPlaneNamespace), &corev1.Secret{}) + return err + }).ShouldNot(HaveOccurred(), "Secret is not created on Cluster #1") + + multiclusterYAML := ` +apiVersion: sailoperator.io/v1alpha1 +kind: Istio +metadata: + name: default +spec: + version: %s + namespace: %s + values: + global: + meshID: %s + multiCluster: + clusterName: %s + network: %s` + multiclusterCluster1YAML := fmt.Sprintf(multiclusterYAML, version.Name, controlPlaneNamespace, "mesh1", "cluster1", "network1") + Log("Istio CR Cluster #1: 
", multiclusterCluster1YAML) + Expect(kubectl.CreateFromString(multiclusterCluster1YAML, kubeconfig)).To(Succeed(), "Istio Resource creation failed on Cluster #1") + + multiclusterCluster2YAML := fmt.Sprintf(multiclusterYAML, version.Name, controlPlaneNamespace, "mesh1", "cluster2", "network2") + Log("Istio CR Cluster #2: ", multiclusterCluster2YAML) + Expect(kubectl.CreateFromString(multiclusterCluster2YAML, kubeconfig2)).To(Succeed(), "Istio Resource creation failed on Cluster #2") + }) + + It("updates both Istio CR status to Ready", func(ctx SpecContext) { + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key(istioName), &v1alpha1.Istio{}). + Should(HaveCondition(v1alpha1.IstioConditionReady, metav1.ConditionTrue), "Istio is not Ready on Cluster #1; unexpected Condition") + Success("Istio CR is Ready on Cluster #1") + + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key(istioName), &v1alpha1.Istio{}). + Should(HaveCondition(v1alpha1.IstioConditionReady, metav1.ConditionTrue), "Istio is not Ready on Cluster #2; unexpected Condition") + Success("Istio CR is Ready on Cluster #1") + }) + + It("deploys istiod", func(ctx SpecContext) { + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Istiod is not Available on Cluster #1; unexpected Condition") + Expect(common.GetVersionFromIstiod()).To(Equal(version.Version), "Unexpected istiod version") + Success("Istiod is deployed in the namespace and Running on Cluster #1") + + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Istiod is not Available on Cluster #2; unexpected Condition") + Expect(common.GetVersionFromIstiod()).To(Equal(version.Version), "Unexpected istiod version") + Success("Istiod is deployed in the namespace and Running on Cluster #2") + }) + }) + + When("Gateway is created in both clusters", func() { + BeforeAll(func(ctx SpecContext) { + Expect(kubectl.Apply(controlPlaneNamespace, eastGatewayYAML, kubeconfig)).To(Succeed(), "Gateway creation failed on Cluster #1") + + Expect(kubectl.Apply(controlPlaneNamespace, westGatewayYAML, kubeconfig2)).To(Succeed(), "Gateway creation failed on Cluster #2") + + // Expose the Gateway service in both clusters + Expect(kubectl.Apply(controlPlaneNamespace, exposeServiceYAML, kubeconfig)).To(Succeed(), "Expose Service creation failed on Cluster #1") + Expect(kubectl.Apply(controlPlaneNamespace, exposeServiceYAML, kubeconfig2)).To(Succeed(), "Expose Service creation failed on Cluster #2") + }) + + It("updates both Gateway status to Available", func(ctx SpecContext) { + Eventually((common.GetObject)). + WithArguments(ctx, clPrimary, kube.Key("istio-eastwestgateway", controlPlaneNamespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Gateway is not Ready on Cluster #1; unexpected Condition") + + Eventually((common.GetObject)). + WithArguments(ctx, clRemote, kube.Key("istio-eastwestgateway", controlPlaneNamespace), &appsv1.Deployment{}). 
+ Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Gateway is not Ready on Cluster #2; unexpected Condition") + Success("Gateway is created and available in both clusters") + }) + }) + + When("are installed remote secrets on each cluster", func() { + BeforeAll(func(ctx SpecContext) { + // Get the internal IP of the control plane node in both clusters + internalIPCluster1, err := kubectl.GetInternalIP("node-role.kubernetes.io/control-plane", kubeconfig) + Expect(err).NotTo(HaveOccurred()) + Expect(internalIPCluster1).NotTo(BeEmpty(), "Internal IP is empty for Cluster #1") + + internalIPCluster2, err := kubectl.GetInternalIP("node-role.kubernetes.io/control-plane", kubeconfig2) + Expect(internalIPCluster2).NotTo(BeEmpty(), "Internal IP is empty for Cluster #2") + Expect(err).NotTo(HaveOccurred()) + + // Install a remote secret in Cluster #1 that provides access to the Cluster #2 API server. + secret, err := istioctl.CreateRemoteSecret(kubeconfig2, "cluster2", internalIPCluster2) + Expect(err).NotTo(HaveOccurred()) + Expect(kubectl.ApplyString("", secret, kubeconfig)).To(Succeed(), "Remote secret creation failed on Cluster #1") + + // Install a remote secret in Cluster #2 that provides access to the Cluster #1 API server. + secret, err = istioctl.CreateRemoteSecret(kubeconfig, "cluster1", internalIPCluster1) + Expect(err).NotTo(HaveOccurred()) + Expect(kubectl.ApplyString("", secret, kubeconfig2)).To(Succeed(), "Remote secret creation failed on Cluster #1") + }) + + It("remote secrets are created", func(ctx SpecContext) { + secret, err := common.GetObject(ctx, clPrimary, kube.Key("istio-remote-secret-cluster2", controlPlaneNamespace), &corev1.Secret{}) + Expect(err).NotTo(HaveOccurred()) + Expect(secret).NotTo(BeNil(), "Secret is not created on Cluster #1") + + secret, err = common.GetObject(ctx, clRemote, kube.Key("istio-remote-secret-cluster1", controlPlaneNamespace), &corev1.Secret{}) + Expect(err).NotTo(HaveOccurred()) + Expect(secret).NotTo(BeNil(), "Secret is not created on Cluster #2") + Success("Remote secrets are created in both clusters") + }) + }) + + When("sample apps are deployed in both clusters", func() { + BeforeAll(func(ctx SpecContext) { + // Deploy the sample app in both clusters + deploySampleApp("sample", version, kubeconfig, kubeconfig2) + Success("Sample app is deployed in both clusters") + }) + + It("updates the pods status to Ready", func(ctx SpecContext) { + samplePodsCluster1 := &corev1.PodList{} + + clPrimary.List(ctx, samplePodsCluster1, client.InNamespace("sample")) + Expect(samplePodsCluster1.Items).ToNot(BeEmpty(), "No pods found in bookinfo namespace") + + for _, pod := range samplePodsCluster1.Items { + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key(pod.Name, "sample"), &corev1.Pod{}). + Should(HaveCondition(corev1.PodReady, metav1.ConditionTrue), "Pod is not Ready on Cluster #1; unexpected Condition") + } + + samplePodsCluster2 := &corev1.PodList{} + clRemote.List(ctx, samplePodsCluster2, client.InNamespace("sample")) + Expect(samplePodsCluster2.Items).ToNot(BeEmpty(), "No pods found in bookinfo namespace") + + for _, pod := range samplePodsCluster2.Items { + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key(pod.Name, "sample"), &corev1.Pod{}). 
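                    // Readiness in both clusters matters here: the cross-cluster curl checks that follow
                    // assume every helloworld and sleep pod already has its sidecar up.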
+ Should(HaveCondition(corev1.PodReady, metav1.ConditionTrue), "Pod is not Ready on Cluster #2; unexpected Condition") + } + Success("Sample app is created in both clusters and Running") + }) + + It("can access the sample app from both clusters", func(ctx SpecContext) { + sleepPodNameCluster1, err := common.GetPodNameByLabel(ctx, clPrimary, "sample", "app", "sleep") + Expect(sleepPodNameCluster1).NotTo(BeEmpty(), "Sleep pod not found on Cluster #1") + Expect(err).NotTo(HaveOccurred(), "Error getting sleep pod name on Cluster #1") + + sleepPodNameCluster2, err := common.GetPodNameByLabel(ctx, clRemote, "sample", "app", "sleep") + Expect(sleepPodNameCluster2).NotTo(BeEmpty(), "Sleep pod not found on Cluster #2") + Expect(err).NotTo(HaveOccurred(), "Error getting sleep pod name on Cluster #2") + + // Run the curl command from the sleep pod in the Cluster #2 and get response list to validate that we get responses from both clusters + Cluster2Responses := strings.Join(getListCurlResponses(sleepPodNameCluster2, kubeconfig2), "\n") + Expect(Cluster2Responses).To(ContainSubstring("Hello version: v1"), "Responses from Cluster #2 are not the expected") + Expect(Cluster2Responses).To(ContainSubstring("Hello version: v2"), "Responses from Cluster #2 are not the expected") + + // Run the curl command from the sleep pod in the Cluster #1 and get response list to validate that we get responses from both clusters + Cluster1Responses := strings.Join(getListCurlResponses(sleepPodNameCluster1, kubeconfig), "\n") + Expect(Cluster1Responses).To(ContainSubstring("Hello version: v1"), "Responses from Cluster #1 are not the expected") + Expect(Cluster1Responses).To(ContainSubstring("Hello version: v2"), "Responses from Cluster #1 are not the expected") + Success("Sample app is accessible from both clusters") + }) + }) + + When("istio CR is deleted in both clusters", func() { + BeforeEach(func() { + // Delete the Istio CR in both clusters + Expect(kubectl.Delete(controlPlaneNamespace, "istio", istioName, kubeconfig)).To(Succeed(), "Istio CR failed to be deleted") + Expect(kubectl.Delete(controlPlaneNamespace, "istio", istioName, kubeconfig2)).To(Succeed(), "Istio CR failed to be deleted") + Success("Istio CR is deleted in both clusters") + }) + + It("removes istiod pod", func(ctx SpecContext) { + // Check istiod pod is deleted in both clusters + Eventually(clPrimary.Get).WithArguments(ctx, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). + Should(ReturnNotFoundError(), "Istiod should not exist anymore on Cluster #1") + Eventually(clRemote.Get).WithArguments(ctx, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). 
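                // The same assertion runs against Cluster #2: deleting the Istio CR is expected to
                // garbage-collect the istiod Deployment it owns there as well.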
+ Should(ReturnNotFoundError(), "Istiod should not exist anymore on Cluster #2") + }) + }) + + AfterAll(func(ctx SpecContext) { + // Delete namespace to ensure clean up for new tests iteration + Expect(kubectl.DeleteNamespace(controlPlaneNamespace, kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Cluster #1") + Expect(kubectl.DeleteNamespace(controlPlaneNamespace, kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Cluster #2") + + common.CheckNamespaceEmpty(ctx, clPrimary, controlPlaneNamespace) + common.CheckNamespaceEmpty(ctx, clRemote, controlPlaneNamespace) + Success("ControlPlane Namespaces are empty") + + // Delete the entire sample namespace in both clusters + Expect(kubectl.DeleteNamespace("sample", kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Cluster #1") + Expect(kubectl.DeleteNamespace("sample", kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Cluster #2") + + common.CheckNamespaceEmpty(ctx, clPrimary, "sample") + common.CheckNamespaceEmpty(ctx, clRemote, "sample") + Success("Sample app is deleted in both clusters") + }) + }) + } + }) + + AfterAll(func(ctx SpecContext) { + // Delete the Sail Operator from both clusters + Expect(kubectl.DeleteNamespace(namespace, kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Cluster #1") + Expect(kubectl.DeleteNamespace(namespace, kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Cluster #2") + + // Delete the intermediate CA from both clusters + common.CheckNamespaceEmpty(ctx, clPrimary, namespace) + common.CheckNamespaceEmpty(ctx, clRemote, namespace) + }) +}) + +// deploySampleApp deploys the sample app in the given cluster +func deploySampleApp(ns string, istioVersion supportedversion.VersionInfo, kubeconfig string, kubeconfig2 string) { + // Create the namespace + Expect(kubectl.CreateNamespace(ns, kubeconfig)).To(Succeed(), "Namespace failed to be created") + Expect(kubectl.CreateNamespace(ns, kubeconfig2)).To(Succeed(), "Namespace failed to be created") + + // Label the namespace + Expect(kubectl.Patch("", "namespace", ns, "merge", `{"metadata":{"labels":{"istio-injection":"enabled"}}}`)). + To(Succeed(), "Error patching sample namespace") + Expect(kubectl.Patch("", "namespace", ns, "merge", `{"metadata":{"labels":{"istio-injection":"enabled"}}}`, kubeconfig2)). 
+ To(Succeed(), "Error patching sample namespace") + + version := istioVersion.Version + // Deploy the sample app from upstream URL in both clusters + if istioVersion.Name == "latest" { + version = "master" + } + helloWorldURL := fmt.Sprintf("https://raw.githubusercontent.com/istio/istio/%s/samples/helloworld/helloworld.yaml", version) + Expect(kubectl.ApplyWithLabels(ns, helloWorldURL, "service=helloworld", kubeconfig)).To(Succeed(), "Sample service deploy failed on Cluster #1") + Expect(kubectl.ApplyWithLabels(ns, helloWorldURL, "service=helloworld", kubeconfig2)).To(Succeed(), "Sample service deploy failed on Cluster #2") + + Expect(kubectl.ApplyWithLabels(ns, helloWorldURL, "version=v1", kubeconfig)).To(Succeed(), "Sample service deploy failed on Cluster #1") + Expect(kubectl.ApplyWithLabels(ns, helloWorldURL, "version=v2", kubeconfig2)).To(Succeed(), "Sample service deploy failed on Cluster #2") + + sleepURL := fmt.Sprintf("https://raw.githubusercontent.com/istio/istio/%s/samples/sleep/sleep.yaml", version) + Expect(kubectl.Apply(ns, sleepURL, kubeconfig)).To(Succeed(), "Sample sleep deploy failed on Cluster #1") + Expect(kubectl.Apply(ns, sleepURL, kubeconfig2)).To(Succeed(), "Sample sleep deploy failed on Cluster #2") +} + +// getListCurlResponses runs the curl command 10 times from the sleep pod in the given cluster and get response list +func getListCurlResponses(podName, kubeconfig string) []string { + var responses []string + for i := 0; i < 10; i++ { + response, err := kubectl.Exec("sample", podName, "sleep", "curl -sS helloworld.sample:5000/hello", kubeconfig) + Expect(err).NotTo(HaveOccurred()) + responses = append(responses, response) + } + return responses +} diff --git a/tests/e2e/multicluster/multicluster_primaryremote_test.go b/tests/e2e/multicluster/multicluster_primaryremote_test.go new file mode 100644 index 000000000..798db3c2c --- /dev/null +++ b/tests/e2e/multicluster/multicluster_primaryremote_test.go @@ -0,0 +1,346 @@ +//go:build e2e + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package multicluster + +import ( + "context" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/istio-ecosystem/sail-operator/api/v1alpha1" + "github.com/istio-ecosystem/sail-operator/pkg/kube" + "github.com/istio-ecosystem/sail-operator/pkg/test/project" + . "github.com/istio-ecosystem/sail-operator/pkg/test/util/ginkgo" + "github.com/istio-ecosystem/sail-operator/pkg/test/util/supportedversion" + certs "github.com/istio-ecosystem/sail-operator/tests/e2e/util/certs" + common "github.com/istio-ecosystem/sail-operator/tests/e2e/util/common" + . "github.com/istio-ecosystem/sail-operator/tests/e2e/util/gomega" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/helm" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/istioctl" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Multicluster deployment models", Ordered, func() { + SetDefaultEventuallyTimeout(180 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + BeforeAll(func(ctx SpecContext) { + if !skipDeploy { + // Deploy the Sail Operator on both clusters + Expect(kubectl.CreateNamespace(namespace, kubeconfig)).To(Succeed(), "Namespace failed to be created on Primary Cluster") + Expect(kubectl.CreateNamespace(namespace, kubeconfig2)).To(Succeed(), "Namespace failed to be created on Remote Cluster") + + Expect(helm.Install("sail-operator", filepath.Join(project.RootDir, "chart"), "--namespace "+namespace, "--set=image="+image, "--kubeconfig "+kubeconfig)). + To(Succeed(), "Operator failed to be deployed in Primary Cluster") + + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key(deploymentName, namespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Error getting Istio CRD") + Success("Operator is deployed in the Primary namespace and Running") + + Expect(helm.Install("sail-operator", filepath.Join(project.RootDir, "chart"), "--namespace "+namespace, "--set=image="+image, "--kubeconfig "+kubeconfig2)). + To(Succeed(), "Operator failed to be deployed in Remote Cluster") + + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key(deploymentName, namespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Error getting Istio CRD") + Success("Operator is deployed in the Remote namespace and Running") + } + }) + + Describe("Primary-Remote - Multi-Network configuration", func() { + // Test the Primary-Remote - Multi-Network configuration for each supported Istio version + for _, version := range supportedversion.List { + // The Primary-Remote - Multi-Network configuration is only supported in Istio 1.23 and later + if version.Major < 1 || (version.Major == 1 && version.Minor < 23) { + continue + } + + Context("Istio version is: "+version.Version, func() { + When("Istio resources are created in both clusters", func() { + BeforeAll(func(ctx SpecContext) { + Expect(kubectl.CreateNamespace(controlPlaneNamespace, kubeconfig)).To(Succeed(), "Namespace failed to be created") + Expect(kubectl.CreateNamespace(controlPlaneNamespace, kubeconfig2)).To(Succeed(), "Namespace failed to be created") + + // Push the intermediate CA to both clusters + Expect(certs.PushIntermediateCA(controlPlaneNamespace, kubeconfig, "east", "network1", artifacts, clPrimary)). + To(Succeed(), "Error pushing intermediate CA to Primary Cluster") + Expect(certs.PushIntermediateCA(controlPlaneNamespace, kubeconfig2, "west", "network2", artifacts, clRemote)). 
+ To(Succeed(), "Error pushing intermediate CA to Remote Cluster") + + // Wait for the secret to be created in both clusters + Eventually(func() error { + _, err := common.GetObject(context.Background(), clPrimary, kube.Key("cacerts", controlPlaneNamespace), &corev1.Secret{}) + return err + }).ShouldNot(HaveOccurred(), "Secret is not created on Primary Cluster") + + Eventually(func() error { + _, err := common.GetObject(context.Background(), clRemote, kube.Key("cacerts", controlPlaneNamespace), &corev1.Secret{}) + return err + }).ShouldNot(HaveOccurred(), "Secret is not created on Primary Cluster") + + PrimaryYAML := ` +apiVersion: sailoperator.io/v1alpha1 +kind: Istio +metadata: + name: default +spec: + version: %s + namespace: %s + values: + pilot: + env: + EXTERNAL_ISTIOD: "true" + global: + meshID: %s + multiCluster: + clusterName: %s + network: %s` + multiclusterPrimaryYAML := fmt.Sprintf(PrimaryYAML, version.Name, controlPlaneNamespace, "mesh1", "cluster1", "network1") + Log("Istio CR Primary: ", multiclusterPrimaryYAML) + Expect(kubectl.CreateFromString(multiclusterPrimaryYAML, kubeconfig)).To(Succeed(), "Istio Resource creation failed on Primary Cluster") + }) + + It("updates Istio CR on Primary cluster status to Ready", func(ctx SpecContext) { + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key(istioName), &v1alpha1.Istio{}). + Should(HaveCondition(v1alpha1.IstioConditionReady, metav1.ConditionTrue), "Istio is not Ready on Primary; unexpected Condition") + Success("Istio CR is Ready on Primary Cluster") + }) + + It("deploys istiod", func(ctx SpecContext) { + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Istiod is not Available on Primary; unexpected Condition") + Expect(common.GetVersionFromIstiod()).To(Equal(version.Version), "Unexpected istiod version") + Success("Istiod is deployed in the namespace and Running on Primary Cluster") + }) + }) + + When("Gateway is created on Primary cluster ", func() { + BeforeAll(func(ctx SpecContext) { + Expect(kubectl.Apply(controlPlaneNamespace, eastGatewayYAML, kubeconfig)).To(Succeed(), "Gateway creation failed on Primary Cluster") + + // Expose istiod service in Primary cluster + Expect(kubectl.Apply(controlPlaneNamespace, exposeIstiodYAML, kubeconfig)).To(Succeed(), "Expose Istiod creation failed on Primary Cluster") + + // Expose the Gateway service in both clusters + Expect(kubectl.Apply(controlPlaneNamespace, exposeServiceYAML, kubeconfig)).To(Succeed(), "Expose Service creation failed on Primary Cluster") + }) + + It("updates Gateway status to Available", func(ctx SpecContext) { + Eventually((common.GetObject)). + WithArguments(ctx, clPrimary, kube.Key("istio-eastwestgateway", controlPlaneNamespace), &appsv1.Deployment{}). 
+ Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Gateway is not Ready on Primary; unexpected Condition") + }) + }) + + When("RemoteIstio is created in Remote cluster", func() { + BeforeAll(func(ctx SpecContext) { + RemoteYAML := ` +apiVersion: sailoperator.io/v1alpha1 +kind: RemoteIstio +metadata: + name: default +spec: + version: %s + namespace: istio-system + values: + istiodRemote: + injectionPath: /inject/cluster/remote/net/network2 + global: + remotePilotAddress: %s` + + remotePilotAddress, err := common.GetSVCLoadBalancerAddress(ctx, clPrimary, controlPlaneNamespace, "istio-eastwestgateway") + Expect(remotePilotAddress).NotTo(BeEmpty(), "Remote Pilot Address is empty") + Expect(err).NotTo(HaveOccurred(), "Error getting Remote Pilot Address") + remoteIstioYAML := fmt.Sprintf(RemoteYAML, version.Name, remotePilotAddress) + Log("RemoteIstio CR: ", remoteIstioYAML) + By("Creating RemoteIstio CR on Remote Cluster") + Expect(kubectl.CreateFromString(remoteIstioYAML, kubeconfig2)).To(Succeed(), "RemoteIstio Resource creation failed on Remote Cluster") + + // Set the controlplane cluster and network for Remote namespace + By("Patching the istio-system namespace on Remote Cluster") + Expect( + kubectl.Patch("", + "namespace", + controlPlaneNamespace, + "merge", + `{"metadata":{"annotations":{"topology.istio.io/controlPlaneClusters":"cluster1"}}}`, + kubeconfig2)). + To(Succeed(), "Error patching istio-system namespace") + Expect( + kubectl.Patch("", + "namespace", + controlPlaneNamespace, + "merge", + `{"metadata":{"labels":{"topology.istio.io/network":"network2"}}}`, + kubeconfig2)). + To(Succeed(), "Error patching istio-system namespace") + + // To be able to access the remote cluster from the primary cluster, we need to create a secret in the primary cluster + // RemoteIstio resource will not be Ready until the secret is created + // Get the internal IP of the control plane node in Remote cluster + internalIPRemote, err := kubectl.GetInternalIP("node-role.kubernetes.io/control-plane", kubeconfig2) + Expect(internalIPRemote).NotTo(BeEmpty(), "Internal IP is empty for Remote Cluster") + Expect(err).NotTo(HaveOccurred()) + + // Wait for the RemoteIstio CR to be created, this can be moved to a condition verification, but the resource it not will be Ready at this point + time.Sleep(5 * time.Second) + + // Install a remote secret in Primary cluster that provides access to the Remote cluster API server. + By("Creating Remote Secret on Primary Cluster") + secret, err := istioctl.CreateRemoteSecret(kubeconfig2, "remote", internalIPRemote) + Expect(err).NotTo(HaveOccurred()) + Expect(kubectl.ApplyString("", secret, kubeconfig)).To(Succeed(), "Remote secret creation failed on Primary Cluster") + }) + + It("secret is created", func(ctx SpecContext) { + secret, err := common.GetObject(ctx, clPrimary, kube.Key("istio-remote-secret-remote", controlPlaneNamespace), &corev1.Secret{}) + Expect(err).NotTo(HaveOccurred()) + Expect(secret).NotTo(BeNil(), "Secret is not created on Primary Cluster") + Success("Remote secret is created in Primary cluster") + }) + + It("updates RemoteIstio CR status to Ready", func(ctx SpecContext) { + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key(istioName), &v1alpha1.RemoteIstio{}). 
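                    // As noted in the BeforeAll above, the RemoteIstio resource only turns Ready once the
                    // remote secret gives the primary control plane access to this cluster's API server,
                    // so this assertion implicitly verifies that wiring.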
+ Should(HaveCondition(v1alpha1.IstioConditionReady, metav1.ConditionTrue), "Istio is not Ready on Remote; unexpected Condition") + Success("RemoteIstio CR is Ready on Remote Cluster") + }) + }) + + When("gateway is created in Remote cluster", func() { + BeforeAll(func(ctx SpecContext) { + Expect(kubectl.Apply(controlPlaneNamespace, westGatewayYAML, kubeconfig2)).To(Succeed(), "Gateway creation failed on Remote Cluster") + Success("Gateway is created in Remote cluster") + }) + + It("updates Gateway status to Available", func(ctx SpecContext) { + Eventually((common.GetObject)). + WithArguments(ctx, clRemote, kube.Key("istio-eastwestgateway", controlPlaneNamespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Gateway is not Ready on Remote; unexpected Condition") + Success("Gateway is created and available in Remote cluster") + }) + }) + + When("sample apps are deployed in both clusters", func() { + BeforeAll(func(ctx SpecContext) { + // Deploy the sample app in both clusters + deploySampleApp("sample", version, kubeconfig, kubeconfig2) + Success("Sample app is deployed in both clusters") + }) + + It("updates the pods status to Ready", func(ctx SpecContext) { + samplePodsPrimary := &corev1.PodList{} + + clPrimary.List(ctx, samplePodsPrimary, client.InNamespace("sample")) + Expect(samplePodsPrimary.Items).ToNot(BeEmpty(), "No pods found in bookinfo namespace") + + for _, pod := range samplePodsPrimary.Items { + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key(pod.Name, "sample"), &corev1.Pod{}). + Should(HaveCondition(corev1.PodReady, metav1.ConditionTrue), "Pod is not Ready on Primary; unexpected Condition") + } + + samplePodsRemote := &corev1.PodList{} + clRemote.List(ctx, samplePodsRemote, client.InNamespace("sample")) + Expect(samplePodsRemote.Items).ToNot(BeEmpty(), "No pods found in bookinfo namespace") + + for _, pod := range samplePodsRemote.Items { + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key(pod.Name, "sample"), &corev1.Pod{}). 
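                    // Sidecars for these remote-cluster pods are injected by the external istiod running
                    // on the primary, reached via the injectionPath set in the RemoteIstio values.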
+ Should(HaveCondition(corev1.PodReady, metav1.ConditionTrue), "Pod is not Ready on Remote; unexpected Condition") + } + Success("Sample app is created in both clusters and Running") + }) + + It("can access the sample app from both clusters", func(ctx SpecContext) { + sleepPodNamePrimary, err := common.GetPodNameByLabel(ctx, clPrimary, "sample", "app", "sleep") + Expect(sleepPodNamePrimary).NotTo(BeEmpty(), "Sleep pod not found on Primary Cluster") + Expect(err).NotTo(HaveOccurred(), "Error getting sleep pod name on Primary Cluster") + + sleepPodNameRemote, err := common.GetPodNameByLabel(ctx, clRemote, "sample", "app", "sleep") + Expect(sleepPodNameRemote).NotTo(BeEmpty(), "Sleep pod not found on Remote Cluster") + Expect(err).NotTo(HaveOccurred(), "Error getting sleep pod name on Remote Cluster") + + // Run the curl command from the sleep pod in the Remote Cluster and get response list to validate that we get responses from both clusters + remoteResponses := strings.Join(getListCurlResponses(sleepPodNameRemote, kubeconfig2), "\n") + Expect(remoteResponses).To(ContainSubstring("Hello version: v1"), "Responses from Remote Cluster are not the expected") + Expect(remoteResponses).To(ContainSubstring("Hello version: v2"), "Responses from Remote Cluster are not the expected") + + // Run the curl command from the sleep pod in the Primary Cluster and get response list to validate that we get responses from both clusters + primaryResponses := strings.Join(getListCurlResponses(sleepPodNamePrimary, kubeconfig), "\n") + Expect(primaryResponses).To(ContainSubstring("Hello version: v1"), "Responses from Primary Cluster are not the expected") + Expect(primaryResponses).To(ContainSubstring("Hello version: v2"), "Responses from Primary Cluster are not the expected") + Success("Sample app is accessible from both clusters") + }) + }) + + When("Istio CR and RemoteIstio CR are deleted in both clusters", func() { + BeforeEach(func() { + Expect(kubectl.Delete(controlPlaneNamespace, "istio", istioName, kubeconfig)).To(Succeed(), "Istio CR failed to be deleted") + Expect(kubectl.Delete(controlPlaneNamespace, "remoteistio", istioName, kubeconfig2)).To(Succeed(), "RemoteIstio CR failed to be deleted") + Success("Istio and RemoteIstio are deleted") + }) + + It("removes istiod on Primary", func(ctx SpecContext) { + Eventually(clPrimary.Get).WithArguments(ctx, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). 
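                // Only the primary cluster runs istiod in this topology, so its removal is the relevant
                // signal that deleting the Istio and RemoteIstio CRs cleaned up the control plane.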
+ Should(ReturnNotFoundError(), "Istiod should not exist anymore") + Success("Istiod is deleted on Primary Cluster") + }) + }) + + AfterAll(func(ctx SpecContext) { + // Delete namespace to ensure clean up for new tests iteration + Expect(kubectl.DeleteNamespace(controlPlaneNamespace, kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Primary Cluster") + Expect(kubectl.DeleteNamespace(controlPlaneNamespace, kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Remote Cluster") + + common.CheckNamespaceEmpty(ctx, clPrimary, controlPlaneNamespace) + common.CheckNamespaceEmpty(ctx, clRemote, controlPlaneNamespace) + Success("ControlPlane Namespaces are empty") + + // Delete the entire sample namespace in both clusters + Expect(kubectl.DeleteNamespace("sample", kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Primary Cluster") + Expect(kubectl.DeleteNamespace("sample", kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Remote Cluster") + + common.CheckNamespaceEmpty(ctx, clPrimary, "sample") + common.CheckNamespaceEmpty(ctx, clRemote, "sample") + Success("Sample app is deleted in both clusters") + }) + }) + } + }) + + AfterAll(func(ctx SpecContext) { + // Delete the Sail Operator from both clusters + Expect(kubectl.DeleteNamespace(namespace, kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Primary Cluster") + Expect(kubectl.DeleteNamespace(namespace, kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Remote Cluster") + + // Check that the namespace is empty + common.CheckNamespaceEmpty(ctx, clPrimary, namespace) + common.CheckNamespaceEmpty(ctx, clRemote, namespace) + }) +}) diff --git a/tests/e2e/multicluster/multicluster_suite_test.go b/tests/e2e/multicluster/multicluster_suite_test.go new file mode 100644 index 000000000..5c0cd061a --- /dev/null +++ b/tests/e2e/multicluster/multicluster_suite_test.go @@ -0,0 +1,95 @@ +//go:build e2e + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package multicluster + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/certs" + k8sclient "github.com/istio-ecosystem/sail-operator/tests/e2e/util/client" + env "github.com/istio-ecosystem/sail-operator/tests/e2e/util/env" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + clPrimary client.Client + clRemote client.Client + err error + ocp = env.GetBool("OCP", false) + namespace = env.Get("NAMESPACE", "sail-operator") + deploymentName = env.Get("DEPLOYMENT_NAME", "sail-operator") + controlPlaneNamespace = env.Get("CONTROL_PLANE_NS", "istio-system") + istioName = env.Get("ISTIO_NAME", "default") + image = env.Get("IMAGE", "quay.io/maistra-dev/sail-operator:latest") + skipDeploy = env.GetBool("SKIP_DEPLOY", false) + multicluster = env.GetBool("MULTICLUSTER", false) + kubeconfig = env.Get("KUBECONFIG", "") + kubeconfig2 = env.Get("KUBECONFIG2", "") + artifacts = env.Get("ARTIFACTS", "/tmp/artifacts") + + eastGatewayYAML string + westGatewayYAML string + exposeServiceYAML string + exposeIstiodYAML string +) + +func TestInstall(t *testing.T) { + if !multicluster { + t.Skip("Skipping test. Only valid for multicluster") + } + if ocp { + t.Skip("Skipping test. Not valid for OCP") + // TODO: Implement the steps to run the test on OCP + } + RegisterFailHandler(Fail) + setup(t) + RunSpecs(t, "Control Plane Suite") +} + +func setup(t *testing.T) { + GinkgoWriter.Println("************ Running Setup ************") + + GinkgoWriter.Println("Initializing k8s client") + clPrimary, err = k8sclient.InitK8sClient(kubeconfig) + clRemote, err = k8sclient.InitK8sClient(kubeconfig2) + if err != nil { + t.Fatalf("Error initializing k8s client: %v", err) + } + + err := certs.CreateIntermediateCA(artifacts) + if err != nil { + t.Fatalf("Error creating intermediate CA: %v", err) + } + + // Set the path for the multicluster YAML files to be used + workDir, err := os.Getwd() + if err != nil { + t.Fatalf("Error getting working directory: %v", err) + } + + // Set base path + baseRepoDir := filepath.Join(workDir, "../../..") + eastGatewayYAML = fmt.Sprintf("%s/docs/multicluster/east-west-gateway-net1.yaml", baseRepoDir) + westGatewayYAML = fmt.Sprintf("%s/docs/multicluster/east-west-gateway-net2.yaml", baseRepoDir) + exposeServiceYAML = fmt.Sprintf("%s/docs/multicluster/expose-services.yaml", baseRepoDir) + exposeIstiodYAML = fmt.Sprintf("%s/docs/multicluster/expose-istiod.yaml", baseRepoDir) +} diff --git a/tests/e2e/operator/operator_suite_test.go b/tests/e2e/operator/operator_suite_test.go index ebe1c972d..32b737594 100644 --- a/tests/e2e/operator/operator_suite_test.go +++ b/tests/e2e/operator/operator_suite_test.go @@ -33,9 +33,13 @@ var ( image = env.Get("IMAGE", "quay.io/maistra-dev/sail-operator:latest") namespace = env.Get("NAMESPACE", "sail-operator") deploymentName = env.Get("DEPLOYMENT_NAME", "sail-operator") + multicluster = env.GetBool("MULTICLUSTER", false) ) func TestInstall(t *testing.T) { + if multicluster { + t.Skip("Skipping test for multicluster") + } RegisterFailHandler(Fail) setup() RunSpecs(t, "Install Operator Suite") @@ -46,7 +50,7 @@ func setup() { GinkgoWriter.Println("Initializing k8s client") var err error - cl, err = k8sclient.InitK8sClient() + cl, err = k8sclient.InitK8sClient("") Expect(err).NotTo(HaveOccurred()) if ocp { diff --git a/tests/e2e/util/certs/certs.go b/tests/e2e/util/certs/certs.go new file mode 100644 index 000000000..78aaaa004 --- /dev/null +++ b/tests/e2e/util/certs/certs.go @@ -0,0 +1,280 @@ +//go:build e2e + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package certs + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/istio-ecosystem/sail-operator/pkg/kube" + common "github.com/istio-ecosystem/sail-operator/tests/e2e/util/common" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/shell" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// CreateIntermediateCA creates the intermediate CA +func CreateIntermediateCA(basePath string) error { + certsDir := filepath.Join(basePath, "certs") + + // Create the certs directory + err := os.MkdirAll(certsDir, 0o755) + if err != nil { + return fmt.Errorf("failed to create certs directory: %w", err) + } + + // Create the root CA configuration file + err = createRootCAConf(certsDir) + if err != nil { + return fmt.Errorf("failed to create root-ca.conf: %w", err) + } + + // Step 1: Generate root-key.pem + rootKey := filepath.Join(certsDir, "root-key.pem") + _, err = shell.ExecuteCommand(fmt.Sprintf("openssl genrsa -out %s 4096", rootKey)) + if err != nil { + return fmt.Errorf("failed to generate root-key.pem: %w", err) + } + + // Step 2: Generate root-cert.csr using root-key.pem and root-ca.conf + rootCSR := filepath.Join(certsDir, "root-cert.csr") + rootConf := filepath.Join(certsDir, "root-ca.conf") // You'll need to ensure root-ca.conf exists + _, err = shell.ExecuteCommand(fmt.Sprintf("openssl req -sha256 -new -key %s -config %s -out %s", rootKey, rootConf, rootCSR)) + if err != nil { + return fmt.Errorf("failed to generate root-cert.csr: %w", err) + } + + // Step 3: Generate root-cert.pem + rootCert := filepath.Join(certsDir, "root-cert.pem") + _, err = shell.ExecuteCommand( + fmt.Sprintf("openssl x509 -req -sha256 -days 3650 -signkey %s -extensions req_ext -extfile %s -in %s -out %s", + rootKey, rootConf, rootCSR, rootCert)) + if err != nil { + return fmt.Errorf("failed to generate root-cert.pem: %w", err) + } + + // Step 4: Generate east-cacerts (self-signed intermediate certificates) + // Create directories for east and west if needed + eastDir := filepath.Join(certsDir, "east") + westDir := filepath.Join(certsDir, "west") + + // Create the east and west directories + err = os.MkdirAll(eastDir, 0o755) + if err != nil { + return fmt.Errorf("failed to create east directory: %w", err) + } + err = os.MkdirAll(westDir, 0o755) + if err != nil { + return fmt.Errorf("failed to create west directory: %w", err) + } + + // Create the intermediate CA configuration file + err = createIntermediateCAConf(eastDir) + if err != nil { + return fmt.Errorf("failed to create ca.conf on east dir: %w", err) + } + + err = createIntermediateCAConf(westDir) + if err != nil { + return fmt.Errorf("failed to create ca.conf on west dir: %w", err) + } + + err = generateIntermediateCACertificates(eastDir, rootCert, rootKey) + if err != nil { + return fmt.Errorf("failed to generate east intermediate CA certificates: %w", err) + } + + err = generateIntermediateCACertificates(westDir, rootCert, rootKey) + if err != nil { + return 
fmt.Errorf("failed to generate west intermediate CA certificates: %w", err) + } + + return nil +} + +func generateIntermediateCACertificates(dir string, rootCert string, rootKey string) error { + caKey := filepath.Join(dir, "ca-key.pem") + _, err := shell.ExecuteCommand(fmt.Sprintf("openssl genrsa -out %s 4096", caKey)) + if err != nil { + return fmt.Errorf("failed to generate east-ca-key.pem: %w", err) + } + + caCSR := filepath.Join(dir, "ca-cert.csr") + caConf := filepath.Join(dir, "ca.conf") + _, err = shell.ExecuteCommand(fmt.Sprintf("openssl req -sha256 -new -config %s -key %s -out %s", caConf, caKey, caCSR)) + if err != nil { + return fmt.Errorf("failed to generate east-ca-cert.csr: %w", err) + } + + caCert := filepath.Join(dir, "ca-cert.pem") + _, err = shell.ExecuteCommand( + fmt.Sprintf("openssl x509 -req -sha256 -days 3650 -CA %s -CAkey %s -CAcreateserial -extensions req_ext -extfile %s -in %s -out %s", + rootCert, rootKey, caConf, caCSR, caCert)) + if err != nil { + return fmt.Errorf("failed to generate east-ca-cert.pem: %w", err) + } + + certChain := filepath.Join(dir, "cert-chain.pem") + _, err = shell.ExecuteCommand(fmt.Sprintf("cat %s %s > %s", caCert, rootCert, certChain)) + if err != nil { + return fmt.Errorf("failed to generate east-cert-chain.pem: %w", err) + } + + return nil +} + +// createRootCAConf creates the root CA configuration file +func createRootCAConf(certsDir string) error { + confPath := filepath.Join(certsDir, "root-ca.conf") + confContent := ` +[ req ] +encrypt_key = no +prompt = no +utf8 = yes +default_md = sha256 +default_bits = 4096 +req_extensions = req_ext +x509_extensions = req_ext +distinguished_name = req_dn + +[ req_ext ] +subjectKeyIdentifier = hash +basicConstraints = critical, CA:true +keyUsage = critical, digitalSignature, nonRepudiation, keyEncipherment, keyCertSign + +[ req_dn ] +O = Istio +CN = Root CA +` + + // Write the configuration file to the directory + return writeFile(confPath, confContent) +} + +// createIntermediateCAConf creates the intermediate CA configuration file +func createIntermediateCAConf(certsDir string) error { + confPath := filepath.Join(certsDir, "ca.conf") + confContent := fmt.Sprintf(` +[ req ] +encrypt_key = no +prompt = no +utf8 = yes +default_md = sha256 +default_bits = 4096 +req_extensions = req_ext +x509_extensions = req_ext +distinguished_name = req_dn + +[ req_ext ] +subjectKeyIdentifier = hash +basicConstraints = critical, CA:true, pathlen:0 +keyUsage = critical, digitalSignature, nonRepudiation, keyEncipherment, keyCertSign +subjectAltName=@san + +[ san ] +DNS.1 = istiod.istio-system.svc + +[ req_dn ] +O = Istio +CN = Intermediate CA +L = %s +`, confPath) + + // Write the configuration file to the directory + return writeFile(confPath, confContent) +} + +// writeFile writes the content to the file +func writeFile(confPath string, confContent string) error { + file, err := os.Create(confPath) + if err != nil { + return fmt.Errorf("failed to create %s: %v", confPath, err) + } + defer file.Close() + + _, err = file.WriteString(confContent) + if err != nil { + return fmt.Errorf("failed to write to %s: %v", confPath, err) + } + + return nil +} + +// PushIntermediateCA pushes the intermediate CA to the cluster +func PushIntermediateCA(ns, kubeconfig, zone, network, basePath string, cl client.Client) error { + // Set cert dir + certDir := filepath.Join(basePath, "certs") + + // Check if the secret exists in the cluster + _, err := common.GetObject(context.Background(), cl, kube.Key("cacerts", ns), 
&corev1.Secret{}) + if err != nil { + // Label the namespace with the network + err = kubectl.Patch("", "namespace", ns, "merge", `{"metadata":{"labels":{"topology.istio.io/network":"`+network+`"}}}`, kubeconfig) + if err != nil { + return fmt.Errorf("failed to label namespace: %w", err) + } + + // Read the pem content from the files + caCertPath := filepath.Join(certDir, zone, "ca-cert.pem") + caKeyPath := filepath.Join(certDir, zone, "ca-key.pem") + rootCertPath := filepath.Join(certDir, "root-cert.pem") + certChainPath := filepath.Join(certDir, zone, "cert-chain.pem") + + // Read the pem content from the files to create the secret + caCert, err := os.ReadFile(caCertPath) + if err != nil { + return fmt.Errorf("failed to read ca-cert.pem: %w", err) + } + caKey, err := os.ReadFile(caKeyPath) + if err != nil { + return fmt.Errorf("failed to read ca-key.pem: %w", err) + } + rootCert, err := os.ReadFile(rootCertPath) + if err != nil { + return fmt.Errorf("failed to read root-cert.pem: %w", err) + } + certChain, err := os.ReadFile(certChainPath) + if err != nil { + return fmt.Errorf("failed to read cert-chain.pem: %w", err) + } + + // Create the secret by using the client in the cluster and the files created in the setup + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cacerts", + Namespace: ns, + }, + Data: map[string][]byte{ + "ca-cert.pem": caCert, + "ca-key.pem": caKey, + "root-cert.pem": rootCert, + "cert-chain.pem": certChain, + }, + } + + err = cl.Create(context.Background(), secret) + if err != nil { + return fmt.Errorf("failed to create secret: %w", err) + } + } + + return nil +} diff --git a/tests/e2e/util/client/client.go b/tests/e2e/util/client/client.go index 9ab45ee74..efebdad0f 100644 --- a/tests/e2e/util/client/client.go +++ b/tests/e2e/util/client/client.go @@ -27,8 +27,17 @@ import ( ) // getConfig returns the configuration of the kubernetes go-client -func getConfig() (*rest.Config, error) { - // use the current context in kubeconfig +func getConfig(kubeconfig string) (*rest.Config, error) { + // If kubeconfig is provided, use it + if kubeconfig != "" { + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return nil, fmt.Errorf("error building config: %w", err) + } + + return config, nil + } + // If not kubeconfig is provided use the current context in kubeconfig config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG")) if err != nil { return nil, fmt.Errorf("error building config: %w", err) @@ -38,8 +47,11 @@ func getConfig() (*rest.Config, error) { } // InitK8sClient returns the kubernetes clientset -func InitK8sClient() (client.Client, error) { - config, err := getConfig() +// Arguments: +// Kubeconfig: string +// Set kubeconfig to "" to use the current context in kubeconfig +func InitK8sClient(kubeconfig string) (client.Client, error) { + config, err := getConfig(kubeconfig) if err != nil { return nil, fmt.Errorf("error getting config for k8s client: %w", err) } diff --git a/tests/e2e/util/common/e2e_utils.go b/tests/e2e/util/common/e2e_utils.go index a787a4b49..e17e07686 100644 --- a/tests/e2e/util/common/e2e_utils.go +++ b/tests/e2e/util/common/e2e_utils.go @@ -18,6 +18,8 @@ package common import ( "context" + "fmt" + "regexp" "strings" "time" @@ -39,6 +41,12 @@ var ( istioName = env.Get("ISTIO_NAME", "default") istioCniName = env.Get("ISTIOCNI_NAME", "default") istioCniNamespace = env.Get("ISTIOCNI_NAMESPACE", "istio-cni") + + // version can have one of the following formats: + // - 1.22.2 + // - 
1.23.0-rc.1 + // - 1.24-alpha + istiodVersionRegex = regexp.MustCompile(`Version:"(\d+\.\d+(\.\d+)?(-\w+(\.\d+)?)?)`) ) // getObject returns the object with the given key @@ -53,6 +61,36 @@ func GetList(ctx context.Context, cl client.Client, list client.ObjectList, opts return list, err } +// GetPodNameByLabel returns the name of the pod with the given label +func GetPodNameByLabel(ctx context.Context, cl client.Client, ns, labelKey, labelValue string) (string, error) { + podList := &corev1.PodList{} + err := cl.List(ctx, podList, client.InNamespace(ns), client.MatchingLabels{labelKey: labelValue}) + if err != nil { + return "", err + } + if len(podList.Items) == 0 { + return "", fmt.Errorf("no pod found with label %s=%s", labelKey, labelValue) + } + return podList.Items[0].Name, nil +} + +// GetSVCAddress returns the address of the service with the given name +func GetSVCLoadBalancerAddress(ctx context.Context, cl client.Client, ns, svcName string) (string, error) { + svc := &corev1.Service{} + err := cl.Get(ctx, client.ObjectKey{Namespace: ns, Name: svcName}, svc) + if err != nil { + return "", err + } + + // To avoid flakiness, wait for the LoadBalancer to be ready + Eventually(func() ([]corev1.LoadBalancerIngress, error) { + err := cl.Get(ctx, client.ObjectKey{Namespace: ns, Name: svcName}, svc) + return svc.Status.LoadBalancer.Ingress, err + }, "1m", "1s").ShouldNot(BeEmpty(), "LoadBalancer should be ready") + + return svc.Status.LoadBalancer.Ingress[0].IP, nil +} + // checkNamespaceEmpty checks if the given namespace is empty func CheckNamespaceEmpty(ctx SpecContext, cl client.Client, ns string) { // TODO: Check to add more validations @@ -107,7 +145,7 @@ func logOperatorDebugInfo() { logDebugElement("Events in "+namespace, events, err) // Temporaty information to gather more details about failure - pods, err := kubectl.GetPods(namespace, "-o wide") + pods, err := kubectl.GetPods(namespace, "", "-o wide") logDebugElement("Pods in "+namespace, pods, err) describe, err := kubectl.Describe(namespace, "deployment", deploymentName) @@ -118,7 +156,7 @@ func logIstioDebugInfo() { resource, err := kubectl.GetYAML("", "istio", istioName) logDebugElement("Istio YAML", resource, err) - output, err := kubectl.GetPods(controlPlaneNamespace, "-o wide") + output, err := kubectl.GetPods(controlPlaneNamespace, "", "-o wide") logDebugElement("Pods in "+controlPlaneNamespace, output, err) logs, err := kubectl.Logs(controlPlaneNamespace, "deploy/istiod", ptr.Of(120*time.Second)) @@ -139,7 +177,7 @@ func logCNIDebugInfo() { logDebugElement("Events in "+istioCniNamespace, events, err) // Temporaty information to gather more details about failure - pods, err := kubectl.GetPods(istioCniNamespace, "-o wide") + pods, err := kubectl.GetPods(istioCniNamespace, "", "-o wide") logDebugElement("Pods in "+istioCniNamespace, pods, err) describe, err := kubectl.Describe(istioCniNamespace, "daemonset", "istio-cni-node") @@ -155,3 +193,16 @@ func logDebugElement(caption string, info string, err error) { GinkgoWriter.Println(indent + strings.ReplaceAll(strings.TrimSpace(info), "\n", "\n"+indent)) } } + +func GetVersionFromIstiod() (string, error) { + output, err := kubectl.Exec(controlPlaneNamespace, "deploy/istiod", "", "pilot-discovery version") + if err != nil { + return "", fmt.Errorf("error getting version from istiod: %w", err) + } + + matches := istiodVersionRegex.FindStringSubmatch(output) + if len(matches) > 1 && matches[1] != "" { + return matches[1], nil + } + return "", fmt.Errorf("error getting version from 
istiod: version not found in output: %s", output) +} diff --git a/tests/e2e/util/istioctl/istioctl.go b/tests/e2e/util/istioctl/istioctl.go new file mode 100644 index 000000000..e055f5828 --- /dev/null +++ b/tests/e2e/util/istioctl/istioctl.go @@ -0,0 +1,55 @@ +//go:build e2e + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR Condition OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package istioctl + +import ( + "fmt" + + env "github.com/istio-ecosystem/sail-operator/tests/e2e/util/env" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/shell" +) + +var istioctlBinary = env.Get("ISTIOCTL_PATH", "istioctl") + +// Istioctl returns the istioctl command +// If the environment variable COMMAND is set, it will return the value of COMMAND +// Otherwise, it will return the default value "istioctl" as default +// Arguments: +// - format: format of the command without istioctl +// - args: arguments of the command +func istioctl(format string, args ...interface{}) string { + binary := "istioctl" + if istioctlBinary != "" { + binary = istioctlBinary + } + + cmd := fmt.Sprintf(format, args...) + + return fmt.Sprintf("%s %s", binary, cmd) +} + +// CreateRemoteSecret creates a secret in the remote cluster +// Arguments: +// - remoteKubeconfig: kubeconfig of the remote cluster +// - secretName: name of the secret +// - internalIP: internal IP of the remote cluster +func CreateRemoteSecret(remoteKubeconfig string, secretName string, internalIP string) (string, error) { + cmd := istioctl("create-remote-secret --kubeconfig %s --name %s --server=https://%s:6443", remoteKubeconfig, secretName, internalIP) + yaml, err := shell.ExecuteCommand(cmd) + + return yaml, err +} diff --git a/tests/e2e/util/kubectl/kubectl.go b/tests/e2e/util/kubectl/kubectl.go index 7d4e8f054..931470851 100644 --- a/tests/e2e/util/kubectl/kubectl.go +++ b/tests/e2e/util/kubectl/kubectl.go @@ -25,6 +25,14 @@ import ( const DefaultBinary = "kubectl" +// optionalKubeconfig add the flag --kubeconfig if the kubeconfig is set +func optionalKubeconfig(kubeconfig []string) string { + if len(kubeconfig) > 0 && kubeconfig[0] != "" { + return fmt.Sprintf("--kubeconfig %s", kubeconfig[0]) + } + return "" +} + // kubectl return the kubectl command // If the environment variable COMMAND is set, it will return the value of COMMAND // Otherwise, it will return the default value "kubectl" as default @@ -41,8 +49,8 @@ func kubectl(format string, args ...interface{}) string { } // CreateFromString creates a resource from the given yaml string -func CreateFromString(yamlString string) error { - cmd := kubectl("create -f -") +func CreateFromString(yamlString string, kubeconfig ...string) error { + cmd := kubectl("create %s -f -", optionalKubeconfig(kubeconfig)) _, err := shell.ExecuteCommandWithInput(cmd, yamlString) if err != nil { return fmt.Errorf("error creating resource from yaml: %w", err) @@ -51,8 +59,15 @@ func CreateFromString(yamlString string) error { } // ApplyString applies the given yaml string to the cluster -func ApplyString(ns, yamlString string) error 
{ - cmd := kubectl("apply -n %s --server-side -f -", ns) +func ApplyString(ns, yamlString string, kubeconfig ...string) error { + nsflag := nsflag(ns) + // If the namespace is empty, we need to remove the flag because it will fail + // TODO: improve the nsflag function to handle this case + if ns == "" { + nsflag = "" + } + + cmd := kubectl("apply %s %s --server-side -f -", nsflag, optionalKubeconfig(kubeconfig)) _, err := shell.ExecuteCommandWithInput(cmd, yamlString) if err != nil { return fmt.Errorf("error applying yaml: %w", err) @@ -62,8 +77,14 @@ func ApplyString(ns, yamlString string) error { } // Apply applies the given yaml file to the cluster -func Apply(ns, yamlFile string) error { - cmd := kubectl("apply -n %s -f %s", ns, yamlFile) +func Apply(ns, yamlFile string, kubeconfig ...string) error { + err := ApplyWithLabels(ns, yamlFile, "", kubeconfig...) + return err +} + +// ApplyWithLabels applies the given yaml file to the cluster with the given labels +func ApplyWithLabels(ns, yamlFile string, label string, kubeconfig ...string) error { + cmd := kubectl("apply -n %s %s -f %s %s", ns, labelFlag(label), yamlFile, optionalKubeconfig(kubeconfig)) _, err := shell.ExecuteCommand(cmd) if err != nil { return fmt.Errorf("error applying yaml: %w", err) @@ -72,10 +93,24 @@ func Apply(ns, yamlFile string) error { return nil } +// DeleteFromFile deletes a resource from the given yaml file +func DeleteFromFile(yamlFile string, kubeconfig ...string) error { + cmd := kubectl("delete -f %s %s", yamlFile, optionalKubeconfig(kubeconfig)) + _, err := shell.ExecuteCommand(cmd) + if err != nil { + return fmt.Errorf("error deleting resource from yaml: %w", err) + } + + return nil +} + // CreateNamespace creates a namespace // If the namespace already exists, it will return nil -func CreateNamespace(ns string) error { - cmd := kubectl("create namespace %s", ns) +// Arguments: +// - ns: namespace +// - kubeconfig: optional kubeconfig to set the target file +func CreateNamespace(ns string, kubeconfig ...string) error { + cmd := kubectl("create namespace %s %s", ns, optionalKubeconfig(kubeconfig)) output, err := shell.ExecuteCommand(cmd) if err != nil { if strings.Contains(output, "AlreadyExists") { @@ -89,8 +124,11 @@ func CreateNamespace(ns string) error { } // DeleteNamespace deletes a namespace -func DeleteNamespace(ns string) error { - cmd := kubectl("delete namespace %s", ns) +// Arguments: +// - ns: namespace +// - kubeconfig: optional kubeconfig to set the target file +func DeleteNamespace(ns string, kubeconfig ...string) error { + cmd := kubectl("delete namespace %s %s", ns, optionalKubeconfig(kubeconfig)) _, err := shell.ExecuteCommand(cmd) if err != nil { return fmt.Errorf("error deleting namespace: %w", err) @@ -99,9 +137,9 @@ func DeleteNamespace(ns string) error { return nil } -// Delete deletes a resource based on the namespace, kind and the name -func Delete(ns, kind, name string) error { - cmd := kubectl("delete %s %s %s", kind, name, nsflag(ns)) +// Delete deletes a resource based on the namespace, kind and the name. Optionally, you can provide a kubeconfig +func Delete(ns, kind, name string, kubeconfig ...string) error { + cmd := kubectl("delete %s %s %s %s", kind, name, nsflag(ns), optionalKubeconfig(kubeconfig)) _, err := shell.ExecuteCommand(cmd) if err != nil { return fmt.Errorf("error deleting deployment: %w", err) @@ -124,8 +162,8 @@ func DeleteCRDs(crds []string) error { } // Patch patches a resource. 
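The variadic `kubeconfig` argument threaded through these helpers keeps existing single-cluster callers unchanged while letting multicluster tests target a specific cluster. A minimal usage sketch, assuming only the signatures introduced in this diff; the namespace, manifest file and kubeconfig path below are placeholders, not values from the repository:

```go
package example

import "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl"

func deployHelloWorldOnBothClusters() error {
	// Placeholder values for illustration only.
	const (
		ns          = "sample"
		manifest    = "helloworld.yaml"
		kubeconfig2 = "/tmp/cluster2.kubeconfig"
	)

	// No kubeconfig argument: the helper behaves exactly as before this change,
	// using kubectl's current context.
	if err := kubectl.CreateNamespace(ns); err != nil {
		return err
	}

	// Explicit kubeconfig: the same helper now runs against the second cluster.
	if err := kubectl.CreateNamespace(ns, kubeconfig2); err != nil {
		return err
	}

	// Label-filtered apply against the remote cluster.
	return kubectl.ApplyWithLabels(ns, manifest, "version=v2", kubeconfig2)
}
```

The later `KubectlBuilder` refactor (PATCH 18 below) wraps the same operations in per-cluster clients, e.g. `kubectlClient2.SetNamespace(ns).Apply(manifest)`.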
-func Patch(ns, kind, name, patchType, patch string) error { - cmd := kubectl(`patch %s %s %s --type=%s -p=%q`, kind, name, prepend("-n", ns), patchType, patch) +func Patch(ns, kind, name, patchType, patch string, kubeconfig ...string) error { + cmd := kubectl(`patch %s %s %s %s --type=%s -p=%q`, kind, name, prepend("-n", ns), optionalKubeconfig(kubeconfig), patchType, patch) _, err := shell.ExecuteCommand(cmd) if err != nil { return fmt.Errorf("error patching resource: %w", err) @@ -152,8 +190,13 @@ func GetYAML(ns, kind, name string) (string, error) { } // GetPods returns the pods of a namespace -func GetPods(ns string, args ...string) (string, error) { - cmd := kubectl("get pods %s %s", nsflag(ns), strings.Join(args, " ")) +func GetPods(ns string, kubeconfig string, args ...string) (string, error) { + kubeconfigFlag := "" + if kubeconfig != "" { + kubeconfigFlag = fmt.Sprintf("--kubeconfig %s", kubeconfig) + } + + cmd := kubectl("get pods %s %s %s", nsflag(ns), strings.Join(args, " "), kubeconfigFlag) output, err := shell.ExecuteCommand(cmd) if err != nil { return "", fmt.Errorf("error getting pods: %w, output: %s", err, output) @@ -188,6 +231,20 @@ func Describe(ns, kind, name string) (string, error) { return output, nil } +// GetInternalIP returns the internal IP of a node +// Arguments: +// - label: label of the node +// - kubeconfig: optional kubeconfig to set the target file +func GetInternalIP(label string, kubeconfig ...string) (string, error) { + cmd := kubectl("get nodes -l %s -o jsonpath='{.items[0].status.addresses[?(@.type==\"InternalIP\")].address}' %s", label, optionalKubeconfig(kubeconfig)) + output, err := shell.ExecuteCommand(cmd) + if err != nil { + return "", fmt.Errorf("error getting internal IP: %w, output: %s", err, output) + } + + return output, nil +} + // Logs returns the logs of a deployment // Arguments: // - ns: namespace @@ -210,8 +267,8 @@ func sinceFlag(since *time.Duration) string { } // Exec executes a command in the pod or specific container -func Exec(ns, pod, container, command string) (string, error) { - cmd := kubectl("exec %s %s %s -- %s", pod, containerflag(container), nsflag(ns), command) +func Exec(ns, pod, container, command string, kubeconfig ...string) (string, error) { + cmd := kubectl("exec %s %s %s %s -- %s", pod, containerflag(container), nsflag(ns), optionalKubeconfig(kubeconfig), command) output, err := shell.ExecuteCommand(cmd) if err != nil { return "", err @@ -234,6 +291,13 @@ func nsflag(ns string) string { return "-n " + ns } +func labelFlag(label string) string { + if label == "" { + return "" + } + return "-l " + label +} + func containerflag(container string) string { if container == "" { return "" From 005d1c37cb295ef16c1d1b6986aa12016189c760 Mon Sep 17 00:00:00 2001 From: Francisco Herrera Date: Tue, 24 Sep 2024 12:06:24 +0200 Subject: [PATCH 11/25] Adding wait after olm install in olm test setup (#357) Adding sleep Adding timout to wait in olm setup Delete sleep Adding more debug information Adding more debug information when operator-sdk fails Fix label on wait condition Change wait condition for catalogSource Delete comment Improve wait for catalogSource Signed-off-by: frherrer --- tests/e2e/common-operator-integ-suite.sh | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/tests/e2e/common-operator-integ-suite.sh b/tests/e2e/common-operator-integ-suite.sh index 935cd1a3e..304c2b005 100755 --- a/tests/e2e/common-operator-integ-suite.sh +++ b/tests/e2e/common-operator-integ-suite.sh @@ 
-244,10 +244,27 @@ if [ "${SKIP_BUILD}" == "false" ]; then # Install OLM in the cluster because it's not available by default in kind. ${OPERATOR_SDK} olm install + # Wait for for the CatalogSource to be CatalogSource.status.connectionState.lastObservedState == READY + ${COMMAND} wait catalogsource operatorhubio-catalog -n olm --for 'jsonpath={.status.connectionState.lastObservedState}=READY' --timeout=5m + # Create operator namespace ${COMMAND} create ns "${NAMESPACE}" || echo "Creation of namespace ${NAMESPACE} failed with the message: $?" # Deploy the operator using OLM - ${OPERATOR_SDK} run bundle "${BUNDLE_IMG}" -n "${NAMESPACE}" --skip-tls --timeout 5m + ${OPERATOR_SDK} run bundle "${BUNDLE_IMG}" -n "${NAMESPACE}" --skip-tls --timeout 5m || { + echo "****** run bundle failed, running debug information" + # Get all the pods in the namespace + ${COMMAND} get pods -n "${NAMESPACE}" + + # Get all the pods in olm namespace + ${COMMAND} get pods -n olm + + # Describe all the olm pods by iterating over the pods + for pod in $(${COMMAND} get pods -n olm -o name); do + echo "*** Describing pod: ${pod}" + ${COMMAND} describe "${pod}" + done + exit 1 + } # Wait for the operator to be ready ${COMMAND} wait --for=condition=available deployment/"${DEPLOYMENT_NAME}" -n "${NAMESPACE}" --timeout=5m From dbf31fe0186857ecb18c68740311f106ca0ab7f2 Mon Sep 17 00:00:00 2001 From: Daniel Grimm Date: Tue, 24 Sep 2024 15:39:23 +0200 Subject: [PATCH 12/25] Automatically adjust default release channel (#353) This will make sure that the channel field is pre-populated with a good default when we run the release workflow. Signed-off-by: Daniel Grimm --- .github/workflows/release.yaml | 2 +- Makefile.core.mk | 14 +++++++++++--- bundle.Dockerfile | 2 +- bundle/metadata/annotations.yaml | 2 +- 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 89a72465c..0ddb40159 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -9,7 +9,7 @@ on: bundle_channels: description: "Bundle channels" required: true - default: "candidates" + default: dev-0.2 is_draft_release: description: "Draft release" type: boolean diff --git a/Makefile.core.mk b/Makefile.core.mk index 07a3ce318..f8f34e443 100644 --- a/Makefile.core.mk +++ b/Makefile.core.mk @@ -79,7 +79,12 @@ GINKGO_FLAGS := $(if $(VERBOSE),-v) $(if $(CI),--no-color) # To re-generate a bundle for other specific channels without changing the standard setup, you can: # - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable) # - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable") -CHANNELS ?= ${MINOR_VERSION} +CHANNEL_PREFIX := dev +ifneq (,$(findstring release-,$(shell git rev-parse --abbrev-ref HEAD))) +CHANNEL_PREFIX = stable +endif + +CHANNELS ?= $(CHANNEL_PREFIX)-$(MINOR_VERSION) ifneq ($(origin CHANNELS), undefined) BUNDLE_CHANNELS = --channels=\"$(CHANNELS)\" endif @@ -381,7 +386,7 @@ gen-charts: ## Pull charts from istio repository. gen: gen-all-except-bundle bundle ## Generate everything. 
.PHONY: gen-all-except-bundle -gen-all-except-bundle: operator-name operator-chart controller-gen gen-api gen-charts gen-manifests gen-code gen-api-docs +gen-all-except-bundle: operator-name operator-chart controller-gen gen-api gen-charts gen-manifests gen-code gen-api-docs github-workflow .PHONY: gen-check gen-check: gen restore-manifest-dates check-clean-repo ## Verify that changes in generated resources have been checked in. @@ -425,6 +430,9 @@ operator-chart: sed -i -e "s|^\(image: \).*$$|\1${IMAGE}|g" \ -e "s/^\( version: \).*$$/\1${VERSION}/g" chart/values.yaml +github-workflow: + sed -i -e '1,/default:/ s/^\(.*default:\).*$$/\1 ${CHANNELS}/' .github/workflows/release.yaml + .PHONY: update-istio update-istio: ## Update the Istio commit hash in the 'latest' entry in versions.yaml to the latest commit in the branch. @hack/update-istio.sh @@ -648,7 +656,7 @@ git-hook: gitleaks ## Installs gitleaks as a git pre-commit hook. chmod +x .git/hooks/pre-commit; \ fi -.SILENT: helm $(HELM) $(LOCALBIN) deploy-yaml gen-api operator-name operator-chart +.SILENT: helm $(HELM) $(LOCALBIN) deploy-yaml gen-api operator-name operator-chart github-workflow COMMON_IMPORTS ?= lint-all lint-scripts lint-copyright-banner lint-go lint-yaml lint-helm format-go tidy-go check-clean-repo update-common .PHONY: $(COMMON_IMPORTS) diff --git a/bundle.Dockerfile b/bundle.Dockerfile index 7120f4329..5bc812405 100644 --- a/bundle.Dockerfile +++ b/bundle.Dockerfile @@ -5,7 +5,7 @@ LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ LABEL operators.operatorframework.io.bundle.package.v1=sailoperator -LABEL operators.operatorframework.io.bundle.channels.v1="0.2" +LABEL operators.operatorframework.io.bundle.channels.v1="dev-0.2" LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.36.1 LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4 diff --git a/bundle/metadata/annotations.yaml b/bundle/metadata/annotations.yaml index 7b7ca23e4..e520fa1cb 100644 --- a/bundle/metadata/annotations.yaml +++ b/bundle/metadata/annotations.yaml @@ -4,7 +4,7 @@ annotations: operators.operatorframework.io.bundle.manifests.v1: manifests/ operators.operatorframework.io.bundle.metadata.v1: metadata/ operators.operatorframework.io.bundle.package.v1: sailoperator - operators.operatorframework.io.bundle.channels.v1: "0.2" + operators.operatorframework.io.bundle.channels.v1: "dev-0.2" operators.operatorframework.io.metrics.builder: operator-sdk-v1.36.1 operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4 From 4c6488726821c04ffb8aafba6973f923a8ba689c Mon Sep 17 00:00:00 2001 From: Francisco Herrera Date: Tue, 24 Sep 2024 16:19:23 +0200 Subject: [PATCH 13/25] Update test labels in the wayofwork documentation (#359) Signed-off-by: frherrer --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d71a391ea..40e05748e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -25,7 +25,7 @@ If you want to contribute to the Sail Operator project, you can follow some rule - Discuss your changes before you start working on them. 
You can open a new issue in the [Sail Operator GitHub repository](https://github.com/istio-ecosystem/sail-operator/issues) or start a discussion in the [Sail Operator Discussion](https://github.com/istio-ecosystem/sail-operator/discussions). This way, you can get feedback from the community and ensure that your changes are aligned with the project goals. - Use of Labels: We use labels in the issues to help us track the progress of the issues. You can use the labels to help you understand the status of the issue and what is needed to move forward. Those labels are: - `backport/backport-handled`: Use this label to indicate that the issue has been backported to the appropriate branches. - - `testing`: Use this label to indicate that the issue is related to testing. Can be used in combination with other labels to mark the proper testing type, for example: `testing/e2e`, `testing/unit`, `testing/integration`. + - `test`: Use this label to indicate that the issue is related to tests, or add `test-needed` when an issue requires an accompanying test. Can be used in combination with other labels to mark the proper test type, for example: `test-e2e`, `test-unit`, `test-integration`. - `good first issue`: Use this label to indicate that the issue is a good first issue for new contributors. - `help wanted`: Use this label to indicate that the issue needs help from the community. - `enhancement`: Use this label to indicate that the issue is an enhancement related to a new feature or improvement. From 87f9fbd6ef42f0af472d646ee4f4c8127e06958f Mon Sep 17 00:00:00 2001 From: Maxim Babushkin Date: Tue, 24 Sep 2024 18:40:24 +0300 Subject: [PATCH 14/25] Add CODE-OF-CONDUCT to the project (#362) In order to follow the project community standards, adding the CODE-OF-CONDUCT to the repository. Signed-off-by: Maxim Babushkin --- CODE-OF-CONDUCT.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 CODE-OF-CONDUCT.md diff --git a/CODE-OF-CONDUCT.md b/CODE-OF-CONDUCT.md new file mode 100644 index 000000000..880f0a278 --- /dev/null +++ b/CODE-OF-CONDUCT.md @@ -0,0 +1,7 @@ +# Sail Operator Community Code of Conduct + +As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities. + +All members of the Sail Operator community must abide by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). +Only by respecting each other can we develop a productive, collaborative community. + From ae7ba932d1a53f88eccbf46a3ed1d01661f62340 Mon Sep 17 00:00:00 2001 From: Sridhar Gaddam Date: Wed, 25 Sep 2024 19:24:26 +0530 Subject: [PATCH 15/25] Fix formatting in the charts readme page (#364) Signed-off-by: Sridhar Gaddam --- bundle/README.md | 12 ++++++------ chart/README.md | 40 ++++++++++++++++++-------------------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/bundle/README.md b/bundle/README.md index 50aaf7008..078ef4d31 100644 --- a/bundle/README.md +++ b/bundle/README.md @@ -88,9 +88,9 @@ drop down menu when creating a new `Istio` with the OpenShift Container Platform web console. 
For a list of available versions, see the [versions.yaml](/versions.yaml) file or use the command: - ```sh - $ kubectl explain istio.spec.version - ``` +```sh +$ kubectl explain istio.spec.version +``` ### Customizing Istio configuration @@ -131,9 +131,9 @@ spec: For a list of available configuration for the `spec.values` field, run the following command: - ```sh - $ kubectl explain istio.spec.values - ``` +```sh +$ kubectl explain istio.spec.values +``` For the `IstioCNI` resource, replace `istio` with `istiocni` in the command above. diff --git a/chart/README.md b/chart/README.md index c2e4cf792..3a81b42f9 100644 --- a/chart/README.md +++ b/chart/README.md @@ -31,9 +31,9 @@ The extract command will create the `sail-operator` directory with the helm char This section describes the procedure to install `Sail Operator` using Helm. The general syntax for helm installation is: - ```sh - helm install --create-namespace --namespace [--set ] - ``` +```sh +$ helm install --create-namespace --namespace [--set ] +``` The variables specified in the command are as follows: * `` - A name to identify and manage the Helm chart once installed. @@ -169,9 +169,9 @@ An example configuration: For a list of available configuration for the `spec.values` field, run the following command: - ```sh - $ kubectl explain istio.spec.values - ``` +```sh +$ kubectl explain istio.spec.values +``` For the `IstioCNI` resource, replace `istio` with `istiocni` in the command above. @@ -223,26 +223,26 @@ For installation steps, refer to the following [link](../docs/common/istio-addon ### Deleting Istio - ```sh - $ kubectl -n istio-system delete istio default - ``` +```sh +$ kubectl -n istio-system delete istio default +``` ### Deleting IstioCNI (in OpenShift cluster platform) - ```sh - $ kubectl -n istio-cni delete istiocni default - ``` +```sh +$ kubectl -n istio-cni delete istiocni default +``` ### Uninstall the Sail Operator using Helm - ```sh - $ helm uninstall sail-operator --namespace sail-operator - ``` +```sh +$ helm uninstall sail-operator --namespace sail-operator +``` ### Deleting the Project namespaces - ```sh - $ kubectl delete namespace istio-system - $ kubectl delete namespace istio-cni - $ kubectl delete namespace sail-operator - ``` +```sh +$ kubectl delete namespace istio-system +$ kubectl delete namespace istio-cni +$ kubectl delete namespace sail-operator +``` From 96e2c0eaf77af7c02dbd60c653975a697f9fe98b Mon Sep 17 00:00:00 2001 From: Maxim Babushkin Date: Thu, 26 Sep 2024 19:06:23 +0300 Subject: [PATCH 16/25] Add pull request template (#366) The pull request template will help to add important information for the reviewers during new PR creation. Signed-off-by: Maxim Babushkin --- .github/pull_request_template.md | 70 ++++++++++++++++++++++++++++++++ CONTRIBUTING.md | 12 +----- 2 files changed, 71 insertions(+), 11 deletions(-) create mode 100644 .github/pull_request_template.md diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..fc34ef026 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,70 @@ + + +#### What type of PR is this? + + +- [ ] Enhancement / New Feature +- [ ] Bug Fix +- [ ] Refactor +- [ ] Optimization +- [ ] Test +- [ ] Documentation Update + +#### What this PR does / why we need it: +```text + +``` + +#### Which issue(s) this PR fixes: + +Fixes # + +Related Issue/PR # + +#### Special notes for your reviewer: +```text + +``` + +#### Does this PR introduce a user-facing change? 
+ +```release-note + +``` + +#### Additional documentation: +```text + +``` + +#### Does this PR introduce a breaking change? +```text + +``` + +#### Other information: +```text + +``` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 40e05748e..8d6421f53 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -29,16 +29,6 @@ If you want to contribute to the Sail Operator project, you can follow some rule - `good first issue`: Use this label to indicate that the issue is a good first issue for new contributors. - `help wanted`: Use this label to indicate that the issue needs help from the community. - `enhancement`: Use this label to indicate that the issue is an enhancement related to a new feature or improvement. -- Pull Requests: When you open a pull request, you can follow this template to help you provide the necessary information to the maintainers: - - **What type of PR is this?** - - **What this PR does / why we need it:** - - **Which issue(s) this PR fixes:** (Mark with Fixes #12345, with this the issue will be autoclosed when the PR is merged) - - **Special notes for your reviewer:** - - **Does this PR introduce a user-facing change?** - - **Additional documentation:** - - **Does this PR introduce a breaking change?** - - **Other information:** - - Labels: You can use the labels to help you track the status of the PR. The labels are the same as the issue labels. Additionally, you can use the `cleanup/refactor` to indicate that the PR is a cleanup or refactor of the codebase. Having the label just helps with filtering pull requests. It also is a hint that this work does not need an entry in the changelog ## Community meetings @@ -46,4 +36,4 @@ This is not defined yet. We are working on defining the community meetings and h ## Security Issues -If you find a security issue in the Sail Operator project, please refer to the [Security Policy](https://github.com/istio-ecosystem/sail-operator/security/policy) for more information on how to report security issues. Please do not report security issues in the public GitHub repository. \ No newline at end of file +If you find a security issue in the Sail Operator project, please refer to the [Security Policy](https://github.com/istio-ecosystem/sail-operator/security/policy) for more information on how to report security issues. Please do not report security issues in the public GitHub repository. From c199a1bc6ff839c353397335d74588dbf4162fc8 Mon Sep 17 00:00:00 2001 From: Maxim Babushkin Date: Fri, 27 Sep 2024 09:37:24 +0300 Subject: [PATCH 17/25] Update helm chart README (#370) It's now possible to deploy Sail Operator with Helm and use the repository as "helm repo" instead of the need to clone the repo. Update the chart readme file with new way of deployment. Signed-off-by: Maxim Babushkin --- chart/README.md | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/chart/README.md b/chart/README.md index 3a81b42f9..e822fed6d 100644 --- a/chart/README.md +++ b/chart/README.md @@ -16,16 +16,10 @@ OpenShift: ## Prepare the Helm charts -**Note** - `Sail Operator` could be installed by downloading the release artifacts from the [release page](https://github.com/istio-ecosystem/sail-operator/releases). - -* Download the required release artifact -* Extract it locally. - - ```sh - $ tar -xvf /tmp/sail-operator-.tgz - ``` - -The extract command will create the `sail-operator` directory with the helm charts in it. 
+```sh +$ helm repo add sail-operator https://istio-ecosystem.github.io/sail-operator +$ helm repo update +``` ## Installation steps @@ -55,13 +49,13 @@ Default configuration values can be changed using one or more `--set * Kubernetes ```sh - $ helm install sail-operator sail-operator/ --namespace sail-operator + $ helm install sail-operator sail-operator/sail-operator --namespace sail-operator ``` * OpenShift ```sh - $ helm install sail-operator sail-operator/ --namespace sail-operator --set platform=openshift + $ helm install sail-operator sail-operator/sail-operator --namespace sail-operator --set platform=openshift ``` 3. Validate the CRD installation with the `helm ls` command: @@ -69,8 +63,8 @@ Default configuration values can be changed using one or more `--set ```sh $ helm ls -n sail-operator - NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION - sail-operator sail-operator 1 2024-09-16 12:43:18.786846217 +0300 IDT deployed sail-operator-0.1.0-rc.1 0.1.0-rc.1 + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + sail-operator sail-operator 1 2024-09-26 21:15:52.508983383 +0300 IDT deployed sail-operator-0.1.0 0.1.0 ``` 4. Get the status of the installed helm chart to ensure it is deployed: @@ -79,7 +73,7 @@ Default configuration values can be changed using one or more `--set $ helm status sail-operator -n sail-operator NAME: sail-operator - LAST DEPLOYED: Mon Sep 16 12:43:18 2024 + LAST DEPLOYED: Thu Sep 26 21:15:52 2024 NAMESPACE: sail-operator STATUS: deployed REVISION: 1 @@ -91,8 +85,8 @@ Default configuration values can be changed using one or more `--set ```sh $ kubectl -n sail-operator get deployment --output wide - NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR - sail-operator 1/1 1 1 19m kube-rbac-proxy,manager gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0,quay.io/maistra-dev/sail-operator:0.1.0-rc.1 app.kubernetes.io/created-by=sailoperator,app.kubernetes.io/part-of=sailoperator,control-plane=sail-operator + NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR + sail-operator 1/1 1 1 107s kube-rbac-proxy,manager gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0,quay.io/maistra-dev/sail-operator:0.1-latest app.kubernetes.io/created-by=sailoperator,app.kubernetes.io/part-of=sailoperator,control-plane=sail-operator $ kubectl -n sail-operator get pods -o wide From 837ffbc0978707a4cbb4a426f3952c5e1d3d3a9c Mon Sep 17 00:00:00 2001 From: Francisco Herrera Date: Fri, 27 Sep 2024 16:23:24 +0200 Subject: [PATCH 18/25] E2E kubectl util refactor (#368) * E2E kubectl util refactor Signed-off-by: frherrer * Improve reset namespace in kubectl util Signed-off-by: frherrer * Deleting non needed func in the kubectl util Signed-off-by: frherrer --------- Signed-off-by: frherrer --- .../controlplane/control_plane_suite_test.go | 5 + tests/e2e/controlplane/control_plane_test.go | 52 +-- .../multicluster_multiprimary_test.go | 76 ++--- .../multicluster_primaryremote_test.go | 57 ++-- .../multicluster/multicluster_suite_test.go | 9 + tests/e2e/operator/operator_install_test.go | 5 +- tests/e2e/operator/operator_suite_test.go | 5 + tests/e2e/util/certs/certs.go | 4 +- tests/e2e/util/common/e2e_utils.go | 35 +- tests/e2e/util/kubectl/kubectl.go | 321 +++++++++--------- 10 files changed, 302 insertions(+), 267 deletions(-) diff --git a/tests/e2e/controlplane/control_plane_suite_test.go b/tests/e2e/controlplane/control_plane_suite_test.go index 872a0578a..627edfc5d 100644 --- a/tests/e2e/controlplane/control_plane_suite_test.go +++ 
b/tests/e2e/controlplane/control_plane_suite_test.go @@ -21,6 +21,7 @@ import ( k8sclient "github.com/istio-ecosystem/sail-operator/tests/e2e/util/client" env "github.com/istio-ecosystem/sail-operator/tests/e2e/util/env" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "sigs.k8s.io/controller-runtime/pkg/client" @@ -41,6 +42,8 @@ var ( expectedRegistry = env.Get("EXPECTED_REGISTRY", "^docker\\.io|^gcr\\.io") bookinfoNamespace = env.Get("BOOKINFO_NAMESPACE", "bookinfo") multicluster = env.GetBool("MULTICLUSTER", false) + + k *kubectl.KubectlBuilder ) func TestInstall(t *testing.T) { @@ -58,4 +61,6 @@ func setup() { GinkgoWriter.Println("Initializing k8s client") cl, err = k8sclient.InitK8sClient("") Expect(err).NotTo(HaveOccurred()) + + k = kubectl.NewKubectlBuilder() } diff --git a/tests/e2e/controlplane/control_plane_test.go b/tests/e2e/controlplane/control_plane_test.go index ec7a6574a..8beb63065 100644 --- a/tests/e2e/controlplane/control_plane_test.go +++ b/tests/e2e/controlplane/control_plane_test.go @@ -30,7 +30,6 @@ import ( common "github.com/istio-ecosystem/sail-operator/tests/e2e/util/common" . "github.com/istio-ecosystem/sail-operator/tests/e2e/util/gomega" "github.com/istio-ecosystem/sail-operator/tests/e2e/util/helm" - "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/onsi/gomega/types" @@ -45,11 +44,10 @@ import ( var _ = Describe("Control Plane Installation", Ordered, func() { SetDefaultEventuallyTimeout(180 * time.Second) SetDefaultEventuallyPollingInterval(time.Second) - debugInfoLogged := false BeforeAll(func(ctx SpecContext) { - Expect(kubectl.CreateNamespace(namespace)).To(Succeed(), "Namespace failed to be created") + Expect(k.CreateNamespace(namespace)).To(Succeed(), "Namespace failed to be created") extraArg := "" if ocp { @@ -79,7 +77,7 @@ kind: IstioCNI metadata: name: default ` + spec - Expect(kubectl.CreateFromString(yaml)).To(Succeed(), "IstioCNI creation failed") + Expect(k.CreateFromString(yaml)).To(Succeed(), "IstioCNI creation failed") Success("IstioCNI created") cni := &v1alpha1.IstioCNI{} @@ -103,7 +101,7 @@ kind: Istio metadata: name: default ` + spec - Expect(kubectl.CreateFromString(yaml)).To(Succeed(), "Istio creation failed") + Expect(k.CreateFromString(yaml)).To(Succeed(), "Istio creation failed") Success("Istio created") istio := &v1alpha1.Istio{} @@ -126,8 +124,8 @@ metadata: Context(version.Name, func() { BeforeAll(func() { - Expect(kubectl.CreateNamespace(controlPlaneNamespace)).To(Succeed(), "Istio namespace failed to be created") - Expect(kubectl.CreateNamespace(istioCniNamespace)).To(Succeed(), "IstioCNI namespace failed to be created") + Expect(k.CreateNamespace(controlPlaneNamespace)).To(Succeed(), "Istio namespace failed to be created") + Expect(k.CreateNamespace(istioCniNamespace)).To(Succeed(), "IstioCNI namespace failed to be created") }) When("the IstioCNI CR is created", func() { @@ -142,7 +140,7 @@ spec: namespace: %s` yaml = fmt.Sprintf(yaml, version.Name, istioCniNamespace) Log("IstioCNI YAML:", indent(2, yaml)) - Expect(kubectl.CreateFromString(yaml)).To(Succeed(), "IstioCNI creation failed") + Expect(k.CreateFromString(yaml)).To(Succeed(), "IstioCNI creation failed") Success("IstioCNI created") }) @@ -174,9 +172,10 @@ spec: }) It("doesn't continuously reconcile the IstioCNI CR", func() { - Eventually(kubectl.Logs).WithArguments(namespace, "deploy/"+deploymentName, 
ptr.Of(30*time.Second)). - ShouldNot(ContainSubstring("Reconciliation done"), "Istio Operator is continuously reconciling") - Success("Istio Operator stopped reconciling") + Eventually(k.SetNamespace(namespace).Logs).WithArguments("deploy/"+deploymentName, ptr.Of(30*time.Second)). + ShouldNot(ContainSubstring("Reconciliation done"), "IstioCNI is continuously reconciling") + k.ResetNamespace() + Success("IstioCNI stopped reconciling") }) }) @@ -192,7 +191,7 @@ spec: namespace: %s` istioYAML = fmt.Sprintf(istioYAML, version.Name, controlPlaneNamespace) Log("Istio YAML:", indent(2, istioYAML)) - Expect(kubectl.CreateFromString(istioYAML)). + Expect(k.CreateFromString(istioYAML)). To(Succeed(), "Istio CR failed to be created") Success("Istio CR created") }) @@ -222,16 +221,17 @@ spec: }) It("doesn't continuously reconcile the Istio CR", func() { - Eventually(kubectl.Logs).WithArguments(namespace, "deploy/"+deploymentName, ptr.Of(30*time.Second)). - ShouldNot(ContainSubstring("Reconciliation done"), "Istio Operator is continuously reconciling") - Success("Istio Operator stopped reconciling") + Eventually(k.SetNamespace(namespace).Logs).WithArguments("deploy/"+deploymentName, ptr.Of(30*time.Second)). + ShouldNot(ContainSubstring("Reconciliation done"), "Istio CR is continuously reconciling") + k.ResetNamespace() + Success("Istio CR stopped reconciling") }) }) When("bookinfo is deployed", func() { BeforeAll(func() { - Expect(kubectl.CreateNamespace(bookinfoNamespace)).To(Succeed(), "Bookinfo namespace failed to be created") - Expect(kubectl.Patch("", "namespace", bookinfoNamespace, "merge", `{"metadata":{"labels":{"istio-injection":"enabled"}}}`)). + Expect(k.CreateNamespace(bookinfoNamespace)).To(Succeed(), "Bookinfo namespace failed to be created") + Expect(k.Patch("namespace", bookinfoNamespace, "merge", `{"metadata":{"labels":{"istio-injection":"enabled"}}}`)). 
To(Succeed(), "Error patching bookinfo namespace") Expect(deployBookinfo(version)).To(Succeed(), "Error deploying bookinfo") Success("Bookinfo deployed") @@ -261,14 +261,14 @@ spec: AfterAll(func(ctx SpecContext) { By("Deleting bookinfo") - Expect(kubectl.DeleteNamespace(bookinfoNamespace)).To(Succeed(), "Bookinfo namespace failed to be deleted") + Expect(k.DeleteNamespace(bookinfoNamespace)).To(Succeed(), "Bookinfo namespace failed to be deleted") Success("Bookinfo deleted") }) }) When("the Istio CR is deleted", func() { BeforeEach(func() { - Expect(kubectl.Delete(controlPlaneNamespace, "istio", istioName)).To(Succeed(), "Istio CR failed to be deleted") + Expect(k.SetNamespace(controlPlaneNamespace).Delete("istio", istioName)).To(Succeed(), "Istio CR failed to be deleted") Success("Istio CR deleted") }) @@ -282,7 +282,7 @@ spec: When("the IstioCNI CR is deleted", func() { BeforeEach(func() { - Expect(kubectl.Delete(istioCniNamespace, "istiocni", istioCniName)).To(Succeed(), "IstioCNI CR failed to be deleted") + Expect(k.SetNamespace(istioCniNamespace).Delete("istiocni", istioCniName)).To(Succeed(), "IstioCNI CR failed to be deleted") Success("IstioCNI deleted") }) @@ -336,7 +336,7 @@ spec: Success("Skipping deletion of operator namespace to avoid removal of operator container image from internal registry") return } - Expect(kubectl.DeleteNamespace(namespace)).To(Succeed(), "Namespace failed to be deleted") + Expect(k.DeleteNamespace(namespace)).To(Succeed(), "Namespace failed to be deleted") Success("Namespace deleted") }) }) @@ -357,17 +357,17 @@ func indent(level int, str string) string { func forceDeleteIstioResources() error { // This is a workaround to delete the Istio CRs that are left in the cluster // This will be improved by splitting the tests into different Nodes with their independent setups and cleanups - err := kubectl.ForceDelete("", "istio", istioName) + err := k.ForceDelete("istio", istioName) if err != nil && !strings.Contains(err.Error(), "not found") { return fmt.Errorf("failed to delete %s CR: %w", "istio", err) } - err = kubectl.ForceDelete("", "istiorevision", "default") + err = k.ForceDelete("istiorevision", "default") if err != nil && !strings.Contains(err.Error(), "not found") { return fmt.Errorf("failed to delete %s CR: %w", "istiorevision", err) } - err = kubectl.Delete("", "istiocni", istioCniName) + err = k.Delete("istiocni", istioCniName) if err != nil && !strings.Contains(err.Error(), "not found") { return fmt.Errorf("failed to delete %s CR: %w", "istiocni", err) } @@ -388,7 +388,7 @@ func getBookinfoURL(version supportedversion.VersionInfo) string { func deployBookinfo(version supportedversion.VersionInfo) error { bookinfoURL := getBookinfoURL(version) - kubectl.Apply(bookinfoNamespace, bookinfoURL) + k.SetNamespace(bookinfoNamespace).Apply(bookinfoURL) if err != nil { return fmt.Errorf("error deploying bookinfo: %w", err) } @@ -397,7 +397,7 @@ func deployBookinfo(version supportedversion.VersionInfo) error { } func getProxyVersion(podName, namespace string) (string, error) { - proxyVersion, err := kubectl.Exec(namespace, + proxyVersion, err := k.SetNamespace(namespace).Exec( podName, "istio-proxy", `curl -s http://localhost:15000/server_info | grep "ISTIO_VERSION" | awk -F '"' '{print $4}'`) diff --git a/tests/e2e/multicluster/multicluster_multiprimary_test.go b/tests/e2e/multicluster/multicluster_multiprimary_test.go index 97c404c52..687e763f7 100644 --- a/tests/e2e/multicluster/multicluster_multiprimary_test.go +++ 
b/tests/e2e/multicluster/multicluster_multiprimary_test.go @@ -49,8 +49,8 @@ var _ = Describe("Multicluster deployment models", Ordered, func() { BeforeAll(func(ctx SpecContext) { if !skipDeploy { // Deploy the Sail Operator on both clusters - Expect(kubectl.CreateNamespace(namespace, kubeconfig)).To(Succeed(), "Namespace failed to be created on Cluster #1") - Expect(kubectl.CreateNamespace(namespace, kubeconfig2)).To(Succeed(), "Namespace failed to be created on Cluster #2") + Expect(kubectlClient1.CreateNamespace(namespace)).To(Succeed(), "Namespace failed to be created on Cluster #1") + Expect(kubectlClient2.CreateNamespace(namespace)).To(Succeed(), "Namespace failed to be created on Cluster #2") Expect(helm.Install("sail-operator", filepath.Join(project.RootDir, "chart"), "--namespace "+namespace, "--set=image="+image, "--kubeconfig "+kubeconfig)). To(Succeed(), "Operator failed to be deployed in Cluster #1") @@ -76,8 +76,8 @@ var _ = Describe("Multicluster deployment models", Ordered, func() { Context("Istio version is: "+version.Version, func() { When("Istio resources are created in both clusters with multicluster configuration", func() { BeforeAll(func(ctx SpecContext) { - Expect(kubectl.CreateNamespace(controlPlaneNamespace, kubeconfig)).To(Succeed(), "Namespace failed to be created") - Expect(kubectl.CreateNamespace(controlPlaneNamespace, kubeconfig2)).To(Succeed(), "Namespace failed to be created") + Expect(kubectlClient1.CreateNamespace(controlPlaneNamespace)).To(Succeed(), "Namespace failed to be created") + Expect(kubectlClient2.CreateNamespace(controlPlaneNamespace)).To(Succeed(), "Namespace failed to be created") // Push the intermediate CA to both clusters certs.PushIntermediateCA(controlPlaneNamespace, kubeconfig, "east", "network1", artifacts, clPrimary) @@ -110,11 +110,11 @@ spec: network: %s` multiclusterCluster1YAML := fmt.Sprintf(multiclusterYAML, version.Name, controlPlaneNamespace, "mesh1", "cluster1", "network1") Log("Istio CR Cluster #1: ", multiclusterCluster1YAML) - Expect(kubectl.CreateFromString(multiclusterCluster1YAML, kubeconfig)).To(Succeed(), "Istio Resource creation failed on Cluster #1") + Expect(kubectlClient1.CreateFromString(multiclusterCluster1YAML)).To(Succeed(), "Istio Resource creation failed on Cluster #1") multiclusterCluster2YAML := fmt.Sprintf(multiclusterYAML, version.Name, controlPlaneNamespace, "mesh1", "cluster2", "network2") Log("Istio CR Cluster #2: ", multiclusterCluster2YAML) - Expect(kubectl.CreateFromString(multiclusterCluster2YAML, kubeconfig2)).To(Succeed(), "Istio Resource creation failed on Cluster #2") + Expect(kubectlClient2.CreateFromString(multiclusterCluster2YAML)).To(Succeed(), "Istio Resource creation failed on Cluster #2") }) It("updates both Istio CR status to Ready", func(ctx SpecContext) { @@ -146,13 +146,13 @@ spec: When("Gateway is created in both clusters", func() { BeforeAll(func(ctx SpecContext) { - Expect(kubectl.Apply(controlPlaneNamespace, eastGatewayYAML, kubeconfig)).To(Succeed(), "Gateway creation failed on Cluster #1") + Expect(kubectlClient1.SetNamespace(controlPlaneNamespace).Apply(eastGatewayYAML)).To(Succeed(), "Gateway creation failed on Cluster #1") - Expect(kubectl.Apply(controlPlaneNamespace, westGatewayYAML, kubeconfig2)).To(Succeed(), "Gateway creation failed on Cluster #2") + Expect(kubectlClient2.SetNamespace(controlPlaneNamespace).Apply(westGatewayYAML)).To(Succeed(), "Gateway creation failed on Cluster #2") // Expose the Gateway service in both clusters - 
Expect(kubectl.Apply(controlPlaneNamespace, exposeServiceYAML, kubeconfig)).To(Succeed(), "Expose Service creation failed on Cluster #1") - Expect(kubectl.Apply(controlPlaneNamespace, exposeServiceYAML, kubeconfig2)).To(Succeed(), "Expose Service creation failed on Cluster #2") + Expect(kubectlClient1.SetNamespace(controlPlaneNamespace).Apply(exposeServiceYAML)).To(Succeed(), "Expose Service creation failed on Cluster #1") + Expect(kubectlClient2.SetNamespace(controlPlaneNamespace).Apply(exposeServiceYAML)).To(Succeed(), "Expose Service creation failed on Cluster #2") }) It("updates both Gateway status to Available", func(ctx SpecContext) { @@ -170,23 +170,23 @@ spec: When("are installed remote secrets on each cluster", func() { BeforeAll(func(ctx SpecContext) { // Get the internal IP of the control plane node in both clusters - internalIPCluster1, err := kubectl.GetInternalIP("node-role.kubernetes.io/control-plane", kubeconfig) + internalIPCluster1, err := kubectlClient1.GetInternalIP("node-role.kubernetes.io/control-plane") Expect(err).NotTo(HaveOccurred()) Expect(internalIPCluster1).NotTo(BeEmpty(), "Internal IP is empty for Cluster #1") - internalIPCluster2, err := kubectl.GetInternalIP("node-role.kubernetes.io/control-plane", kubeconfig2) + internalIPCluster2, err := kubectlClient2.GetInternalIP("node-role.kubernetes.io/control-plane") Expect(internalIPCluster2).NotTo(BeEmpty(), "Internal IP is empty for Cluster #2") Expect(err).NotTo(HaveOccurred()) // Install a remote secret in Cluster #1 that provides access to the Cluster #2 API server. secret, err := istioctl.CreateRemoteSecret(kubeconfig2, "cluster2", internalIPCluster2) Expect(err).NotTo(HaveOccurred()) - Expect(kubectl.ApplyString("", secret, kubeconfig)).To(Succeed(), "Remote secret creation failed on Cluster #1") + Expect(kubectlClient1.ApplyString(secret)).To(Succeed(), "Remote secret creation failed on Cluster #1") // Install a remote secret in Cluster #2 that provides access to the Cluster #1 API server. 
secret, err = istioctl.CreateRemoteSecret(kubeconfig, "cluster1", internalIPCluster1) Expect(err).NotTo(HaveOccurred()) - Expect(kubectl.ApplyString("", secret, kubeconfig2)).To(Succeed(), "Remote secret creation failed on Cluster #1") + Expect(kubectlClient2.ApplyString(secret)).To(Succeed(), "Remote secret creation failed on Cluster #1") }) It("remote secrets are created", func(ctx SpecContext) { @@ -204,7 +204,7 @@ spec: When("sample apps are deployed in both clusters", func() { BeforeAll(func(ctx SpecContext) { // Deploy the sample app in both clusters - deploySampleApp("sample", version, kubeconfig, kubeconfig2) + deploySampleApp("sample", version) Success("Sample app is deployed in both clusters") }) @@ -242,12 +242,12 @@ spec: Expect(err).NotTo(HaveOccurred(), "Error getting sleep pod name on Cluster #2") // Run the curl command from the sleep pod in the Cluster #2 and get response list to validate that we get responses from both clusters - Cluster2Responses := strings.Join(getListCurlResponses(sleepPodNameCluster2, kubeconfig2), "\n") + Cluster2Responses := strings.Join(getListCurlResponses(kubectlClient2, sleepPodNameCluster2), "\n") Expect(Cluster2Responses).To(ContainSubstring("Hello version: v1"), "Responses from Cluster #2 are not the expected") Expect(Cluster2Responses).To(ContainSubstring("Hello version: v2"), "Responses from Cluster #2 are not the expected") // Run the curl command from the sleep pod in the Cluster #1 and get response list to validate that we get responses from both clusters - Cluster1Responses := strings.Join(getListCurlResponses(sleepPodNameCluster1, kubeconfig), "\n") + Cluster1Responses := strings.Join(getListCurlResponses(kubectlClient1, sleepPodNameCluster1), "\n") Expect(Cluster1Responses).To(ContainSubstring("Hello version: v1"), "Responses from Cluster #1 are not the expected") Expect(Cluster1Responses).To(ContainSubstring("Hello version: v2"), "Responses from Cluster #1 are not the expected") Success("Sample app is accessible from both clusters") @@ -257,8 +257,8 @@ spec: When("istio CR is deleted in both clusters", func() { BeforeEach(func() { // Delete the Istio CR in both clusters - Expect(kubectl.Delete(controlPlaneNamespace, "istio", istioName, kubeconfig)).To(Succeed(), "Istio CR failed to be deleted") - Expect(kubectl.Delete(controlPlaneNamespace, "istio", istioName, kubeconfig2)).To(Succeed(), "Istio CR failed to be deleted") + Expect(kubectlClient1.SetNamespace(controlPlaneNamespace).Delete("istio", istioName)).To(Succeed(), "Istio CR failed to be deleted") + Expect(kubectlClient2.SetNamespace(controlPlaneNamespace).Delete("istio", istioName)).To(Succeed(), "Istio CR failed to be deleted") Success("Istio CR is deleted in both clusters") }) @@ -273,16 +273,16 @@ spec: AfterAll(func(ctx SpecContext) { // Delete namespace to ensure clean up for new tests iteration - Expect(kubectl.DeleteNamespace(controlPlaneNamespace, kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Cluster #1") - Expect(kubectl.DeleteNamespace(controlPlaneNamespace, kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Cluster #2") + Expect(kubectlClient1.DeleteNamespace(controlPlaneNamespace)).To(Succeed(), "Namespace failed to be deleted on Cluster #1") + Expect(kubectlClient2.DeleteNamespace(controlPlaneNamespace)).To(Succeed(), "Namespace failed to be deleted on Cluster #2") common.CheckNamespaceEmpty(ctx, clPrimary, controlPlaneNamespace) common.CheckNamespaceEmpty(ctx, clRemote, controlPlaneNamespace) Success("ControlPlane Namespaces are empty") 
// Delete the entire sample namespace in both clusters - Expect(kubectl.DeleteNamespace("sample", kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Cluster #1") - Expect(kubectl.DeleteNamespace("sample", kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Cluster #2") + Expect(kubectlClient1.DeleteNamespace("sample")).To(Succeed(), "Namespace failed to be deleted on Cluster #1") + Expect(kubectlClient2.DeleteNamespace("sample")).To(Succeed(), "Namespace failed to be deleted on Cluster #2") common.CheckNamespaceEmpty(ctx, clPrimary, "sample") common.CheckNamespaceEmpty(ctx, clRemote, "sample") @@ -294,8 +294,8 @@ spec: AfterAll(func(ctx SpecContext) { // Delete the Sail Operator from both clusters - Expect(kubectl.DeleteNamespace(namespace, kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Cluster #1") - Expect(kubectl.DeleteNamespace(namespace, kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Cluster #2") + Expect(kubectlClient1.DeleteNamespace(namespace)).To(Succeed(), "Namespace failed to be deleted on Cluster #1") + Expect(kubectlClient2.DeleteNamespace(namespace)).To(Succeed(), "Namespace failed to be deleted on Cluster #2") // Delete the intermediate CA from both clusters common.CheckNamespaceEmpty(ctx, clPrimary, namespace) @@ -304,15 +304,15 @@ spec: }) // deploySampleApp deploys the sample app in the given cluster -func deploySampleApp(ns string, istioVersion supportedversion.VersionInfo, kubeconfig string, kubeconfig2 string) { +func deploySampleApp(ns string, istioVersion supportedversion.VersionInfo) { // Create the namespace - Expect(kubectl.CreateNamespace(ns, kubeconfig)).To(Succeed(), "Namespace failed to be created") - Expect(kubectl.CreateNamespace(ns, kubeconfig2)).To(Succeed(), "Namespace failed to be created") + Expect(kubectlClient1.CreateNamespace(ns)).To(Succeed(), "Namespace failed to be created") + Expect(kubectlClient2.CreateNamespace(ns)).To(Succeed(), "Namespace failed to be created") // Label the namespace - Expect(kubectl.Patch("", "namespace", ns, "merge", `{"metadata":{"labels":{"istio-injection":"enabled"}}}`)). + Expect(kubectlClient1.Patch("namespace", ns, "merge", `{"metadata":{"labels":{"istio-injection":"enabled"}}}`)). To(Succeed(), "Error patching sample namespace") - Expect(kubectl.Patch("", "namespace", ns, "merge", `{"metadata":{"labels":{"istio-injection":"enabled"}}}`, kubeconfig2)). + Expect(kubectlClient2.Patch("namespace", ns, "merge", `{"metadata":{"labels":{"istio-injection":"enabled"}}}`)). 
To(Succeed(), "Error patching sample namespace") version := istioVersion.Version @@ -321,22 +321,22 @@ func deploySampleApp(ns string, istioVersion supportedversion.VersionInfo, kubec version = "master" } helloWorldURL := fmt.Sprintf("https://raw.githubusercontent.com/istio/istio/%s/samples/helloworld/helloworld.yaml", version) - Expect(kubectl.ApplyWithLabels(ns, helloWorldURL, "service=helloworld", kubeconfig)).To(Succeed(), "Sample service deploy failed on Cluster #1") - Expect(kubectl.ApplyWithLabels(ns, helloWorldURL, "service=helloworld", kubeconfig2)).To(Succeed(), "Sample service deploy failed on Cluster #2") + Expect(kubectlClient1.SetNamespace(ns).ApplyWithLabels(helloWorldURL, "service=helloworld")).To(Succeed(), "Sample service deploy failed on Cluster #1") + Expect(kubectlClient2.SetNamespace(ns).ApplyWithLabels(helloWorldURL, "service=helloworld")).To(Succeed(), "Sample service deploy failed on Cluster #2") - Expect(kubectl.ApplyWithLabels(ns, helloWorldURL, "version=v1", kubeconfig)).To(Succeed(), "Sample service deploy failed on Cluster #1") - Expect(kubectl.ApplyWithLabels(ns, helloWorldURL, "version=v2", kubeconfig2)).To(Succeed(), "Sample service deploy failed on Cluster #2") + Expect(kubectlClient1.SetNamespace(ns).ApplyWithLabels(helloWorldURL, "version=v1")).To(Succeed(), "Sample service deploy failed on Cluster #1") + Expect(kubectlClient2.SetNamespace(ns).ApplyWithLabels(helloWorldURL, "version=v2")).To(Succeed(), "Sample service deploy failed on Cluster #2") sleepURL := fmt.Sprintf("https://raw.githubusercontent.com/istio/istio/%s/samples/sleep/sleep.yaml", version) - Expect(kubectl.Apply(ns, sleepURL, kubeconfig)).To(Succeed(), "Sample sleep deploy failed on Cluster #1") - Expect(kubectl.Apply(ns, sleepURL, kubeconfig2)).To(Succeed(), "Sample sleep deploy failed on Cluster #2") + Expect(kubectlClient1.SetNamespace(ns).Apply(sleepURL)).To(Succeed(), "Sample sleep deploy failed on Cluster #1") + Expect(kubectlClient2.SetNamespace(ns).Apply(sleepURL)).To(Succeed(), "Sample sleep deploy failed on Cluster #2") } // getListCurlResponses runs the curl command 10 times from the sleep pod in the given cluster and get response list -func getListCurlResponses(podName, kubeconfig string) []string { +func getListCurlResponses(k *kubectl.KubectlBuilder, podName string) []string { var responses []string for i := 0; i < 10; i++ { - response, err := kubectl.Exec("sample", podName, "sleep", "curl -sS helloworld.sample:5000/hello", kubeconfig) + response, err := k.SetNamespace("sample").Exec(podName, "sleep", "curl -sS helloworld.sample:5000/hello") Expect(err).NotTo(HaveOccurred()) responses = append(responses, response) } diff --git a/tests/e2e/multicluster/multicluster_primaryremote_test.go b/tests/e2e/multicluster/multicluster_primaryremote_test.go index 798db3c2c..d7a200c66 100644 --- a/tests/e2e/multicluster/multicluster_primaryremote_test.go +++ b/tests/e2e/multicluster/multicluster_primaryremote_test.go @@ -33,7 +33,6 @@ import ( . "github.com/istio-ecosystem/sail-operator/tests/e2e/util/gomega" "github.com/istio-ecosystem/sail-operator/tests/e2e/util/helm" "github.com/istio-ecosystem/sail-operator/tests/e2e/util/istioctl" - "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" @@ -49,8 +48,8 @@ var _ = Describe("Multicluster deployment models", Ordered, func() { BeforeAll(func(ctx SpecContext) { if !skipDeploy { // Deploy the Sail Operator on both clusters - Expect(kubectl.CreateNamespace(namespace, kubeconfig)).To(Succeed(), "Namespace failed to be created on Primary Cluster") - Expect(kubectl.CreateNamespace(namespace, kubeconfig2)).To(Succeed(), "Namespace failed to be created on Remote Cluster") + Expect(kubectlClient1.CreateNamespace(namespace)).To(Succeed(), "Namespace failed to be created on Primary Cluster") + Expect(kubectlClient2.CreateNamespace(namespace)).To(Succeed(), "Namespace failed to be created on Remote Cluster") Expect(helm.Install("sail-operator", filepath.Join(project.RootDir, "chart"), "--namespace "+namespace, "--set=image="+image, "--kubeconfig "+kubeconfig)). To(Succeed(), "Operator failed to be deployed in Primary Cluster") @@ -81,8 +80,8 @@ var _ = Describe("Multicluster deployment models", Ordered, func() { Context("Istio version is: "+version.Version, func() { When("Istio resources are created in both clusters", func() { BeforeAll(func(ctx SpecContext) { - Expect(kubectl.CreateNamespace(controlPlaneNamespace, kubeconfig)).To(Succeed(), "Namespace failed to be created") - Expect(kubectl.CreateNamespace(controlPlaneNamespace, kubeconfig2)).To(Succeed(), "Namespace failed to be created") + Expect(kubectlClient1.CreateNamespace(controlPlaneNamespace)).To(Succeed(), "Namespace failed to be created") + Expect(kubectlClient2.CreateNamespace(controlPlaneNamespace)).To(Succeed(), "Namespace failed to be created") // Push the intermediate CA to both clusters Expect(certs.PushIntermediateCA(controlPlaneNamespace, kubeconfig, "east", "network1", artifacts, clPrimary)). 
@@ -120,7 +119,7 @@ spec: network: %s` multiclusterPrimaryYAML := fmt.Sprintf(PrimaryYAML, version.Name, controlPlaneNamespace, "mesh1", "cluster1", "network1") Log("Istio CR Primary: ", multiclusterPrimaryYAML) - Expect(kubectl.CreateFromString(multiclusterPrimaryYAML, kubeconfig)).To(Succeed(), "Istio Resource creation failed on Primary Cluster") + Expect(kubectlClient1.CreateFromString(multiclusterPrimaryYAML)).To(Succeed(), "Istio Resource creation failed on Primary Cluster") }) It("updates Istio CR on Primary cluster status to Ready", func(ctx SpecContext) { @@ -141,13 +140,13 @@ spec: When("Gateway is created on Primary cluster ", func() { BeforeAll(func(ctx SpecContext) { - Expect(kubectl.Apply(controlPlaneNamespace, eastGatewayYAML, kubeconfig)).To(Succeed(), "Gateway creation failed on Primary Cluster") + Expect(kubectlClient1.SetNamespace(controlPlaneNamespace).Apply(eastGatewayYAML)).To(Succeed(), "Gateway creation failed on Primary Cluster") // Expose istiod service in Primary cluster - Expect(kubectl.Apply(controlPlaneNamespace, exposeIstiodYAML, kubeconfig)).To(Succeed(), "Expose Istiod creation failed on Primary Cluster") + Expect(kubectlClient1.SetNamespace(controlPlaneNamespace).Apply(exposeIstiodYAML)).To(Succeed(), "Expose Istiod creation failed on Primary Cluster") // Expose the Gateway service in both clusters - Expect(kubectl.Apply(controlPlaneNamespace, exposeServiceYAML, kubeconfig)).To(Succeed(), "Expose Service creation failed on Primary Cluster") + Expect(kubectlClient1.SetNamespace(controlPlaneNamespace).Apply(exposeServiceYAML)).To(Succeed(), "Expose Service creation failed on Primary Cluster") }) It("updates Gateway status to Available", func(ctx SpecContext) { @@ -179,31 +178,29 @@ spec: remoteIstioYAML := fmt.Sprintf(RemoteYAML, version.Name, remotePilotAddress) Log("RemoteIstio CR: ", remoteIstioYAML) By("Creating RemoteIstio CR on Remote Cluster") - Expect(kubectl.CreateFromString(remoteIstioYAML, kubeconfig2)).To(Succeed(), "RemoteIstio Resource creation failed on Remote Cluster") + Expect(kubectlClient2.CreateFromString(remoteIstioYAML)).To(Succeed(), "RemoteIstio Resource creation failed on Remote Cluster") // Set the controlplane cluster and network for Remote namespace By("Patching the istio-system namespace on Remote Cluster") Expect( - kubectl.Patch("", + kubectlClient2.Patch( "namespace", controlPlaneNamespace, "merge", - `{"metadata":{"annotations":{"topology.istio.io/controlPlaneClusters":"cluster1"}}}`, - kubeconfig2)). + `{"metadata":{"annotations":{"topology.istio.io/controlPlaneClusters":"cluster1"}}}`)). To(Succeed(), "Error patching istio-system namespace") Expect( - kubectl.Patch("", + kubectlClient2.Patch( "namespace", controlPlaneNamespace, "merge", - `{"metadata":{"labels":{"topology.istio.io/network":"network2"}}}`, - kubeconfig2)). + `{"metadata":{"labels":{"topology.istio.io/network":"network2"}}}`)). 
To(Succeed(), "Error patching istio-system namespace") // To be able to access the remote cluster from the primary cluster, we need to create a secret in the primary cluster // RemoteIstio resource will not be Ready until the secret is created // Get the internal IP of the control plane node in Remote cluster - internalIPRemote, err := kubectl.GetInternalIP("node-role.kubernetes.io/control-plane", kubeconfig2) + internalIPRemote, err := kubectlClient2.GetInternalIP("node-role.kubernetes.io/control-plane") Expect(internalIPRemote).NotTo(BeEmpty(), "Internal IP is empty for Remote Cluster") Expect(err).NotTo(HaveOccurred()) @@ -214,7 +211,7 @@ spec: By("Creating Remote Secret on Primary Cluster") secret, err := istioctl.CreateRemoteSecret(kubeconfig2, "remote", internalIPRemote) Expect(err).NotTo(HaveOccurred()) - Expect(kubectl.ApplyString("", secret, kubeconfig)).To(Succeed(), "Remote secret creation failed on Primary Cluster") + Expect(kubectlClient1.ApplyString(secret)).To(Succeed(), "Remote secret creation failed on Primary Cluster") }) It("secret is created", func(ctx SpecContext) { @@ -234,7 +231,7 @@ spec: When("gateway is created in Remote cluster", func() { BeforeAll(func(ctx SpecContext) { - Expect(kubectl.Apply(controlPlaneNamespace, westGatewayYAML, kubeconfig2)).To(Succeed(), "Gateway creation failed on Remote Cluster") + Expect(kubectlClient2.SetNamespace(controlPlaneNamespace).Apply(westGatewayYAML)).To(Succeed(), "Gateway creation failed on Remote Cluster") Success("Gateway is created in Remote cluster") }) @@ -249,7 +246,7 @@ spec: When("sample apps are deployed in both clusters", func() { BeforeAll(func(ctx SpecContext) { // Deploy the sample app in both clusters - deploySampleApp("sample", version, kubeconfig, kubeconfig2) + deploySampleApp("sample", version) Success("Sample app is deployed in both clusters") }) @@ -287,12 +284,12 @@ spec: Expect(err).NotTo(HaveOccurred(), "Error getting sleep pod name on Remote Cluster") // Run the curl command from the sleep pod in the Remote Cluster and get response list to validate that we get responses from both clusters - remoteResponses := strings.Join(getListCurlResponses(sleepPodNameRemote, kubeconfig2), "\n") + remoteResponses := strings.Join(getListCurlResponses(kubectlClient2, sleepPodNameRemote), "\n") Expect(remoteResponses).To(ContainSubstring("Hello version: v1"), "Responses from Remote Cluster are not the expected") Expect(remoteResponses).To(ContainSubstring("Hello version: v2"), "Responses from Remote Cluster are not the expected") // Run the curl command from the sleep pod in the Primary Cluster and get response list to validate that we get responses from both clusters - primaryResponses := strings.Join(getListCurlResponses(sleepPodNamePrimary, kubeconfig), "\n") + primaryResponses := strings.Join(getListCurlResponses(kubectlClient1, sleepPodNamePrimary), "\n") Expect(primaryResponses).To(ContainSubstring("Hello version: v1"), "Responses from Primary Cluster are not the expected") Expect(primaryResponses).To(ContainSubstring("Hello version: v2"), "Responses from Primary Cluster are not the expected") Success("Sample app is accessible from both clusters") @@ -301,8 +298,8 @@ spec: When("Istio CR and RemoteIstio CR are deleted in both clusters", func() { BeforeEach(func() { - Expect(kubectl.Delete(controlPlaneNamespace, "istio", istioName, kubeconfig)).To(Succeed(), "Istio CR failed to be deleted") - Expect(kubectl.Delete(controlPlaneNamespace, "remoteistio", istioName, kubeconfig2)).To(Succeed(), "RemoteIstio CR failed 
to be deleted") + Expect(kubectlClient1.SetNamespace(controlPlaneNamespace).Delete("istio", istioName)).To(Succeed(), "Istio CR failed to be deleted") + Expect(kubectlClient2.SetNamespace(controlPlaneNamespace).Delete("remoteistio", istioName)).To(Succeed(), "RemoteIstio CR failed to be deleted") Success("Istio and RemoteIstio are deleted") }) @@ -315,16 +312,16 @@ spec: AfterAll(func(ctx SpecContext) { // Delete namespace to ensure clean up for new tests iteration - Expect(kubectl.DeleteNamespace(controlPlaneNamespace, kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Primary Cluster") - Expect(kubectl.DeleteNamespace(controlPlaneNamespace, kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Remote Cluster") + Expect(kubectlClient1.DeleteNamespace(controlPlaneNamespace)).To(Succeed(), "Namespace failed to be deleted on Primary Cluster") + Expect(kubectlClient2.DeleteNamespace(controlPlaneNamespace)).To(Succeed(), "Namespace failed to be deleted on Remote Cluster") common.CheckNamespaceEmpty(ctx, clPrimary, controlPlaneNamespace) common.CheckNamespaceEmpty(ctx, clRemote, controlPlaneNamespace) Success("ControlPlane Namespaces are empty") // Delete the entire sample namespace in both clusters - Expect(kubectl.DeleteNamespace("sample", kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Primary Cluster") - Expect(kubectl.DeleteNamespace("sample", kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Remote Cluster") + Expect(kubectlClient1.DeleteNamespace("sample")).To(Succeed(), "Namespace failed to be deleted on Primary Cluster") + Expect(kubectlClient2.DeleteNamespace("sample")).To(Succeed(), "Namespace failed to be deleted on Remote Cluster") common.CheckNamespaceEmpty(ctx, clPrimary, "sample") common.CheckNamespaceEmpty(ctx, clRemote, "sample") @@ -336,8 +333,8 @@ spec: AfterAll(func(ctx SpecContext) { // Delete the Sail Operator from both clusters - Expect(kubectl.DeleteNamespace(namespace, kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Primary Cluster") - Expect(kubectl.DeleteNamespace(namespace, kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Remote Cluster") + Expect(kubectlClient1.DeleteNamespace(namespace)).To(Succeed(), "Namespace failed to be deleted on Primary Cluster") + Expect(kubectlClient2.DeleteNamespace(namespace)).To(Succeed(), "Namespace failed to be deleted on Remote Cluster") // Check that the namespace is empty common.CheckNamespaceEmpty(ctx, clPrimary, namespace) diff --git a/tests/e2e/multicluster/multicluster_suite_test.go b/tests/e2e/multicluster/multicluster_suite_test.go index 5c0cd061a..77515b124 100644 --- a/tests/e2e/multicluster/multicluster_suite_test.go +++ b/tests/e2e/multicluster/multicluster_suite_test.go @@ -25,6 +25,7 @@ import ( "github.com/istio-ecosystem/sail-operator/tests/e2e/util/certs" k8sclient "github.com/istio-ecosystem/sail-operator/tests/e2e/util/client" env "github.com/istio-ecosystem/sail-operator/tests/e2e/util/env" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "sigs.k8s.io/controller-runtime/pkg/client" @@ -50,6 +51,9 @@ var ( westGatewayYAML string exposeServiceYAML string exposeIstiodYAML string + + kubectlClient1 *kubectl.KubectlBuilder + kubectlClient2 *kubectl.KubectlBuilder ) func TestInstall(t *testing.T) { @@ -59,6 +63,7 @@ func TestInstall(t *testing.T) { if ocp { t.Skip("Skipping test. 
Not valid for OCP") // TODO: Implement the steps to run the test on OCP + // https://github.com/istio-ecosystem/sail-operator/issues/365 } RegisterFailHandler(Fail) setup(t) @@ -92,4 +97,8 @@ func setup(t *testing.T) { westGatewayYAML = fmt.Sprintf("%s/docs/multicluster/east-west-gateway-net2.yaml", baseRepoDir) exposeServiceYAML = fmt.Sprintf("%s/docs/multicluster/expose-services.yaml", baseRepoDir) exposeIstiodYAML = fmt.Sprintf("%s/docs/multicluster/expose-istiod.yaml", baseRepoDir) + + // Initialize kubectl utilities, one for each cluster + kubectlClient1 = kubectl.NewKubectlBuilder().SetKubeconfig(kubeconfig) + kubectlClient2 = kubectl.NewKubectlBuilder().SetKubeconfig(kubeconfig2) } diff --git a/tests/e2e/operator/operator_install_test.go b/tests/e2e/operator/operator_install_test.go index cc6777d6c..cbd051feb 100644 --- a/tests/e2e/operator/operator_install_test.go +++ b/tests/e2e/operator/operator_install_test.go @@ -26,7 +26,6 @@ import ( common "github.com/istio-ecosystem/sail-operator/tests/e2e/util/common" . "github.com/istio-ecosystem/sail-operator/tests/e2e/util/gomega" "github.com/istio-ecosystem/sail-operator/tests/e2e/util/helm" - "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" @@ -60,7 +59,7 @@ var _ = Describe("Operator", Ordered, func() { Describe("installation", func() { BeforeAll(func() { - Expect(kubectl.CreateNamespace(namespace)).To(Succeed(), "Namespace failed to be created") + Expect(k.CreateNamespace(namespace)).To(Succeed(), "Namespace failed to be created") extraArg := "" if ocp { @@ -125,7 +124,7 @@ var _ = Describe("Operator", Ordered, func() { Success("Operator uninstalled") By("Deleting the CRDs") - Expect(kubectl.DeleteCRDs(sailCRDs)).To(Succeed(), "CRDs failed to be deleted") + Expect(k.DeleteCRDs(sailCRDs)).To(Succeed(), "CRDs failed to be deleted") Success("CRDs deleted") }) }) diff --git a/tests/e2e/operator/operator_suite_test.go b/tests/e2e/operator/operator_suite_test.go index 32b737594..f0bd1261f 100644 --- a/tests/e2e/operator/operator_suite_test.go +++ b/tests/e2e/operator/operator_suite_test.go @@ -21,6 +21,7 @@ import ( k8sclient "github.com/istio-ecosystem/sail-operator/tests/e2e/util/client" env "github.com/istio-ecosystem/sail-operator/tests/e2e/util/env" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "sigs.k8s.io/controller-runtime/pkg/client" @@ -34,6 +35,8 @@ var ( namespace = env.Get("NAMESPACE", "sail-operator") deploymentName = env.Get("DEPLOYMENT_NAME", "sail-operator") multicluster = env.GetBool("MULTICLUSTER", false) + + k *kubectl.KubectlBuilder ) func TestInstall(t *testing.T) { @@ -58,4 +61,6 @@ func setup() { } else { GinkgoWriter.Println("Running on Kubernetes") } + + k = kubectl.NewKubectlBuilder() } diff --git a/tests/e2e/util/certs/certs.go b/tests/e2e/util/certs/certs.go index 78aaaa004..daddf56d9 100644 --- a/tests/e2e/util/certs/certs.go +++ b/tests/e2e/util/certs/certs.go @@ -227,7 +227,9 @@ func PushIntermediateCA(ns, kubeconfig, zone, network, basePath string, cl clien _, err := common.GetObject(context.Background(), cl, kube.Key("cacerts", ns), &corev1.Secret{}) if err != nil { // Label the namespace with the network - err = kubectl.Patch("", "namespace", ns, "merge", `{"metadata":{"labels":{"topology.istio.io/network":"`+network+`"}}}`, kubeconfig) + k := kubectl.NewKubectlBuilder() + k.SetKubeconfig(kubeconfig) + err = k.Patch("namespace", ns, "merge", `{"metadata":{"labels":{"topology.istio.io/network":"`+network+`"}}}`) if err != nil { return fmt.Errorf("failed to label namespace: %w", err) } diff --git a/tests/e2e/util/common/e2e_utils.go b/tests/e2e/util/common/e2e_utils.go index e17e07686..50cbc0c7f 100644 --- a/tests/e2e/util/common/e2e_utils.go +++ b/tests/e2e/util/common/e2e_utils.go @@ -47,6 +47,8 @@ var ( // - 1.23.0-rc.1 // - 1.24-alpha istiodVersionRegex = regexp.MustCompile(`Version:"(\d+\.\d+(\.\d+)?(-\w+(\.\d+)?)?)`) + + k = kubectl.NewKubectlBuilder() ) // getObject returns the object with the given key @@ -135,52 +137,54 @@ func LogDebugInfo() { } func logOperatorDebugInfo() { - operator, err := kubectl.GetYAML(namespace, "deployment", deploymentName) + operator, err := k.SetNamespace(namespace).GetYAML("deployment", deploymentName) logDebugElement("Operator Deployment YAML", operator, err) - logs, err := kubectl.Logs(namespace, "deploy/"+deploymentName, ptr.Of(120*time.Second)) + logs, err := k.SetNamespace(namespace).Logs("deploy/"+deploymentName, ptr.Of(120*time.Second)) + k.ResetNamespace() logDebugElement("Operator logs", logs, err) - events, err := kubectl.GetEvents(namespace) + events, err := k.SetNamespace(namespace).GetEvents() logDebugElement("Events in "+namespace, events, err) // Temporaty information to gather more details about failure - pods, err := kubectl.GetPods(namespace, "", "-o wide") + pods, err := k.SetNamespace(namespace).GetPods("", "-o wide") logDebugElement("Pods in "+namespace, pods, err) - describe, err := kubectl.Describe(namespace, "deployment", deploymentName) + describe, err := k.SetNamespace(namespace).Describe("deployment", deploymentName) logDebugElement("Operator Deployment describe", describe, err) } func logIstioDebugInfo() { - resource, err := kubectl.GetYAML("", "istio", istioName) + resource, err := k.GetYAML("istio", istioName) logDebugElement("Istio YAML", resource, err) - output, err := kubectl.GetPods(controlPlaneNamespace, "", "-o wide") + output, err := k.SetNamespace(controlPlaneNamespace).GetPods("", "-o wide") logDebugElement("Pods in "+controlPlaneNamespace, output, err) - logs, err := kubectl.Logs(controlPlaneNamespace, "deploy/istiod", ptr.Of(120*time.Second)) + logs, err := k.SetNamespace(controlPlaneNamespace).Logs("deploy/istiod", ptr.Of(120*time.Second)) + k.ResetNamespace() logDebugElement("Istiod logs", logs, err) - events, err := 
kubectl.GetEvents(controlPlaneNamespace) + events, err := k.SetNamespace(controlPlaneNamespace).GetEvents() logDebugElement("Events in "+controlPlaneNamespace, events, err) } func logCNIDebugInfo() { - resource, err := kubectl.GetYAML("", "istiocni", istioCniName) + resource, err := k.GetYAML("istiocni", istioCniName) logDebugElement("IstioCNI YAML", resource, err) - ds, err := kubectl.GetYAML(istioCniNamespace, "daemonset", "istio-cni-node") + ds, err := k.SetNamespace(istioCniNamespace).GetYAML("daemonset", "istio-cni-node") logDebugElement("Istio CNI DaemonSet YAML", ds, err) - events, err := kubectl.GetEvents(istioCniNamespace) + events, err := k.SetNamespace(istioCniNamespace).GetEvents() logDebugElement("Events in "+istioCniNamespace, events, err) // Temporaty information to gather more details about failure - pods, err := kubectl.GetPods(istioCniNamespace, "", "-o wide") + pods, err := k.SetNamespace(istioCniNamespace).GetPods("", "-o wide") logDebugElement("Pods in "+istioCniNamespace, pods, err) - describe, err := kubectl.Describe(istioCniNamespace, "daemonset", "istio-cni-node") + describe, err := k.SetNamespace(istioCniNamespace).Describe("daemonset", "istio-cni-node") logDebugElement("Istio CNI DaemonSet describe", describe, err) } @@ -195,7 +199,8 @@ func logDebugElement(caption string, info string, err error) { } func GetVersionFromIstiod() (string, error) { - output, err := kubectl.Exec(controlPlaneNamespace, "deploy/istiod", "", "pilot-discovery version") + k := kubectl.NewKubectlBuilder() + output, err := k.SetNamespace(controlPlaneNamespace).Exec("deploy/istiod", "", "pilot-discovery version") if err != nil { return "", fmt.Errorf("error getting version from istiod: %w", err) } diff --git a/tests/e2e/util/kubectl/kubectl.go b/tests/e2e/util/kubectl/kubectl.go index 931470851..f07035513 100644 --- a/tests/e2e/util/kubectl/kubectl.go +++ b/tests/e2e/util/kubectl/kubectl.go @@ -23,181 +23,208 @@ import ( "github.com/istio-ecosystem/sail-operator/tests/e2e/util/shell" ) +type KubectlBuilder struct { + binary string + namespace string + kubeconfig string +} + const DefaultBinary = "kubectl" -// optionalKubeconfig add the flag --kubeconfig if the kubeconfig is set -func optionalKubeconfig(kubeconfig []string) string { - if len(kubeconfig) > 0 && kubeconfig[0] != "" { - return fmt.Sprintf("--kubeconfig %s", kubeconfig[0]) - } - return "" +func newKubectlBuilder() *KubectlBuilder { + return &KubectlBuilder{} } -// kubectl return the kubectl command -// If the environment variable COMMAND is set, it will return the value of COMMAND -// Otherwise, it will return the default value "kubectl" as default -// Arguments: -// - format: format of the command without kubeclt or oc -// - args: arguments of the command -func kubectl(format string, args ...interface{}) string { +func (k *KubectlBuilder) setBinary() { binary := DefaultBinary if cmd := os.Getenv("COMMAND"); cmd != "" { binary = cmd } - return binary + " " + fmt.Sprintf(format, args...) 
+ k.binary = binary } -// CreateFromString creates a resource from the given yaml string -func CreateFromString(yamlString string, kubeconfig ...string) error { - cmd := kubectl("create %s -f -", optionalKubeconfig(kubeconfig)) - _, err := shell.ExecuteCommandWithInput(cmd, yamlString) - if err != nil { - return fmt.Errorf("error creating resource from yaml: %w", err) +func (k *KubectlBuilder) build(cmd string) string { + args := []string{k.binary} + + // Only append namespace if it's set + if k.namespace != "" { + args = append(args, k.namespace) } - return nil + + // Only append kubeconfig if it's set + if k.kubeconfig != "" { + args = append(args, k.kubeconfig) + } + + args = append(args, cmd) + + // Join all the arguments with a space + return strings.Join(args, " ") } -// ApplyString applies the given yaml string to the cluster -func ApplyString(ns, yamlString string, kubeconfig ...string) error { - nsflag := nsflag(ns) - // If the namespace is empty, we need to remove the flag because it will fail - // TODO: improve the nsflag function to handle this case +// NewKubectlBuilder creates a new KubectlBuilder +func NewKubectlBuilder() *KubectlBuilder { + k := newKubectlBuilder() + k.setBinary() + return k +} + +// SetNamespace sets the namespace +func (k *KubectlBuilder) SetNamespace(ns string) *KubectlBuilder { if ns == "" { - nsflag = "" + k.namespace = "--all-namespaces" + } else { + k.namespace = fmt.Sprintf("-n %s", ns) } + return k +} - cmd := kubectl("apply %s %s --server-side -f -", nsflag, optionalKubeconfig(kubeconfig)) - _, err := shell.ExecuteCommandWithInput(cmd, yamlString) +// SetKubeconfig sets the kubeconfig +func (k *KubectlBuilder) SetKubeconfig(kubeconfig string) *KubectlBuilder { + if kubeconfig != "" { + k.kubeconfig = fmt.Sprintf("--kubeconfig %s", kubeconfig) + } + return k +} + +// CreateNamespace creates a namespace +// If the namespace already exists, it will return nil +func (k *KubectlBuilder) CreateNamespace(ns string) error { + cmd := k.build(" create namespace " + ns) + output, err := k.executeCommand(cmd) if err != nil { - return fmt.Errorf("error applying yaml: %w", err) + if strings.Contains(output, "AlreadyExists") { + return nil + } + + return fmt.Errorf("error creating namespace: %w, output: %s", err, output) } return nil } -// Apply applies the given yaml file to the cluster -func Apply(ns, yamlFile string, kubeconfig ...string) error { - err := ApplyWithLabels(ns, yamlFile, "", kubeconfig...) 
- return err +// CreateFromString creates a resource from the given yaml string +func (k *KubectlBuilder) CreateFromString(yamlString string) error { + cmd := k.build(" create -f -") + _, err := shell.ExecuteCommandWithInput(cmd, yamlString) + k.ResetNamespace() + if err != nil { + return fmt.Errorf("error creating resource from yaml: %w", err) + } + return nil } -// ApplyWithLabels applies the given yaml file to the cluster with the given labels -func ApplyWithLabels(ns, yamlFile string, label string, kubeconfig ...string) error { - cmd := kubectl("apply -n %s %s -f %s %s", ns, labelFlag(label), yamlFile, optionalKubeconfig(kubeconfig)) - _, err := shell.ExecuteCommand(cmd) - if err != nil { - return fmt.Errorf("error applying yaml: %w", err) +// DeleteCRDs deletes the CRDs by given list of crds names +func (k *KubectlBuilder) DeleteCRDs(crds []string) error { + for _, crd := range crds { + cmd := k.build(" delete crd " + crd) + _, err := shell.ExecuteCommand(cmd) + if err != nil { + k.ResetNamespace() + return fmt.Errorf("error deleting crd %s: %w", crd, err) + } } + k.ResetNamespace() return nil } -// DeleteFromFile deletes a resource from the given yaml file -func DeleteFromFile(yamlFile string, kubeconfig ...string) error { - cmd := kubectl("delete -f %s %s", yamlFile, optionalKubeconfig(kubeconfig)) - _, err := shell.ExecuteCommand(cmd) +// DeleteNamespace deletes a namespace +func (k *KubectlBuilder) DeleteNamespace(ns string) error { + cmd := k.build(" delete namespace " + ns) + _, err := k.executeCommand(cmd) if err != nil { - return fmt.Errorf("error deleting resource from yaml: %w", err) + return fmt.Errorf("error deleting namespace: %w", err) } return nil } -// CreateNamespace creates a namespace -// If the namespace already exists, it will return nil -// Arguments: -// - ns: namespace -// - kubeconfig: optional kubeconfig to set the target file -func CreateNamespace(ns string, kubeconfig ...string) error { - cmd := kubectl("create namespace %s %s", ns, optionalKubeconfig(kubeconfig)) - output, err := shell.ExecuteCommand(cmd) +// ApplyString applies the given yaml string to the cluster +func (k *KubectlBuilder) ApplyString(yamlString string) error { + cmd := k.build(" apply --server-side -f -") + _, err := shell.ExecuteCommandWithInput(cmd, yamlString) + k.ResetNamespace() if err != nil { - if strings.Contains(output, "AlreadyExists") { - return nil - } - - return fmt.Errorf("error creating namespace: %w, output: %s", err, output) + return fmt.Errorf("error applying yaml: %w", err) } return nil } -// DeleteNamespace deletes a namespace -// Arguments: -// - ns: namespace -// - kubeconfig: optional kubeconfig to set the target file -func DeleteNamespace(ns string, kubeconfig ...string) error { - cmd := kubectl("delete namespace %s %s", ns, optionalKubeconfig(kubeconfig)) - _, err := shell.ExecuteCommand(cmd) +// Apply applies the given yaml file to the cluster +func (k *KubectlBuilder) Apply(yamlFile string) error { + err := k.ApplyWithLabels(yamlFile, "") + return err +} + +// ApplyWithLabels applies the given yaml file to the cluster with the given labels +func (k *KubectlBuilder) ApplyWithLabels(yamlFile, label string) error { + cmd := k.build(" apply " + labelFlag(label) + " -f " + yamlFile) + _, err := k.executeCommand(cmd) if err != nil { - return fmt.Errorf("error deleting namespace: %w", err) + return fmt.Errorf("error applying yaml: %w", err) } return nil } -// Delete deletes a resource based on the namespace, kind and the name. 
Optionally, you can provide a kubeconfig -func Delete(ns, kind, name string, kubeconfig ...string) error { - cmd := kubectl("delete %s %s %s %s", kind, name, nsflag(ns), optionalKubeconfig(kubeconfig)) - _, err := shell.ExecuteCommand(cmd) +// DeleteFromFile deletes a resource from the given yaml file +func (k *KubectlBuilder) DeleteFromFile(yamlFile string) error { + cmd := k.build(" delete -f " + yamlFile) + _, err := k.executeCommand(cmd) if err != nil { - return fmt.Errorf("error deleting deployment: %w", err) + return fmt.Errorf("error deleting resource from yaml: %w", err) } return nil } -// DeleteCRDs deletes the CRDs by given list of crds names -func DeleteCRDs(crds []string) error { - for _, crd := range crds { - cmd := kubectl("delete crd %s", crd) - _, err := shell.ExecuteCommand(cmd) - if err != nil { - return fmt.Errorf("error deleting crd %s: %w", crd, err) - } +// Delete deletes a resource based on the namespace, kind and the name +func (k *KubectlBuilder) Delete(kind, name string) error { + cmd := k.build(" delete " + kind + " " + name) + _, err := k.executeCommand(cmd) + if err != nil { + return fmt.Errorf("error deleting deployment: %w", err) } return nil } -// Patch patches a resource. -func Patch(ns, kind, name, patchType, patch string, kubeconfig ...string) error { - cmd := kubectl(`patch %s %s %s %s --type=%s -p=%q`, kind, name, prepend("-n", ns), optionalKubeconfig(kubeconfig), patchType, patch) - _, err := shell.ExecuteCommand(cmd) +// Patch patches a resource +func (k *KubectlBuilder) Patch(kind, name, patchType, patch string) error { + cmd := k.build(fmt.Sprintf(" patch %s %s --type=%s -p=%q", kind, name, patchType, patch)) + _, err := k.executeCommand(cmd) if err != nil { return fmt.Errorf("error patching resource: %w", err) } return nil } -// ForceDelete deletes a resource by removing its finalizers. -func ForceDelete(ns, kind, name string) error { +// ForceDelete deletes a resource by removing its finalizers +func (k *KubectlBuilder) ForceDelete(kind, name string) error { // Not all resources have finalizers, trying to remove them returns an error here. // We explicitly ignore the error and attempt to delete the resource anyway. 
- _ = Patch(ns, kind, name, "json", `[{"op": "remove", "path": "/metadata/finalizers"}]`) - return Delete(ns, kind, name) + _ = k.Patch(kind, name, "json", `[{"op": "remove", "path": "/metadata/finalizers"}]`) + return k.Delete(kind, name) } // GetYAML returns the yaml of a resource -// Arguments: -// - ns: namespace -// - kind: type of the resource -// - name: name of the resource -func GetYAML(ns, kind, name string) (string, error) { - cmd := kubectl("get %s %s %s -o yaml", kind, name, nsflag(ns)) - return shell.ExecuteCommand(cmd) +func (k *KubectlBuilder) GetYAML(kind, name string) (string, error) { + cmd := k.build(fmt.Sprintf(" get %s %s -o yaml", kind, name)) + output, err := k.executeCommand(cmd) + if err != nil { + return "", fmt.Errorf("error getting yaml: %w, output: %s", err, output) + } + + return output, nil } // GetPods returns the pods of a namespace -func GetPods(ns string, kubeconfig string, args ...string) (string, error) { - kubeconfigFlag := "" - if kubeconfig != "" { - kubeconfigFlag = fmt.Sprintf("--kubeconfig %s", kubeconfig) - } - - cmd := kubectl("get pods %s %s %s", nsflag(ns), strings.Join(args, " "), kubeconfigFlag) - output, err := shell.ExecuteCommand(cmd) +func (k *KubectlBuilder) GetPods(args ...string) (string, error) { + cmd := k.build(fmt.Sprintf(" get pods %s", strings.Join(args, " "))) + output, err := k.executeCommand(cmd) if err != nil { return "", fmt.Errorf("error getting pods: %w, output: %s", err, output) } @@ -205,70 +232,52 @@ func GetPods(ns string, kubeconfig string, args ...string) (string, error) { return output, nil } -// GetEvents returns the events of a namespace -func GetEvents(ns string) (string, error) { - cmd := kubectl("get events %s", nsflag(ns)) - output, err := shell.ExecuteCommand(cmd) +// GetInternalIP returns the internal IP of a node +func (k *KubectlBuilder) GetInternalIP(label string) (string, error) { + cmd := k.build(fmt.Sprintf(" get nodes -l %s -o jsonpath='{.items[0].status.addresses[?(@.type==\"InternalIP\")].address}'", label)) + output, err := k.executeCommand(cmd) if err != nil { - return "", fmt.Errorf("error getting events: %w, output: %s", err, output) + return "", fmt.Errorf("error getting internal IP: %w, output: %s", err, output) } return output, nil } -// Describe returns the description of a resource -// Arguments: -// - ns: namespace -// - kind: type of the resource -// - name: name of the resource -func Describe(ns, kind, name string) (string, error) { - cmd := kubectl("describe %s %s %s", kind, name, nsflag(ns)) - output, err := shell.ExecuteCommand(cmd) +// Exec executes a command in the pod or specific container +func (k *KubectlBuilder) Exec(pod, container, command string) (string, error) { + cmd := k.build(fmt.Sprintf(" exec %s %s -- %s", pod, containerflag(container), command)) + output, err := k.executeCommand(cmd) if err != nil { - return "", fmt.Errorf("error describing resource: %w, output: %s", err, output) + return "", err } - return output, nil } -// GetInternalIP returns the internal IP of a node -// Arguments: -// - label: label of the node -// - kubeconfig: optional kubeconfig to set the target file -func GetInternalIP(label string, kubeconfig ...string) (string, error) { - cmd := kubectl("get nodes -l %s -o jsonpath='{.items[0].status.addresses[?(@.type==\"InternalIP\")].address}' %s", label, optionalKubeconfig(kubeconfig)) - output, err := shell.ExecuteCommand(cmd) +// GetEvents returns the events of a namespace +func (k *KubectlBuilder) GetEvents() (string, error) { + cmd := k.build(" 
get events") + output, err := k.executeCommand(cmd) if err != nil { - return "", fmt.Errorf("error getting internal IP: %w, output: %s", err, output) + return "", fmt.Errorf("error getting events: %w, output: %s", err, output) } return output, nil } -// Logs returns the logs of a deployment -// Arguments: -// - ns: namespace -// - pod: the pod name, "kind/name", or "-l labelselector" -// - Since: time range -func Logs(ns, pod string, since *time.Duration) (string, error) { - cmd := kubectl("logs %s %s %s", pod, nsflag(ns), sinceFlag(since)) - output, err := shell.ExecuteCommand(cmd) +// Describe returns the description of a resource +func (k *KubectlBuilder) Describe(kind, name string) (string, error) { + cmd := k.build(fmt.Sprintf(" describe %s %s", kind, name)) + output, err := k.executeCommand(cmd) if err != nil { - return "", err + return "", fmt.Errorf("error describing resource: %w, output: %s", err, output) } - return output, nil -} -func sinceFlag(since *time.Duration) string { - if since == nil { - return "" - } - return "--since=" + since.String() + return output, nil } -// Exec executes a command in the pod or specific container -func Exec(ns, pod, container, command string, kubeconfig ...string) (string, error) { - cmd := kubectl("exec %s %s %s %s -- %s", pod, containerflag(container), nsflag(ns), optionalKubeconfig(kubeconfig), command) +// Logs returns the logs of a deployment +func (k *KubectlBuilder) Logs(pod string, since *time.Duration) (string, error) { + cmd := k.build(fmt.Sprintf(" logs %s %s", pod, sinceFlag(since))) output, err := shell.ExecuteCommand(cmd) if err != nil { return "", err @@ -276,19 +285,23 @@ func Exec(ns, pod, container, command string, kubeconfig ...string) (string, err return output, nil } -// prepend prepends the prefix, but only if str is not empty -func prepend(prefix, str string) string { - if str == "" { - return str - } - return prefix + str +// executeCommand handles running the command and then resets the namespace automatically +func (k *KubectlBuilder) executeCommand(cmd string) (string, error) { + result, err := shell.ExecuteCommand(cmd) + k.ResetNamespace() + return result, err } -func nsflag(ns string) string { - if ns == "" { - return "--all-namespaces" +// ResetNamespace resets the namespace +func (k *KubectlBuilder) ResetNamespace() { + k.namespace = "" +} + +func sinceFlag(since *time.Duration) string { + if since == nil { + return "" } - return "-n " + ns + return "--since=" + since.String() } func labelFlag(label string) string { From 4da701e28a038505937c5ecfc71ccfa5df672cf8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20Luk=C5=A1a?= Date: Mon, 30 Sep 2024 12:10:27 +0200 Subject: [PATCH 19/25] Update dependencies and fix transform (#374) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Marko Lukša --- api/v1alpha1/values_types.gen.go | 48 +++++++++---------- .../manifests/sailoperator.io_istiocnis.yaml | 8 ++-- .../sailoperator.io_istiorevisions.yaml | 40 ++++++++-------- bundle/manifests/sailoperator.io_istios.yaml | 40 ++++++++-------- .../sailoperator.io_remoteistios.yaml | 40 ++++++++-------- chart/crds/sailoperator.io_istiocnis.yaml | 8 ++-- .../crds/sailoperator.io_istiorevisions.yaml | 40 ++++++++-------- chart/crds/sailoperator.io_istios.yaml | 40 ++++++++-------- chart/crds/sailoperator.io_remoteistios.yaml | 40 ++++++++-------- docs/api-reference/sailoperator.io.md | 48 +++++++++---------- go.mod | 6 +-- go.sum | 12 ++--- hack/api_transformer/transform.yaml | 4 
+- 13 files changed, 188 insertions(+), 186 deletions(-) diff --git a/api/v1alpha1/values_types.gen.go b/api/v1alpha1/values_types.gen.go index 76ded79ff..276fc28b4 100644 --- a/api/v1alpha1/values_types.gen.go +++ b/api/v1alpha1/values_types.gen.go @@ -108,7 +108,7 @@ type CNIConfig struct { Affinity *k8sv1.Affinity `json:"affinity,omitempty"` // Additional annotations to apply to the istio-cni Pods. // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. PodAnnotations map[string]string `json:"podAnnotations,omitempty"` // PodSecurityPolicy cluster role. No longer used anywhere. PspClusterRole string `json:"psp_cluster_role,omitempty"` @@ -126,7 +126,7 @@ type CNIConfig struct { Resources *k8sv1.ResourceRequirements `json:"resources,omitempty"` // No longer used for CNI. See: https://github.com/istio/istio/issues/49004 // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. Privileged *bool `json:"privileged,omitempty"` // The Container seccompProfile // @@ -190,7 +190,7 @@ type CNIRepairConfig struct { RepairPods bool `json:"repairPods,omitempty"` // No longer used. // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. CreateEvents string `json:"createEvents,omitempty"` // The Repair controller has 3 modes (labelPods, deletePods, and repairPods). Pick which one meets your use cases. Note only one may be used. // The mode defines the action the controller will take when a pod is detected as broken. @@ -240,7 +240,7 @@ type GlobalConfig struct { // // Deprecated: replaced by the affinity k8s settings which allows architecture nodeAffinity configuration of this behavior. // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. Arch *ArchConfig `json:"arch,omitempty"` // List of certSigners to allow "approve" action in the ClusterRole CertSigners []string `json:"certSigners,omitempty"` @@ -250,17 +250,17 @@ type GlobalConfig struct { // // See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. DefaultNodeSelector map[string]string `json:"defaultNodeSelector,omitempty"` // Specifies the default pod disruption budget configuration. // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. DefaultPodDisruptionBudget *DefaultPodDisruptionBudgetConfig `json:"defaultPodDisruptionBudget,omitempty"` // Default k8s resources settings for all Istio control plane components. // // See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. DefaultResources *k8sv1.ResourceRequirements `json:"defaultResources,omitempty"` // Default node tolerations to be applied to all deployments so that all pods can be // scheduled to nodes with matching taints. 
Each component can overwrite @@ -269,7 +269,7 @@ type GlobalConfig struct { // Configure this field in case that all pods of Istio control plane are expected to // be scheduled to particular nodes with specified taints. // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. DefaultTolerations []k8sv1.Toleration `json:"defaultTolerations,omitempty"` // Specifies the docker hub for Istio images. Hub string `json:"hub,omitempty"` @@ -363,7 +363,7 @@ type GlobalConfig struct { // // See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. PriorityClassName string `json:"priorityClassName,omitempty"` // Specifies how proxies are configured within Istio. Proxy *ProxyConfig `json:"proxy,omitempty"` @@ -387,7 +387,7 @@ type GlobalConfig struct { // Configure the policy for validating JWT. // This is deprecated and has no effect. // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. JwtPolicy string `json:"jwtPolicy,omitempty"` // Specifies the configuration for Security Token Service. Sts *STSConfig `json:"sts,omitempty"` @@ -476,7 +476,7 @@ type PilotConfig struct { AutoscaleBehavior *autoscalingv2.HorizontalPodAutoscalerBehavior `json:"autoscaleBehavior,omitempty"` // Number of replicas in the Pilot Deployment. // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. ReplicaCount uint32 `json:"replicaCount,omitempty"` // Image name used for Pilot. // @@ -494,19 +494,19 @@ type PilotConfig struct { // // See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. Resources *k8sv1.ResourceRequirements `json:"resources,omitempty"` // Target CPU utilization used in HorizontalPodAutoscaler. // // See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. Cpu *TargetUtilizationConfig `json:"cpu,omitempty"` // K8s node selector. // // See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. NodeSelector map[string]string `json:"nodeSelector,omitempty"` // Maximum duration that a sidecar can be connected to a pilot. // @@ -539,7 +539,7 @@ type PilotConfig struct { Affinity *k8sv1.Affinity `json:"affinity,omitempty"` // K8s rolling update strategy // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. 
// +kubebuilder:validation:XIntOrString RollingMaxSurge *intstr.IntOrString `json:"rollingMaxSurge,omitempty"` // The number of pods that can be unavailable during a rolling update (see @@ -548,20 +548,20 @@ type PilotConfig struct { // May be specified as a number of pods or as a percent of the total number // of pods at the start of the update. // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. // +kubebuilder:validation:XIntOrString RollingMaxUnavailable *intstr.IntOrString `json:"rollingMaxUnavailable,omitempty"` // The node tolerations to be applied to the Pilot deployment so that it can be // scheduled to particular nodes with matching taints. // More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. Tolerations []k8sv1.Toleration `json:"tolerations,omitempty"` // K8s annotations for pods. // // See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. PodAnnotations map[string]string `json:"podAnnotations,omitempty"` // K8s annotations for the Service. // @@ -602,7 +602,7 @@ type PilotConfig struct { // // See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. Memory *TargetUtilizationConfig `json:"memory,omitempty"` // Configures whether to use an existing CNI installation for workloads Cni *CNIUsageConfig `json:"cni,omitempty"` @@ -726,7 +726,7 @@ type ProxyConfig struct { // // See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. Resources *k8sv1.ResourceRequirements `json:"resources,omitempty"` // Specify which tracer to use. One of: zipkin, lightstep, datadog, stackdriver. // If using stackdriver tracer outside GCP, set env GOOGLE_APPLICATION_CREDENTIALS to the GCP credential file. @@ -740,7 +740,7 @@ type ProxyConfig struct { // // Deprecated: replaced by ProxyConfig setting which allows per-pod configuration of this behavior. // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. HoldApplicationUntilProxyStarts *bool `json:"holdApplicationUntilProxyStarts,omitempty"` // A comma separated list of inbound ports for which traffic is to be redirected to Envoy. // The wildcard character '*' can be used to configure redirection for all ports. @@ -772,7 +772,7 @@ type ProxyInitConfig struct { // // See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. 
Resources *k8sv1.ResourceRequirements `json:"resources,omitempty"` } @@ -786,7 +786,7 @@ type ResourcesRequestsConfig struct { // Configuration for the SecretDiscoveryService instead of using K8S secrets to mount the certificates. type SDSConfig struct { - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. Token *SDSConfigToken `json:"token,omitempty"` } @@ -964,7 +964,7 @@ type CNIGlobalConfig struct { // Default k8s resources settings for all Istio co // // See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container // - // Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + // Deprecated: Marked as deprecated in pkg/apis/values_types.proto. DefaultResources *k8sv1.ResourceRequirements `json:"defaultResources,omitempty"` // Specifies the docker hub for Istio images. diff --git a/bundle/manifests/sailoperator.io_istiocnis.yaml b/bundle/manifests/sailoperator.io_istiocnis.yaml index 6b37a1d81..b5d619dee 100644 --- a/bundle/manifests/sailoperator.io_istiocnis.yaml +++ b/bundle/manifests/sailoperator.io_istiocnis.yaml @@ -1079,13 +1079,13 @@ spec: description: |- Additional annotations to apply to the istio-cni Pods. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object privileged: description: |- No longer used for CNI. See: https://github.com/istio/istio/issues/49004 - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: boolean provider: description: |- @@ -1123,7 +1123,7 @@ spec: description: |- No longer used. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string deletePods: description: |- @@ -1291,7 +1291,7 @@ spec: description: |- See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- diff --git a/bundle/manifests/sailoperator.io_istiorevisions.yaml b/bundle/manifests/sailoperator.io_istiorevisions.yaml index 091ee88d7..0db2183a7 100644 --- a/bundle/manifests/sailoperator.io_istiorevisions.yaml +++ b/bundle/manifests/sailoperator.io_istiorevisions.yaml @@ -118,7 +118,7 @@ spec: - Least preferred\n\t2 - No preference\n\t3 - Most preferred\n\nDeprecated: replaced by the affinity k8s settings which allows architecture nodeAffinity configuration of this behavior.\n\nDeprecated: - Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto." + Marked as deprecated in pkg/apis/values_types.proto." properties: amd64: description: Sets pod scheduling weight for amd64 arch @@ -170,13 +170,13 @@ spec: See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object defaultPodDisruptionBudget: description: |- Specifies the default pod disruption budget configuration. 
- Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: enabled: description: Controls whether a PodDisruptionBudget with @@ -190,7 +190,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -251,7 +251,7 @@ spec: Configure this field in case that all pods of Istio control plane are expected to be scheduled to particular nodes with specified taints. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. items: description: |- The pod this Toleration is attached to tolerates any taint that matches @@ -344,7 +344,7 @@ spec: Configure the policy for validating JWT. This is deprecated and has no effect. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string logAsJson: description: Specifies whether istio components should output @@ -566,7 +566,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string proxy: description: Specifies how proxies are configured within Istio. @@ -610,7 +610,7 @@ spec: Deprecated: replaced by ProxyConfig setting which allows per-pod configuration of this behavior. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: boolean image: description: |- @@ -901,7 +901,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -1008,7 +1008,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -1075,7 +1075,7 @@ spec: instead of using K8S secrets to mount the certificates. properties: token: - description: 'Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.' + description: 'Deprecated: Marked as deprecated in pkg/apis/values_types.proto.' properties: aud: type: string @@ -5429,7 +5429,7 @@ spec: See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: targetAverageUtilization: description: |- @@ -5507,7 +5507,7 @@ spec: See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. 
+ Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: targetAverageUtilization: description: |- @@ -5525,7 +5525,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object podAnnotations: additionalProperties: @@ -5535,7 +5535,7 @@ spec: See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object podLabels: additionalProperties: @@ -5549,7 +5549,7 @@ spec: description: |- Number of replicas in the Pilot Deployment. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. format: int32 type: integer resources: @@ -5558,7 +5558,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -5617,7 +5617,7 @@ spec: description: |- K8s rolling update strategy - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. x-kubernetes-int-or-string: true rollingMaxUnavailable: anyOf: @@ -5630,7 +5630,7 @@ spec: May be specified as a number of pods or as a percent of the total number of pods at the start of the update. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. x-kubernetes-int-or-string: true seccompProfile: description: |- @@ -5693,7 +5693,7 @@ spec: scheduled to particular nodes with matching taints. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. items: description: |- The pod this Toleration is attached to tolerates any taint that matches diff --git a/bundle/manifests/sailoperator.io_istios.yaml b/bundle/manifests/sailoperator.io_istios.yaml index 4057df688..16fcbaf81 100644 --- a/bundle/manifests/sailoperator.io_istios.yaml +++ b/bundle/manifests/sailoperator.io_istios.yaml @@ -177,7 +177,7 @@ spec: - Least preferred\n\t2 - No preference\n\t3 - Most preferred\n\nDeprecated: replaced by the affinity k8s settings which allows architecture nodeAffinity configuration of this behavior.\n\nDeprecated: - Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto." + Marked as deprecated in pkg/apis/values_types.proto." properties: amd64: description: Sets pod scheduling weight for amd64 arch @@ -229,13 +229,13 @@ spec: See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object defaultPodDisruptionBudget: description: |- Specifies the default pod disruption budget configuration. 
- Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: enabled: description: Controls whether a PodDisruptionBudget with @@ -249,7 +249,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -310,7 +310,7 @@ spec: Configure this field in case that all pods of Istio control plane are expected to be scheduled to particular nodes with specified taints. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. items: description: |- The pod this Toleration is attached to tolerates any taint that matches @@ -403,7 +403,7 @@ spec: Configure the policy for validating JWT. This is deprecated and has no effect. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string logAsJson: description: Specifies whether istio components should output @@ -625,7 +625,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string proxy: description: Specifies how proxies are configured within Istio. @@ -669,7 +669,7 @@ spec: Deprecated: replaced by ProxyConfig setting which allows per-pod configuration of this behavior. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: boolean image: description: |- @@ -960,7 +960,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -1067,7 +1067,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -1134,7 +1134,7 @@ spec: instead of using K8S secrets to mount the certificates. properties: token: - description: 'Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.' + description: 'Deprecated: Marked as deprecated in pkg/apis/values_types.proto.' properties: aud: type: string @@ -5488,7 +5488,7 @@ spec: See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: targetAverageUtilization: description: |- @@ -5566,7 +5566,7 @@ spec: See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. 
+ Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: targetAverageUtilization: description: |- @@ -5584,7 +5584,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object podAnnotations: additionalProperties: @@ -5594,7 +5594,7 @@ spec: See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object podLabels: additionalProperties: @@ -5608,7 +5608,7 @@ spec: description: |- Number of replicas in the Pilot Deployment. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. format: int32 type: integer resources: @@ -5617,7 +5617,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -5676,7 +5676,7 @@ spec: description: |- K8s rolling update strategy - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. x-kubernetes-int-or-string: true rollingMaxUnavailable: anyOf: @@ -5689,7 +5689,7 @@ spec: May be specified as a number of pods or as a percent of the total number of pods at the start of the update. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. x-kubernetes-int-or-string: true seccompProfile: description: |- @@ -5752,7 +5752,7 @@ spec: scheduled to particular nodes with matching taints. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. items: description: |- The pod this Toleration is attached to tolerates any taint that matches diff --git a/bundle/manifests/sailoperator.io_remoteistios.yaml b/bundle/manifests/sailoperator.io_remoteistios.yaml index 19c17f37d..a094c3731 100644 --- a/bundle/manifests/sailoperator.io_remoteistios.yaml +++ b/bundle/manifests/sailoperator.io_remoteistios.yaml @@ -172,7 +172,7 @@ spec: - Least preferred\n\t2 - No preference\n\t3 - Most preferred\n\nDeprecated: replaced by the affinity k8s settings which allows architecture nodeAffinity configuration of this behavior.\n\nDeprecated: - Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto." + Marked as deprecated in pkg/apis/values_types.proto." properties: amd64: description: Sets pod scheduling weight for amd64 arch @@ -224,13 +224,13 @@ spec: See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object defaultPodDisruptionBudget: description: |- Specifies the default pod disruption budget configuration. 
- Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: enabled: description: Controls whether a PodDisruptionBudget with @@ -244,7 +244,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -305,7 +305,7 @@ spec: Configure this field in case that all pods of Istio control plane are expected to be scheduled to particular nodes with specified taints. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. items: description: |- The pod this Toleration is attached to tolerates any taint that matches @@ -398,7 +398,7 @@ spec: Configure the policy for validating JWT. This is deprecated and has no effect. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string logAsJson: description: Specifies whether istio components should output @@ -620,7 +620,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string proxy: description: Specifies how proxies are configured within Istio. @@ -664,7 +664,7 @@ spec: Deprecated: replaced by ProxyConfig setting which allows per-pod configuration of this behavior. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: boolean image: description: |- @@ -955,7 +955,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -1062,7 +1062,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -1129,7 +1129,7 @@ spec: instead of using K8S secrets to mount the certificates. properties: token: - description: 'Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.' + description: 'Deprecated: Marked as deprecated in pkg/apis/values_types.proto.' properties: aud: type: string @@ -5483,7 +5483,7 @@ spec: See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: targetAverageUtilization: description: |- @@ -5561,7 +5561,7 @@ spec: See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. 
+ Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: targetAverageUtilization: description: |- @@ -5579,7 +5579,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object podAnnotations: additionalProperties: @@ -5589,7 +5589,7 @@ spec: See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object podLabels: additionalProperties: @@ -5603,7 +5603,7 @@ spec: description: |- Number of replicas in the Pilot Deployment. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. format: int32 type: integer resources: @@ -5612,7 +5612,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -5671,7 +5671,7 @@ spec: description: |- K8s rolling update strategy - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. x-kubernetes-int-or-string: true rollingMaxUnavailable: anyOf: @@ -5684,7 +5684,7 @@ spec: May be specified as a number of pods or as a percent of the total number of pods at the start of the update. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. x-kubernetes-int-or-string: true seccompProfile: description: |- @@ -5747,7 +5747,7 @@ spec: scheduled to particular nodes with matching taints. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. items: description: |- The pod this Toleration is attached to tolerates any taint that matches diff --git a/chart/crds/sailoperator.io_istiocnis.yaml b/chart/crds/sailoperator.io_istiocnis.yaml index 3852ba680..d92a14591 100644 --- a/chart/crds/sailoperator.io_istiocnis.yaml +++ b/chart/crds/sailoperator.io_istiocnis.yaml @@ -1079,13 +1079,13 @@ spec: description: |- Additional annotations to apply to the istio-cni Pods. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object privileged: description: |- No longer used for CNI. See: https://github.com/istio/istio/issues/49004 - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: boolean provider: description: |- @@ -1123,7 +1123,7 @@ spec: description: |- No longer used. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. 
type: string deletePods: description: |- @@ -1291,7 +1291,7 @@ spec: description: |- See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- diff --git a/chart/crds/sailoperator.io_istiorevisions.yaml b/chart/crds/sailoperator.io_istiorevisions.yaml index 0dabf6f01..a14357901 100644 --- a/chart/crds/sailoperator.io_istiorevisions.yaml +++ b/chart/crds/sailoperator.io_istiorevisions.yaml @@ -118,7 +118,7 @@ spec: - Least preferred\n\t2 - No preference\n\t3 - Most preferred\n\nDeprecated: replaced by the affinity k8s settings which allows architecture nodeAffinity configuration of this behavior.\n\nDeprecated: - Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto." + Marked as deprecated in pkg/apis/values_types.proto." properties: amd64: description: Sets pod scheduling weight for amd64 arch @@ -170,13 +170,13 @@ spec: See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object defaultPodDisruptionBudget: description: |- Specifies the default pod disruption budget configuration. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: enabled: description: Controls whether a PodDisruptionBudget with @@ -190,7 +190,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -251,7 +251,7 @@ spec: Configure this field in case that all pods of Istio control plane are expected to be scheduled to particular nodes with specified taints. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. items: description: |- The pod this Toleration is attached to tolerates any taint that matches @@ -344,7 +344,7 @@ spec: Configure the policy for validating JWT. This is deprecated and has no effect. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string logAsJson: description: Specifies whether istio components should output @@ -566,7 +566,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string proxy: description: Specifies how proxies are configured within Istio. @@ -610,7 +610,7 @@ spec: Deprecated: replaced by ProxyConfig setting which allows per-pod configuration of this behavior. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. 
type: boolean image: description: |- @@ -901,7 +901,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -1008,7 +1008,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -1075,7 +1075,7 @@ spec: instead of using K8S secrets to mount the certificates. properties: token: - description: 'Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.' + description: 'Deprecated: Marked as deprecated in pkg/apis/values_types.proto.' properties: aud: type: string @@ -5429,7 +5429,7 @@ spec: See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: targetAverageUtilization: description: |- @@ -5507,7 +5507,7 @@ spec: See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: targetAverageUtilization: description: |- @@ -5525,7 +5525,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object podAnnotations: additionalProperties: @@ -5535,7 +5535,7 @@ spec: See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object podLabels: additionalProperties: @@ -5549,7 +5549,7 @@ spec: description: |- Number of replicas in the Pilot Deployment. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. format: int32 type: integer resources: @@ -5558,7 +5558,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -5617,7 +5617,7 @@ spec: description: |- K8s rolling update strategy - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. x-kubernetes-int-or-string: true rollingMaxUnavailable: anyOf: @@ -5630,7 +5630,7 @@ spec: May be specified as a number of pods or as a percent of the total number of pods at the start of the update. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. 
x-kubernetes-int-or-string: true seccompProfile: description: |- @@ -5693,7 +5693,7 @@ spec: scheduled to particular nodes with matching taints. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. items: description: |- The pod this Toleration is attached to tolerates any taint that matches diff --git a/chart/crds/sailoperator.io_istios.yaml b/chart/crds/sailoperator.io_istios.yaml index 8ccbb7dcc..e161d7172 100644 --- a/chart/crds/sailoperator.io_istios.yaml +++ b/chart/crds/sailoperator.io_istios.yaml @@ -177,7 +177,7 @@ spec: - Least preferred\n\t2 - No preference\n\t3 - Most preferred\n\nDeprecated: replaced by the affinity k8s settings which allows architecture nodeAffinity configuration of this behavior.\n\nDeprecated: - Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto." + Marked as deprecated in pkg/apis/values_types.proto." properties: amd64: description: Sets pod scheduling weight for amd64 arch @@ -229,13 +229,13 @@ spec: See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object defaultPodDisruptionBudget: description: |- Specifies the default pod disruption budget configuration. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: enabled: description: Controls whether a PodDisruptionBudget with @@ -249,7 +249,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -310,7 +310,7 @@ spec: Configure this field in case that all pods of Istio control plane are expected to be scheduled to particular nodes with specified taints. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. items: description: |- The pod this Toleration is attached to tolerates any taint that matches @@ -403,7 +403,7 @@ spec: Configure the policy for validating JWT. This is deprecated and has no effect. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string logAsJson: description: Specifies whether istio components should output @@ -625,7 +625,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string proxy: description: Specifies how proxies are configured within Istio. @@ -669,7 +669,7 @@ spec: Deprecated: replaced by ProxyConfig setting which allows per-pod configuration of this behavior. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. 
type: boolean image: description: |- @@ -960,7 +960,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -1067,7 +1067,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -1134,7 +1134,7 @@ spec: instead of using K8S secrets to mount the certificates. properties: token: - description: 'Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.' + description: 'Deprecated: Marked as deprecated in pkg/apis/values_types.proto.' properties: aud: type: string @@ -5488,7 +5488,7 @@ spec: See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: targetAverageUtilization: description: |- @@ -5566,7 +5566,7 @@ spec: See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: targetAverageUtilization: description: |- @@ -5584,7 +5584,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object podAnnotations: additionalProperties: @@ -5594,7 +5594,7 @@ spec: See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object podLabels: additionalProperties: @@ -5608,7 +5608,7 @@ spec: description: |- Number of replicas in the Pilot Deployment. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. format: int32 type: integer resources: @@ -5617,7 +5617,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -5676,7 +5676,7 @@ spec: description: |- K8s rolling update strategy - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. x-kubernetes-int-or-string: true rollingMaxUnavailable: anyOf: @@ -5689,7 +5689,7 @@ spec: May be specified as a number of pods or as a percent of the total number of pods at the start of the update. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. 
x-kubernetes-int-or-string: true seccompProfile: description: |- @@ -5752,7 +5752,7 @@ spec: scheduled to particular nodes with matching taints. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. items: description: |- The pod this Toleration is attached to tolerates any taint that matches diff --git a/chart/crds/sailoperator.io_remoteistios.yaml b/chart/crds/sailoperator.io_remoteistios.yaml index edc2c67d5..4fd75d9ea 100644 --- a/chart/crds/sailoperator.io_remoteistios.yaml +++ b/chart/crds/sailoperator.io_remoteistios.yaml @@ -172,7 +172,7 @@ spec: - Least preferred\n\t2 - No preference\n\t3 - Most preferred\n\nDeprecated: replaced by the affinity k8s settings which allows architecture nodeAffinity configuration of this behavior.\n\nDeprecated: - Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto." + Marked as deprecated in pkg/apis/values_types.proto." properties: amd64: description: Sets pod scheduling weight for amd64 arch @@ -224,13 +224,13 @@ spec: See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object defaultPodDisruptionBudget: description: |- Specifies the default pod disruption budget configuration. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: enabled: description: Controls whether a PodDisruptionBudget with @@ -244,7 +244,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -305,7 +305,7 @@ spec: Configure this field in case that all pods of Istio control plane are expected to be scheduled to particular nodes with specified taints. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. items: description: |- The pod this Toleration is attached to tolerates any taint that matches @@ -398,7 +398,7 @@ spec: Configure the policy for validating JWT. This is deprecated and has no effect. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string logAsJson: description: Specifies whether istio components should output @@ -620,7 +620,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: string proxy: description: Specifies how proxies are configured within Istio. @@ -664,7 +664,7 @@ spec: Deprecated: replaced by ProxyConfig setting which allows per-pod configuration of this behavior. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. 
type: boolean image: description: |- @@ -955,7 +955,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -1062,7 +1062,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -1129,7 +1129,7 @@ spec: instead of using K8S secrets to mount the certificates. properties: token: - description: 'Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.' + description: 'Deprecated: Marked as deprecated in pkg/apis/values_types.proto.' properties: aud: type: string @@ -5483,7 +5483,7 @@ spec: See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: targetAverageUtilization: description: |- @@ -5561,7 +5561,7 @@ spec: See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: targetAverageUtilization: description: |- @@ -5579,7 +5579,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object podAnnotations: additionalProperties: @@ -5589,7 +5589,7 @@ spec: See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. type: object podLabels: additionalProperties: @@ -5603,7 +5603,7 @@ spec: description: |- Number of replicas in the Pilot Deployment. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. format: int32 type: integer resources: @@ -5612,7 +5612,7 @@ spec: See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. properties: claims: description: |- @@ -5671,7 +5671,7 @@ spec: description: |- K8s rolling update strategy - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. x-kubernetes-int-or-string: true rollingMaxUnavailable: anyOf: @@ -5684,7 +5684,7 @@ spec: May be specified as a number of pods or as a percent of the total number of pods at the start of the update. - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. 
x-kubernetes-int-or-string: true seccompProfile: description: |- @@ -5747,7 +5747,7 @@ spec: scheduled to particular nodes with matching taints. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling - Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. + Deprecated: Marked as deprecated in pkg/apis/values_types.proto. items: description: |- The pod this Toleration is attached to tolerates any taint that matches diff --git a/docs/api-reference/sailoperator.io.md b/docs/api-reference/sailoperator.io.md index ce6e08862..7268a9bcc 100644 --- a/docs/api-reference/sailoperator.io.md +++ b/docs/api-reference/sailoperator.io.md @@ -104,14 +104,14 @@ _Appears in:_ | `cniNetnsDir` _string_ | The directory path within the cluster node's filesystem where network namespaces are located. Defaults to '/var/run/netns', in minikube/docker/others can be '/var/run/docker/netns'. | | | | `excludeNamespaces` _string array_ | List of namespaces that should be ignored by the CNI plugin. | | | | `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#affinity-v1-core)_ | K8s affinity to set on the istio-cni Pods. Can be used to exclude istio-cni from being scheduled on specified nodes. | | | -| `podAnnotations` _object (keys:string, values:string)_ | Additional annotations to apply to the istio-cni Pods. Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `podAnnotations` _object (keys:string, values:string)_ | Additional annotations to apply to the istio-cni Pods. Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `psp_cluster_role` _string_ | PodSecurityPolicy cluster role. No longer used anywhere. | | | | `logging` _[GlobalLoggingConfig](#globalloggingconfig)_ | Same as `global.logging.level`, but will override it if set | | | | `repair` _[CNIRepairConfig](#cnirepairconfig)_ | Configuration for the CNI Repair controller. | | | | `chained` _boolean_ | Configure the plugin as a chained CNI plugin. When true, the configuration is added to the CNI chain; when false, the configuration is added as a standalone file in the CNI configuration directory. | | | | `resource_quotas` _[ResourceQuotas](#resourcequotas)_ | The resource quotas configration for the CNI DaemonSet. | | | | `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core)_ | The k8s resource requests and limits for the istio-cni Pods. | | | -| `privileged` _boolean_ | No longer used for CNI. See: https://github.com/istio/istio/issues/49004 Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `privileged` _boolean_ | No longer used for CNI. See: https://github.com/istio/istio/issues/49004 Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `seccompProfile` _[SeccompProfile](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#seccompprofile-v1-core)_ | The Container seccompProfile See: https://kubernetes.io/docs/tutorials/security/seccomp/ | | | | `provider` _string_ | Specifies the CNI provider. Can be either "default" or "multus". When set to "multus", an additional NetworkAttachmentDefinition resource is deployed to the cluster to allow the istio-cni plugin to be invoked in a cluster using the Multus CNI plugin. 
| | | | `rollingMaxUnavailable` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#intorstring-intstr-util)_ | The number of pods that can be unavailable during a rolling update of the CNI DaemonSet (see `updateStrategy.rollingUpdate.maxUnavailable` here: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/daemon-set-v1/#DaemonSetSpec). May be specified as a number of pods or as a percent of the total number of pods at the start of the update. | | XIntOrString: \{\} | @@ -130,7 +130,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `defaultResources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core)_ | See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `defaultResources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core)_ | See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `hub` _string_ | Specifies the docker hub for Istio images. | | | | `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#pullpolicy-v1-core)_ | Specifies the image pull policy for the Istio images. one of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images | | Enum: [Always Never IfNotPresent] | | `imagePullSecrets` _string array_ | ImagePullSecrets for the control plane ServiceAccount, list of secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. Must be set for any cluster configured with private docker registry. | | | @@ -159,7 +159,7 @@ _Appears in:_ | `image` _string_ | Image name to pull from. Image will be `Hub/Image:Tag-Variant`. If Image contains a "/", it will replace the entire `image` in the pod. | | | | `labelPods` _boolean_ | The Repair controller has 3 modes (labelPods, deletePods, and repairPods). Pick which one meets your use cases. Note only one may be used. The mode defines the action the controller will take when a pod is detected as broken. If labelPods is true, the controller will label all broken pods with =. This is only capable of identifying broken pods; the user is responsible for fixing them (generally, by deleting them). Note this gives the DaemonSet a relatively high privilege, as modifying pod metadata/status can have wider impacts. | | | | `repairPods` _boolean_ | The Repair controller has 3 modes (labelPods, deletePods, and repairPods). Pick which one meets your use cases. Note only one may be used. The mode defines the action the controller will take when a pod is detected as broken. If repairPods is true, the controller will dynamically repair any broken pod by setting up the pod networking configuration even after it has started. Note the pod will be crashlooping, so this may take a few minutes to become fully functional based on when the retry occurs. This requires no RBAC privilege, but will require the CNI agent to run as a privileged pod. 
| | | -| `createEvents` _string_ | No longer used. Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `createEvents` _string_ | No longer used. Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `deletePods` _boolean_ | The Repair controller has 3 modes (labelPods, deletePods, and repairPods). Pick which one meets your use cases. Note only one may be used. The mode defines the action the controller will take when a pod is detected as broken. If deletePods is true, the controller will delete the broken pod. The pod will then be rescheduled, hopefully onto a node that is fully ready. Note this gives the DaemonSet a relatively high privilege, as it can delete any Pod. | | | | `brokenPodLabelKey` _string_ | The label key to apply to a broken pod when the controller is in labelPods mode. | | | | `brokenPodLabelValue` _string_ | The label value to apply to a broken pod when the controller is in labelPods mode. | | | @@ -375,13 +375,13 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `arch` _[ArchConfig](#archconfig)_ | Specifies pod scheduling arch(amd64, ppc64le, s390x, arm64) and weight as follows: 0 - Never scheduled 1 - Least preferred 2 - No preference 3 - Most preferred Deprecated: replaced by the affinity k8s settings which allows architecture nodeAffinity configuration of this behavior. Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `arch` _[ArchConfig](#archconfig)_ | Specifies pod scheduling arch(amd64, ppc64le, s390x, arm64) and weight as follows: 0 - Never scheduled 1 - Least preferred 2 - No preference 3 - Most preferred Deprecated: replaced by the affinity k8s settings which allows architecture nodeAffinity configuration of this behavior. Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `certSigners` _string array_ | List of certSigners to allow "approve" action in the ClusterRole | | | | `configValidation` _boolean_ | Controls whether the server-side validation is enabled. | | | -| `defaultNodeSelector` _object (keys:string, values:string)_ | Default k8s node selector for all the Istio control plane components See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | -| `defaultPodDisruptionBudget` _[DefaultPodDisruptionBudgetConfig](#defaultpoddisruptionbudgetconfig)_ | Specifies the default pod disruption budget configuration. Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | -| `defaultResources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core)_ | Default k8s resources settings for all Istio control plane components. See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | -| `defaultTolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#toleration-v1-core) array_ | Default node tolerations to be applied to all deployments so that all pods can be scheduled to nodes with matching taints. Each component can overwrite these default values by adding its tolerations block in the relevant section below and setting the desired values. 
Configure this field in case that all pods of Istio control plane are expected to be scheduled to particular nodes with specified taints. Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `defaultNodeSelector` _object (keys:string, values:string)_ | Default k8s node selector for all the Istio control plane components See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | +| `defaultPodDisruptionBudget` _[DefaultPodDisruptionBudgetConfig](#defaultpoddisruptionbudgetconfig)_ | Specifies the default pod disruption budget configuration. Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | +| `defaultResources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core)_ | Default k8s resources settings for all Istio control plane components. See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | +| `defaultTolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#toleration-v1-core) array_ | Default node tolerations to be applied to all deployments so that all pods can be scheduled to nodes with matching taints. Each component can overwrite these default values by adding its tolerations block in the relevant section below and setting the desired values. Configure this field in case that all pods of Istio control plane are expected to be scheduled to particular nodes with specified taints. Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `hub` _string_ | Specifies the docker hub for Istio images. | | | | `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#pullpolicy-v1-core)_ | Specifies the image pull policy for the Istio images. one of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images | | Enum: [Always Never IfNotPresent] | | `imagePullSecrets` _string array_ | ImagePullSecrets for the control plane ServiceAccount, list of secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. Must be set for any cluster configured with private docker registry. | | | @@ -395,7 +395,7 @@ _Appears in:_ | `podDNSSearchNamespaces` _string array_ | Custom DNS config for the pod to resolve names of services in other clusters. Use this to add additional search domains, and other settings. see https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#dns-config This does not apply to gateway pods as they typically need a different set of DNS settings than the normal application pods (e.g. in multicluster scenarios). | | | | `omitSidecarInjectorConfigMap` _boolean_ | Controls whether the creation of the sidecar injector ConfigMap should be skipped. Defaults to false. When set to true, the sidecar injector ConfigMap will not be created. | | | | `operatorManageWebhooks` _boolean_ | Controls whether the WebhookConfiguration resource(s) should be created. The current behavior of Istiod is to manage its own webhook configurations. 
When this option is set to true, Istio Operator, instead of webhooks, manages the webhook configurations. When this option is set as false, webhooks manage their own webhook configurations. | | | -| `priorityClassName` _string_ | Specifies the k8s priorityClassName for the istio control plane components. See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `priorityClassName` _string_ | Specifies the k8s priorityClassName for the istio control plane components. See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `proxy` _[ProxyConfig](#proxyconfig)_ | Specifies how proxies are configured within Istio. | | | | `proxy_init` _[ProxyInitConfig](#proxyinitconfig)_ | Specifies the Configuration for proxy_init container which sets the pods' networking to intercept the inbound/outbound traffic. | | | | `sds` _[SDSConfig](#sdsconfig)_ | Specifies the Configuration for the SecretDiscoveryService instead of using K8S secrets to mount the certificates. | | | @@ -405,7 +405,7 @@ _Appears in:_ | `remotePilotAddress` _string_ | Specifies the Istio control plane’s pilot Pod IP address or remote cluster DNS resolvable hostname. | | | | `istiod` _[IstiodConfig](#istiodconfig)_ | Specifies the configution of istiod | | | | `pilotCertProvider` _string_ | Configure the Pilot certificate provider. Currently, four providers are supported: "kubernetes", "istiod", "custom" and "none". | | | -| `jwtPolicy` _string_ | Configure the policy for validating JWT. This is deprecated and has no effect. Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `jwtPolicy` _string_ | Configure the policy for validating JWT. This is deprecated and has no effect. Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `sts` _[STSConfig](#stsconfig)_ | Specifies the configuration for Security Token Service. | | | | `revision` _string_ | Configures the revision this control plane is a part of | | | | `mountMtlsCerts` _boolean_ | Controls whether the in-cluster MTLS key and certs are loaded from the secret volume mounts. | | | @@ -2067,22 +2067,22 @@ _Appears in:_ | `autoscaleMin` _integer_ | Minimum number of replicas in the HorizontalPodAutoscaler for Pilot. | | | | `autoscaleMax` _integer_ | Maximum number of replicas in the HorizontalPodAutoscaler for Pilot. | | | | `autoscaleBehavior` _[HorizontalPodAutoscalerBehavior](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#horizontalpodautoscalerbehavior-v2-autoscaling)_ | See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#configurable-scaling-behavior | | | -| `replicaCount` _integer_ | Number of replicas in the Pilot Deployment. Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `replicaCount` _integer_ | Number of replicas in the Pilot Deployment. Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `image` _string_ | Image name used for Pilot. This can be set either to image name if hub is also set, or can be set to the full hub:name string. Examples: custom-pilot, docker.io/someuser:custom-pilot | | | | `traceSampling` _float_ | Trace sampling fraction. Used to set the fraction of time that traces are sampled. Higher values are more accurate but add CPU overhead. 
Allowed values: 0.0 to 1.0 | | | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core)_ | K8s resources settings. See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | -| `cpu` _[TargetUtilizationConfig](#targetutilizationconfig)_ | Target CPU utilization used in HorizontalPodAutoscaler. See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | -| `nodeSelector` _object (keys:string, values:string)_ | K8s node selector. See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core)_ | K8s resources settings. See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | +| `cpu` _[TargetUtilizationConfig](#targetutilizationconfig)_ | Target CPU utilization used in HorizontalPodAutoscaler. See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | +| `nodeSelector` _object (keys:string, values:string)_ | K8s node selector. See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `keepaliveMaxServerConnectionAge` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#duration-v1-meta)_ | Maximum duration that a sidecar can be connected to a pilot. This setting balances out load across pilot instances, but adds some resource overhead. Examples: 300s, 30m, 1h | | | | `deploymentLabels` _object (keys:string, values:string)_ | Labels that are added to Pilot deployment. See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ | | | | `podLabels` _object (keys:string, values:string)_ | Labels that are added to Pilot pods. See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ | | | | `configMap` _boolean_ | Configuration settings passed to Pilot as a ConfigMap. This controls whether the mesh config map, generated from values.yaml is generated. If false, pilot wil use default values or user-supplied values, in that order of preference. | | | | `env` _object (keys:string, values:string)_ | Environment variables passed to the Pilot container. Examples: env: ENV_VAR_1: value1 ENV_VAR_2: value2 | | | | `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#affinity-v1-core)_ | K8s affinity to set on the Pilot Pods. | | | -| `rollingMaxSurge` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#intorstring-intstr-util)_ | K8s rolling update strategy Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. 
| | XIntOrString: \{\} | -| `rollingMaxUnavailable` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#intorstring-intstr-util)_ | The number of pods that can be unavailable during a rolling update (see `strategy.rollingUpdate.maxUnavailable` here: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/deployment-v1/#DeploymentSpec). May be specified as a number of pods or as a percent of the total number of pods at the start of the update. Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | XIntOrString: \{\} | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#toleration-v1-core) array_ | The node tolerations to be applied to the Pilot deployment so that it can be scheduled to particular nodes with matching taints. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | -| `podAnnotations` _object (keys:string, values:string)_ | K8s annotations for pods. See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `rollingMaxSurge` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#intorstring-intstr-util)_ | K8s rolling update strategy Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | XIntOrString: \{\} | +| `rollingMaxUnavailable` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#intorstring-intstr-util)_ | The number of pods that can be unavailable during a rolling update (see `strategy.rollingUpdate.maxUnavailable` here: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/deployment-v1/#DeploymentSpec). May be specified as a number of pods or as a percent of the total number of pods at the start of the update. Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | XIntOrString: \{\} | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#toleration-v1-core) array_ | The node tolerations to be applied to the Pilot deployment so that it can be scheduled to particular nodes with matching taints. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | +| `podAnnotations` _object (keys:string, values:string)_ | K8s annotations for pods. See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `serviceAnnotations` _object (keys:string, values:string)_ | K8s annotations for the Service. See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ | | | | `serviceAccountAnnotations` _object (keys:string, values:string)_ | K8s annotations for the service account | | | | `jwksResolverExtraRootCA` _string_ | Specifies an extra root certificate in PEM format. This certificate will be trusted by pilot when resolving JWKS URIs. | | | @@ -2096,7 +2096,7 @@ _Appears in:_ | `volumes` _[Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#volume-v1-core) array_ | Additional volumes to add to the Pilot Pod. 
| | | | `ipFamilies` _string array_ | Defines which IP family to use for single stack or the order of IP families for dual-stack. Valid list items are "IPv4", "IPv6". More info: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services | | | | `ipFamilyPolicy` _string_ | Controls whether Services are configured to use IPv4, IPv6, or both. Valid options are PreferDualStack, RequireDualStack, and SingleStack. More info: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services | | | -| `memory` _[TargetUtilizationConfig](#targetutilizationconfig)_ | Target memory utilization used in HorizontalPodAutoscaler. See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `memory` _[TargetUtilizationConfig](#targetutilizationconfig)_ | Target memory utilization used in HorizontalPodAutoscaler. See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `cni` _[CNIUsageConfig](#cniusageconfig)_ | Configures whether to use an existing CNI installation for workloads | | | | `taint` _[PilotTaintControllerConfig](#pilottaintcontrollerconfig)_ | | | | | `trustedZtunnelNamespace` _string_ | If set, `istiod` will allow connections from trusted node proxy ztunnels in the provided namespace. | | | @@ -2200,11 +2200,11 @@ _Appears in:_ | `readinessFailureThreshold` _integer_ | Sets the number of successive failed probes before indicating readiness failure. | | | | `startupProbe` _[StartupProbe](#startupprobe)_ | Configures the startup probe for the istio-proxy container. | | | | `statusPort` _integer_ | Default port used for the Pilot agent's health checks. | | | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core)_ | K8s resources settings. See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core)_ | K8s resources settings. See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `tracer` _[Tracer](#tracer)_ | Specify which tracer to use. One of: zipkin, lightstep, datadog, stackdriver. If using stackdriver tracer outside GCP, set env GOOGLE_APPLICATION_CREDENTIALS to the GCP credential file. | | Enum: [zipkin lightstep datadog stackdriver openCensusAgent none] | | `excludeOutboundPorts` _string_ | A comma separated list of outbound ports to be excluded from redirection to Envoy. | | | | `lifecycle` _[Lifecycle](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#lifecycle-v1-core)_ | The k8s lifecycle hooks definition (pod.spec.containers.lifecycle) for the proxy container. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks | | | -| `holdApplicationUntilProxyStarts` _boolean_ | Controls if sidecar is injected at the front of the container list and blocks the start of the other containers until the proxy is ready Deprecated: replaced by ProxyConfig setting which allows per-pod configuration of this behavior. Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `holdApplicationUntilProxyStarts` _boolean_ | Controls if sidecar is injected at the front of the container list and blocks the start of the other containers until the proxy is ready Deprecated: replaced by ProxyConfig setting which allows per-pod configuration of this behavior. Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | | `includeInboundPorts` _string_ | A comma separated list of inbound ports for which traffic is to be redirected to Envoy. The wildcard character '*' can be used to configure redirection for all ports. | | | | `includeOutboundPorts` _string_ | A comma separated list of outbound ports for which traffic is to be redirected to Envoy, regardless of the destination IP. | | | @@ -2391,7 +2391,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `image` _string_ | Specifies the image for the proxy_init container. | | | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core)_ | K8s resources settings. See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#resourcerequirements-v1-core)_ | K8s resources settings. See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container Deprecated: Marked as deprecated in pkg/apis/values_types.proto. | | | #### RemoteIstio @@ -2626,7 +2626,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `token` _[SDSConfigToken](#sdsconfigtoken)_ | Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto. | | | +| `token` _[SDSConfigToken](#sdsconfigtoken)_ | Deprecated: Marked as deprecated in pkg/apis/values_types.proto. 
| | | #### SDSConfigToken diff --git a/go.mod b/go.mod index b47511cb4..93d5c2519 100644 --- a/go.mod +++ b/go.mod @@ -21,8 +21,8 @@ require ( gomodules.xyz/jsonpatch/v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 helm.sh/helm/v3 v3.15.1 - istio.io/client-go v1.23.0-alpha.0.0.20240809192551-f32a7326ae19 - istio.io/istio v0.0.0-20240813230019-b191b039631b + istio.io/client-go v1.23.0-alpha.0.0.20240815005320-98e06cc71273 + istio.io/istio v0.0.0-20240815163146-1ad41e17ee31 k8s.io/api v0.30.3 k8s.io/apiextensions-apiserver v0.30.3 k8s.io/apimachinery v0.30.3 @@ -159,7 +159,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gotest.tools/v3 v3.5.1 // indirect - istio.io/api v1.23.0-alpha.0.0.20240809192158-5302fff8a801 // indirect + istio.io/api v1.23.0-alpha.0.0.20240815004820-dd780031d531 // indirect k8s.io/apiserver v0.30.3 // indirect k8s.io/component-base v0.30.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect diff --git a/go.sum b/go.sum index 21e138aac..81a3e2c53 100644 --- a/go.sum +++ b/go.sum @@ -520,12 +520,12 @@ gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= helm.sh/helm/v3 v3.15.1 h1:22ztacHz4gMqhXNqCQ9NAg6BFWoRUryNLvnkz6OVyw0= helm.sh/helm/v3 v3.15.1/go.mod h1:fvfoRcB8UKRUV5jrIfOTaN/pG1TPhuqSb56fjYdTKXg= -istio.io/api v1.23.0-alpha.0.0.20240809192158-5302fff8a801 h1:7cxaHipka0v7rK9/kcthjmIucVnV3jHEROUBHONYeCo= -istio.io/api v1.23.0-alpha.0.0.20240809192158-5302fff8a801/go.mod h1:MQnRok7RZ20/PE56v0LxmoWH0xVxnCQPNuf9O7PAN1I= -istio.io/client-go v1.23.0-alpha.0.0.20240809192551-f32a7326ae19 h1:NwuYY0d6HZLNLHBrgtfZ6Q/dw+DwGYTFPtqoP7XNx7g= -istio.io/client-go v1.23.0-alpha.0.0.20240809192551-f32a7326ae19/go.mod h1:VarGlJP6p95t2P7KKKAPKpnG5XA4fv/y8V6EfgcYRcw= -istio.io/istio v0.0.0-20240813230019-b191b039631b h1:1EWcxrdpcuaNKqOf93hcxS42+n23bfMRJWQHc7MtTOw= -istio.io/istio v0.0.0-20240813230019-b191b039631b/go.mod h1:4YypioCIdszSllDb9Vo+U79ThLw6ilpnsmlJxvVaJ9c= +istio.io/api v1.23.0-alpha.0.0.20240815004820-dd780031d531 h1:SzRpQFb33yZ5/RWNEMkEzwSKihIoaqS6l6bz/Pixq18= +istio.io/api v1.23.0-alpha.0.0.20240815004820-dd780031d531/go.mod h1:MQnRok7RZ20/PE56v0LxmoWH0xVxnCQPNuf9O7PAN1I= +istio.io/client-go v1.23.0-alpha.0.0.20240815005320-98e06cc71273 h1:vVlZ6fCq+e+SkKNsg06LVF4842k9ycOQSoamLlPPFeU= +istio.io/client-go v1.23.0-alpha.0.0.20240815005320-98e06cc71273/go.mod h1:Y4vkrP/cgEbtlOi4E++YhCvLkMotuWFtoQTvv+nWBLY= +istio.io/istio v0.0.0-20240815163146-1ad41e17ee31 h1:aL+yEfXFRWZgxall6phMbGP/CId65TVyR94sdm6pwo0= +istio.io/istio v0.0.0-20240815163146-1ad41e17ee31/go.mod h1:vg5Yp9IKYRlIsFRd5MOQh0/3zjk0WCWe0NVu1Q9DpZQ= k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmijhB9U= diff --git a/hack/api_transformer/transform.yaml b/hack/api_transformer/transform.yaml index 4c5ed04bd..5a9a0c95a 100644 --- a/hack/api_transformer/transform.yaml +++ b/hack/api_transformer/transform.yaml @@ -42,8 +42,10 @@ globalTransformations: inputFiles: - module: istio.io/istio - path: /operator/pkg/apis/istio/v1alpha1/values_types.pb.go + path: /operator/pkg/apis/values_types.pb.go transformations: + renameImports: + v1: k8sv1 removeTypes: - EgressGatewayConfig - IngressGatewayConfig From 196dc486d5beff1b1e866872a61fc936db4bd107 Mon Sep 17 00:00:00 2001 From: Tyler Schade Date: Mon, 30 Sep 2024 08:06:27 -0400 Subject: [PATCH 
20/25] Add ability to specify resources in helm chart (#375) * add ability to specify resources in helm chart Signed-off-by: Tyler Schade * --amend Signed-off-by: Tyler Schade * --amend Signed-off-by: Tyler Schade --------- Signed-off-by: Tyler Schade --- chart/templates/deployment.yaml | 16 ++++++++-------- chart/values.yaml | 15 +++++++++++++++ 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/chart/templates/deployment.yaml b/chart/templates/deployment.yaml index d9fdcfd85..96d389713 100644 --- a/chart/templates/deployment.yaml +++ b/chart/templates/deployment.yaml @@ -64,11 +64,11 @@ spec: protocol: TCP resources: limits: - cpu: 500m - memory: 128Mi + cpu: {{ .Values.proxy.resources.limits.cpu }} + memory: {{ .Values.proxy.resources.limits.memory }} requests: - cpu: 5m - memory: 64Mi + cpu: {{ .Values.proxy.resources.requests.cpu }} + memory: {{ .Values.proxy.resources.requests.memory }} securityContext: allowPrivilegeEscalation: false capabilities: @@ -101,11 +101,11 @@ spec: periodSeconds: 10 resources: limits: - cpu: 500m - memory: 512Mi + cpu: {{ .Values.operator.resources.limits.cpu }} + memory: {{ .Values.operator.resources.limits.memory }} requests: - cpu: 10m - memory: 64Mi + cpu: {{ .Values.operator.resources.requests.cpu }} + memory: {{ .Values.operator.resources.requests.memory }} securityContext: allowPrivilegeEscalation: false capabilities: diff --git a/chart/values.yaml b/chart/values.yaml index 9dae758ca..54acfceb6 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -49,6 +49,21 @@ proxy: image: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 # We're commenting out the imagePullPolicy to use k8s defaults # imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi +operator: + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi # setting this to true will add resources required to generate the bundle using operator-sdk bundleGeneration: false From 2a194f0ce9a7117f4a8e8d963deeacb5c0a97a55 Mon Sep 17 00:00:00 2001 From: Maxim Babushkin Date: Mon, 30 Sep 2024 18:16:28 +0300 Subject: [PATCH 21/25] Improve pull request template (#376) Current pull request template overloads with fields. By reducing the amount of fields in the template, it will improve contributors expiriense. Signed-off-by: Maxim Babushkin --- .github/pull_request_template.md | 32 +------------------------------- 1 file changed, 1 insertion(+), 31 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index fc34ef026..9a3170f7f 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -25,9 +25,7 @@ Please, use the following labels, according to the PR type: - [ ] Documentation Update #### What this PR does / why we need it: -```text -``` #### Which issue(s) this PR fixes: -```release-note - -``` - -#### Additional documentation: -```text - -``` - -#### Does this PR introduce a breaking change? -```text - -``` - -#### Other information: -```text - -``` +#### Additional information: From e1fcbdb9517a87c42df6d82a7e7c38450344534c Mon Sep 17 00:00:00 2001 From: Sridhar Gaddam Date: Mon, 30 Sep 2024 21:50:27 +0530 Subject: [PATCH 22/25] Implement e2e dualStack control plane tests (#373) * Implement e2e dualStack control plane tests This PR includes the necessary support 1. to deploy dualStack/IPv6 KIND clusters 2. 
to deploy the supported Istio versions in a dualStack mode and verifies that Istiod is properly configured to support dualStack use-cases. To run e2e dualStack tests, use the following command. $ export IP_FAMILY=dual $ make test.e2e.kind Related to: https://github.com/istio-ecosystem/sail-operator/issues/372 Signed-off-by: Sridhar Gaddam * Modify the text used in Describe and other blocks Signed-off-by: Sridhar Gaddam --------- Signed-off-by: Sridhar Gaddam --- tests/e2e/common-operator-integ-suite.sh | 2 +- tests/e2e/dualstack/dualstack_suite_test.go | 65 ++++++ tests/e2e/dualstack/dualstack_test.go | 213 ++++++++++++++++++++ tests/e2e/integ-suite-kind.sh | 2 +- 4 files changed, 280 insertions(+), 2 deletions(-) create mode 100644 tests/e2e/dualstack/dualstack_suite_test.go create mode 100644 tests/e2e/dualstack/dualstack_test.go diff --git a/tests/e2e/common-operator-integ-suite.sh b/tests/e2e/common-operator-integ-suite.sh index 304c2b005..e54f57465 100755 --- a/tests/e2e/common-operator-integ-suite.sh +++ b/tests/e2e/common-operator-integ-suite.sh @@ -284,7 +284,7 @@ fi # Run the go test passing the env variables defined that are going to be used in the operator tests # shellcheck disable=SC2086 -IMAGE="${HUB}/${IMAGE_BASE}:${TAG}" SKIP_DEPLOY="${SKIP_DEPLOY}" OCP="${OCP}" ISTIO_MANIFEST="${ISTIO_MANIFEST}" \ +IMAGE="${HUB}/${IMAGE_BASE}:${TAG}" SKIP_DEPLOY="${SKIP_DEPLOY}" OCP="${OCP}" IP_FAMILY="${IP_FAMILY}" ISTIO_MANIFEST="${ISTIO_MANIFEST}" \ NAMESPACE="${NAMESPACE}" CONTROL_PLANE_NS="${CONTROL_PLANE_NS}" DEPLOYMENT_NAME="${DEPLOYMENT_NAME}" MULTICLUSTER="${MULTICLUSTER}" ARTIFACTS="${ARTIFACTS}" \ ISTIO_NAME="${ISTIO_NAME}" COMMAND="${COMMAND}" VERSIONS_YAML_FILE="${VERSIONS_YAML_FILE}" KUBECONFIG="${KUBECONFIG}" ISTIOCTL_PATH="${ISTIOCTL}" \ go run github.com/onsi/ginkgo/v2/ginkgo -tags e2e --timeout 30m --junit-report=report.xml ${GINKGO_FLAGS} "${WD}"/... diff --git a/tests/e2e/dualstack/dualstack_suite_test.go b/tests/e2e/dualstack/dualstack_suite_test.go new file mode 100644 index 000000000..20661a840 --- /dev/null +++ b/tests/e2e/dualstack/dualstack_suite_test.go @@ -0,0 +1,65 @@ +//go:build e2e + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dualstack + +import ( + "testing" + + k8sclient "github.com/istio-ecosystem/sail-operator/tests/e2e/util/client" + env "github.com/istio-ecosystem/sail-operator/tests/e2e/util/env" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + cl client.Client + err error + ocp = env.GetBool("OCP", false) + namespace = env.Get("NAMESPACE", "sail-operator") + deploymentName = env.Get("DEPLOYMENT_NAME", "sail-operator") + controlPlaneNamespace = env.Get("CONTROL_PLANE_NS", "istio-system") + istioName = env.Get("ISTIO_NAME", "default") + image = env.Get("IMAGE", "quay.io/maistra-dev/sail-operator:latest") + skipDeploy = env.GetBool("SKIP_DEPLOY", false) + expectedRegistry = env.Get("EXPECTED_REGISTRY", "^docker\\.io|^gcr\\.io") + multicluster = env.GetBool("MULTICLUSTER", false) + ipFamily = env.Get("IP_FAMILY", "ipv4") + + k *kubectl.KubectlBuilder +) + +func TestDualStack(t *testing.T) { + if ipFamily != "dual" || multicluster { + t.Skip("Skipping the dualStack tests") + } + + RegisterFailHandler(Fail) + setup() + RunSpecs(t, "DualStack test suite") +} + +func setup() { + GinkgoWriter.Println("************ Running Setup ************") + + GinkgoWriter.Println("Initializing k8s client") + cl, err = k8sclient.InitK8sClient("") + Expect(err).NotTo(HaveOccurred()) + + k = kubectl.NewKubectlBuilder() +} diff --git a/tests/e2e/dualstack/dualstack_test.go b/tests/e2e/dualstack/dualstack_test.go new file mode 100644 index 000000000..3479a141b --- /dev/null +++ b/tests/e2e/dualstack/dualstack_test.go @@ -0,0 +1,213 @@ +//go:build e2e + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR Condition OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dualstack + +import ( + "fmt" + "path/filepath" + "time" + + "github.com/istio-ecosystem/sail-operator/api/v1alpha1" + "github.com/istio-ecosystem/sail-operator/pkg/kube" + "github.com/istio-ecosystem/sail-operator/pkg/test/project" + . "github.com/istio-ecosystem/sail-operator/pkg/test/util/ginkgo" + "github.com/istio-ecosystem/sail-operator/pkg/test/util/supportedversion" + common "github.com/istio-ecosystem/sail-operator/tests/e2e/util/common" + . "github.com/istio-ecosystem/sail-operator/tests/e2e/util/gomega" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/helm" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("DualStack configuration ", Ordered, func() { + SetDefaultEventuallyTimeout(180 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + debugInfoLogged := false + + BeforeAll(func(ctx SpecContext) { + Expect(k.CreateNamespace(namespace)).To(Succeed(), "Namespace failed to be created") + + extraArg := "" + if ocp { + extraArg = "--set=platform=openshift" + } + + if skipDeploy { + Success("Skipping operator installation because it was deployed externally") + } else { + Expect(helm.Install("sail-operator", filepath.Join(project.RootDir, "chart"), "--namespace "+namespace, "--set=image="+image, extraArg)). 
+ To(Succeed(), "Operator failed to be deployed") + } + + Eventually(common.GetObject).WithArguments(ctx, cl, kube.Key(deploymentName, namespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Error getting Istio CRD") + Success("Operator is deployed in the namespace and Running") + }) + + Describe("for supported versions", func() { + for _, version := range supportedversion.List { + // Note: This var version is needed to avoid the closure of the loop + version := version + + // The minimum supported version is 1.23 (and above) + if version.Major == 1 && version.Minor < 23 { + continue + } + + Context("Istio version is: "+version.Version, func() { + BeforeAll(func() { + Expect(k.CreateNamespace(controlPlaneNamespace)).To(Succeed(), "Istio namespace failed to be created") + }) + + When("the Istio CR is created with DualStack configuration", func() { + BeforeAll(func() { + istioYAML := ` +apiVersion: sailoperator.io/v1alpha1 +kind: Istio +metadata: + name: default +spec: + values: + meshConfig: + defaultConfig: + proxyMetadata: + ISTIO_DUAL_STACK: "true" + pilot: + ipFamilyPolicy: %s + env: + ISTIO_DUAL_STACK: "true" + version: %s + namespace: %s` + istioYAML = fmt.Sprintf(istioYAML, corev1.IPFamilyPolicyRequireDualStack, version.Name, controlPlaneNamespace) + Log("Istio YAML:", istioYAML) + Expect(k.CreateFromString(istioYAML)). + To(Succeed(), "Istio CR failed to be created") + Success("Istio CR created") + }) + + It("updates the Istio CR status to Reconciled", func(ctx SpecContext) { + Eventually(common.GetObject).WithArguments(ctx, cl, kube.Key(istioName), &v1alpha1.Istio{}). + Should(HaveCondition(v1alpha1.IstioConditionReconciled, metav1.ConditionTrue), "Istio is not Reconciled; unexpected Condition") + Success("Istio CR is Reconciled") + }) + + It("updates the Istio CR status to Ready", func(ctx SpecContext) { + Eventually(common.GetObject).WithArguments(ctx, cl, kube.Key(istioName), &v1alpha1.Istio{}). + Should(HaveCondition(v1alpha1.IstioConditionReady, metav1.ConditionTrue), "Istio is not Ready; unexpected Condition") + Success("Istio CR is Ready") + }) + + It("deploys istiod", func(ctx SpecContext) { + Eventually(common.GetObject).WithArguments(ctx, cl, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Istiod is not Available; unexpected Condition") + Expect(common.GetVersionFromIstiod()).To(Equal(version.Version), "Unexpected istiod version") + Success("Istiod is deployed in the namespace and Running") + }) + + It("uses the correct image", func(ctx SpecContext) { + Expect(common.GetObject(ctx, cl, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{})). + To(HaveContainersThat(HaveEach(ImageFromRegistry(expectedRegistry)))) + }) + + It("has ISTIO_DUAL_STACK env variable set", func(ctx SpecContext) { + Expect(common.GetObject(ctx, cl, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{})). 
+ To(HaveContainersThat(ContainElement(WithTransform(getEnvVars, ContainElement(corev1.EnvVar{Name: "ISTIO_DUAL_STACK", Value: "true"})))), + "Expected ISTIO_DUAL_STACK to be set to true, but not found") + }) + + It("deploys istiod service in dualStack mode", func(ctx SpecContext) { + var istiodSvcObj corev1.Service + + Eventually(func() error { + _, err := common.GetObject(ctx, cl, kube.Key("istiod", controlPlaneNamespace), &istiodSvcObj) + return err + }).Should(Succeed(), "Expected to retrieve the 'istiod' service") + + Expect(istiodSvcObj.Spec.IPFamilyPolicy).ToNot(BeNil(), "Expected IPFamilyPolicy to be set") + Expect(*istiodSvcObj.Spec.IPFamilyPolicy).To(Equal(corev1.IPFamilyPolicyRequireDualStack), "Expected ipFamilyPolicy to be 'RequireDualStack'") + Success("Istio Service is deployed in the namespace and Running") + }) + }) + + When("the Istio CR is deleted", func() { + BeforeEach(func() { + Expect(k.SetNamespace(controlPlaneNamespace).Delete("istio", istioName)).To(Succeed(), "Istio CR failed to be deleted") + Success("Istio CR deleted") + }) + + It("removes everything from the namespace", func(ctx SpecContext) { + Eventually(cl.Get).WithArguments(ctx, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). + Should(ReturnNotFoundError(), "Istiod should not exist anymore") + common.CheckNamespaceEmpty(ctx, cl, controlPlaneNamespace) + Success("Namespace is empty") + }) + }) + }) + } + + AfterAll(func(ctx SpecContext) { + if CurrentSpecReport().Failed() { + common.LogDebugInfo() + debugInfoLogged = true + } + + By("Cleaning up the Istio namespace") + Expect(cl.Delete(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: controlPlaneNamespace}})).To(Succeed(), "Istio Namespace failed to be deleted") + + By("Deleting any left-over Istio and IstioRevision resources") + Success("Resources deleted") + Success("Cleanup done") + }) + }) + + AfterAll(func() { + if CurrentSpecReport().Failed() && !debugInfoLogged { + common.LogDebugInfo() + debugInfoLogged = true + } + + if skipDeploy { + Success("Skipping operator undeploy because it was deployed externally") + return + } + + By("Deleting operator deployment") + Expect(helm.Uninstall("sail-operator", "--namespace "+namespace)). 
+ To(Succeed(), "Operator failed to be deleted") + GinkgoWriter.Println("Operator uninstalled") + + Expect(k.DeleteNamespace(namespace)).To(Succeed(), "Namespace failed to be deleted") + Success("Namespace deleted") + }) +}) + +func HaveContainersThat(matcher types.GomegaMatcher) types.GomegaMatcher { + return HaveField("Spec.Template.Spec.Containers", matcher) +} + +func ImageFromRegistry(regexp string) types.GomegaMatcher { + return HaveField("Image", MatchRegexp(regexp)) +} + +func getEnvVars(container corev1.Container) []corev1.EnvVar { + return container.Env +} diff --git a/tests/e2e/integ-suite-kind.sh b/tests/e2e/integ-suite-kind.sh index 90a939851..e4442e278 100755 --- a/tests/e2e/integ-suite-kind.sh +++ b/tests/e2e/integ-suite-kind.sh @@ -101,5 +101,5 @@ echo "Running integration tests" if [ "${MULTICLUSTER}" == "true" ]; then ARTIFACTS="${ARTIFACTS}" ISTIOCTL="${ISTIOCTL}" ./tests/e2e/common-operator-integ-suite.sh --kind --multicluster else -ARTIFACTS="${ARTIFACTS}" ./tests/e2e/common-operator-integ-suite.sh --kind +ARTIFACTS="${ARTIFACTS}" IP_FAMILY="${IP_FAMILY}" ./tests/e2e/common-operator-integ-suite.sh --kind fi \ No newline at end of file From c6f30e2197c94c717818cbdcef02d66fe4393b3c Mon Sep 17 00:00:00 2001 From: Sridhar Gaddam Date: Tue, 1 Oct 2024 18:24:30 +0530 Subject: [PATCH 23/25] Fix broken url for sample gateway (#377) Signed-off-by: Sridhar Gaddam --- docs/common/create-and-configure-gateways.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/common/create-and-configure-gateways.md b/docs/common/create-and-configure-gateways.md index 00fdca0ee..29e35bd75 100644 --- a/docs/common/create-and-configure-gateways.md +++ b/docs/common/create-and-configure-gateways.md @@ -16,7 +16,7 @@ that can be made accessible from outside the cluster. For more information, see [Installing Gateways](https://preliminary.istio.io/latest/docs/setup/additional-setup/gateway/#deploying-a-gateway). To configure gateway injection with the `bookinfo` application, we have provided -a [sample gateway configuration](../chart/samples/ingress-gateway.yaml?raw=1) that should be applied in the namespace +a [sample gateway configuration](../../chart/samples/ingress-gateway.yaml?raw=1) that should be applied in the namespace where the application is installed: 1. Create the `istio-ingressgateway` deployment and service: From 93cd45543893d47f6262b77f2bc9fec80d7a9254 Mon Sep 17 00:00:00 2001 From: Jamie Longmuir Date: Tue, 1 Oct 2024 09:40:29 -0400 Subject: [PATCH 24/25] Update/Fix Istioctl links and docs (#349) * Update/Fix istioctl links in README.md Update/Fix istioctl links to use install-istioctl-tool.md, which provides instructions for installing istioctl independent of Istio. Signed-off-by: Jamie Longmuir * Update install-istioctl-tool.md to clarify steps and link to upstream doc Signed-off-by: Jamie Longmuir * remove namespace from get istio command Signed-off-by: Jamie Longmuir --------- Signed-off-by: Jamie Longmuir --- docs/README.md | 6 +++--- docs/common/install-istioctl-tool.md | 7 +++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/docs/README.md b/docs/README.md index a950d7ac6..e2c1572fd 100644 --- a/docs/README.md +++ b/docs/README.md @@ -265,7 +265,7 @@ When the `InPlace` strategy is used, the existing Istio control plane is replace Prerequisites: * Sail Operator is installed. -* `istioctl` is [installed](common/istio-addons-integrations.md). +* `istioctl` is [installed](common/install-istioctl-tool.md). Steps: 1. 
Create the `istio-system` namespace. @@ -341,7 +341,7 @@ When the `RevisionBased` strategy is used, a new Istio control plane instance is Prerequisites: * Sail Operator is installed. -* `istioctl` is [installed](common/istio-addons-integrations.md). +* `istioctl` is [installed](common/install-istioctl-tool.md). Steps: @@ -479,7 +479,7 @@ You can use the Sail Operator and the Sail CRDs to manage a multi-cluster Istio ### Prerequisites -- Install [istioctl](https://istio.io/latest/docs/setup/install/istioctl) and have it included in your `$PATH`. +- Install [istioctl](common/install-istioctl-tool.md). - Two kubernetes clusters with external lb support. (If using kind, `cloud-provider-kind` is running in the background) - kubeconfig file with a context for each cluster. - Install the Sail Operator and the Sail CRDs to every cluster. diff --git a/docs/common/install-istioctl-tool.md b/docs/common/install-istioctl-tool.md index ec489105e..68fa0dcf3 100644 --- a/docs/common/install-istioctl-tool.md +++ b/docs/common/install-istioctl-tool.md @@ -8,8 +8,7 @@ operators to debug and diagnose Istio service mesh deployments. Use an `istioctl` version that is the same version as the Istio control plane for the Service Mesh deployment. See [Istio Releases](https://github.com/istio/istio/releases) for a list of valid -releases, including Beta releases. - +releases, including Beta releases. ### Procedure @@ -24,7 +23,7 @@ the following command at the terminal: at the terminal: ```sh - $ oc -n istio-system get istio + $ oc get istio ``` 3. Install `istioctl` by running the following command at the terminal: @@ -47,6 +46,6 @@ at the terminal: ```sh $ istioctl version ``` - +For more information on usage, see the [Istioctl documentation](https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/). *Note*: `istioctl install` is not supported. The Sail Operator installs Istio. From c7164d4a99dd5483d9fbca30297d6f8e03de1002 Mon Sep 17 00:00:00 2001 From: Filip Brychta Date: Tue, 1 Oct 2024 19:52:28 +0200 Subject: [PATCH 25/25] Adding default value for IP_FAMILY (#378) This fixing ./tests/e2e/common-operator-integ-suite.sh: line 287: IP_FAMILY: unbound variable Signed-off-by: Filip Brychta --- tests/e2e/common-operator-integ-suite.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/e2e/common-operator-integ-suite.sh b/tests/e2e/common-operator-integ-suite.sh index e54f57465..8ad6b80c5 100755 --- a/tests/e2e/common-operator-integ-suite.sh +++ b/tests/e2e/common-operator-integ-suite.sh @@ -120,6 +120,7 @@ initialize_variables() { ISTIOCTL="${ISTIOCTL:-"istioctl"}" LOCALBIN="${LOCALBIN:-${HOME}/bin}" OPERATOR_SDK=${LOCALBIN}/operator-sdk + IP_FAMILY=${IP_FAMILY:-ipv4} if [ "${OCP}" == "true" ]; then COMMAND="oc"
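
For context on the `IP_FAMILY=${IP_FAMILY:-ipv4}` line added above: the e2e scripts run with `set -euo pipefail`, and under `set -u` (nounset) expanding a variable that the caller never exported aborts the script with exactly the `IP_FAMILY: unbound variable` error quoted in the commit message. The snippet below is a minimal, standalone sketch of that default-expansion pattern, not code taken from the repository:

```sh
#!/usr/bin/env bash
set -euo pipefail   # -u (nounset) turns any unset-variable expansion into a fatal error

# Without a default, referencing IP_FAMILY when the caller did not export it
# would abort the script with: "IP_FAMILY: unbound variable"
IP_FAMILY="${IP_FAMILY:-ipv4}"   # fall back to ipv4 when nothing is set

echo "Running the e2e suite with IP_FAMILY=${IP_FAMILY}"
```

With the default in place, a plain `make test.e2e.kind` keeps targeting IPv4 clusters, while exporting `IP_FAMILY=dual` before the run (as described in PATCH 22/25) enables the dual-stack KIND setup and the dual-stack test suite.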