diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 0b1d2b6d5..591b24ffa 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,6 +1,6 @@ { "name": "istio build-tools", - "image": "gcr.io/istio-testing/build-tools:master-0aa2afb4bac9a4fd1bfe50a929c077a643066b3a", + "image": "gcr.io/istio-testing/build-tools:master-8584ca511549c1cd96d9cb8b900297de83f4cb64", "privileged": true, "remoteEnv": { "USE_GKE_GCLOUD_AUTH_PLUGIN": "True", diff --git a/Makefile.core.mk b/Makefile.core.mk index 3a3cd3d7f..07a3ce318 100644 --- a/Makefile.core.mk +++ b/Makefile.core.mk @@ -165,8 +165,8 @@ test.e2e.ocp: ## Run the end-to-end tests against an existing OCP cluster. GINKGO_FLAGS="$(GINKGO_FLAGS)" ${SOURCE_DIR}/tests/e2e/integ-suite-ocp.sh .PHONY: test.e2e.kind -test.e2e.kind: ## Deploy a KinD cluster and run the end-to-end tests against it. - GINKGO_FLAGS="$(GINKGO_FLAGS)" ${SOURCE_DIR}/tests/e2e/integ-suite-kind.sh +test.e2e.kind: istioctl ## Deploy a KinD cluster and run the end-to-end tests against it. + GINKGO_FLAGS="$(GINKGO_FLAGS)" ISTIOCTL="$(ISTIOCTL)" ${SOURCE_DIR}/tests/e2e/integ-suite-kind.sh .PHONY: test.e2e.describe test.e2e.describe: ## Runs ginkgo outline -format indent over the e2e test to show in BDD style the steps and test structure @@ -450,6 +450,7 @@ CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen ENVTEST ?= $(LOCALBIN)/setup-envtest GITLEAKS ?= $(LOCALBIN)/gitleaks OPM ?= $(LOCALBIN)/opm +ISTIOCTL ?= $(LOCALBIN)/istioctl ## Tool Versions OPERATOR_SDK_VERSION ?= v1.36.1 @@ -457,6 +458,7 @@ HELM_VERSION ?= v3.15.3 CONTROLLER_TOOLS_VERSION ?= v0.16.0 OPM_VERSION ?= v1.45.0 GITLEAKS_VERSION ?= v8.18.4 +ISTIOCTL_VERSION ?= 1.23.0 # GENERATE_RELATED_IMAGES defines whether `spec.relatedImages` is going to be generated or not # To disable set flag to false @@ -483,6 +485,28 @@ $(OPERATOR_SDK): $(LOCALBIN) curl -sSLfo $(LOCALBIN)/operator-sdk https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$(OS)_$(ARCH) && \ chmod +x $(LOCALBIN)/operator-sdk; +.PHONY: istioctl $(ISTIOCTL) +istioctl: $(ISTIOCTL) ## Download istioctl to bin directory. +istioctl: TARGET_OS=$(shell go env GOOS) +istioctl: TARGET_ARCH=$(shell go env GOARCH) +$(ISTIOCTL): $(LOCALBIN) + @test -s $(LOCALBIN)/istioctl || { \ + OSEXT=$(if $(filter $(TARGET_OS),darwin),osx,linux); \ + URL="https://github.com/istio/istio/releases/download/$(ISTIOCTL_VERSION)/istioctl-$(ISTIOCTL_VERSION)-$$OSEXT-$(TARGET_ARCH).tar.gz"; \ + echo "Fetching istioctl from $$URL"; \ + curl -fsL $$URL -o /tmp/istioctl.tar.gz || { \ + echo "Download failed! Please check the URL and ISTIOCTL_VERSION."; \ + exit 1; \ + }; \ + tar -xzf /tmp/istioctl.tar.gz -C /tmp || { \ + echo "Extraction failed!"; \ + exit 1; \ + }; \ + mv /tmp/istioctl $(LOCALBIN)/istioctl; \ + rm -f /tmp/istioctl.tar.gz; \ + echo "istioctl has been downloaded and placed in $(LOCALBIN)"; \ + } + .PHONY: controller-gen controller-gen: $(LOCALBIN) ## Download controller-gen to bin directory. If wrong version is installed, it will be overwritten.
@test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \ diff --git a/common/scripts/kind_provisioner.sh b/common/scripts/kind_provisioner.sh index 9c372b9ca..a2a5691f5 100644 --- a/common/scripts/kind_provisioner.sh +++ b/common/scripts/kind_provisioner.sh @@ -34,6 +34,9 @@ set -x # DEFAULT_KIND_IMAGE is used to set the Kubernetes version for KinD unless overridden in params to setup_kind_cluster(s) DEFAULT_KIND_IMAGE="gcr.io/istio-testing/kind-node:v1.28.4" +# the default kind cluster should be ipv4 if not otherwise specified +IP_FAMILY="${IP_FAMILY:-ipv4}" + # COMMON_SCRIPTS contains the directory this file is in. COMMON_SCRIPTS=$(dirname "${BASH_SOURCE:-$0}") @@ -174,11 +177,6 @@ function setup_kind_cluster() { CONFIG=${DEFAULT_CLUSTER_YAML} fi - # Configure the ipFamily of the cluster - if [ -n "${IP_FAMILY}" ]; then - yq eval ".networking.ipFamily = \"${IP_FAMILY}\"" -i "${CONFIG}" - fi - KIND_WAIT_FLAG="--wait=180s" KIND_DISABLE_CNI="false" if [[ -n "${KUBERNETES_CNI:-}" ]]; then @@ -187,7 +185,8 @@ fi # Create KinD cluster - if ! (yq eval "${CONFIG}" --expression ".networking.disableDefaultCNI = ${KIND_DISABLE_CNI}" | \ + if ! (yq eval "${CONFIG}" --expression ".networking.disableDefaultCNI = ${KIND_DISABLE_CNI}" \ + --expression ".networking.ipFamily = \"${IP_FAMILY}\"" | \ kind create cluster --name="${NAME}" -v4 --retain --image "${IMAGE}" ${KIND_WAIT_FLAG:+"$KIND_WAIT_FLAG"} --config -); then echo "Could not setup KinD environment. Something wrong with KinD setup. Exporting logs." return 9 @@ -478,4 +477,4 @@ function ips_to_cidrs() { from ipaddress import summarize_address_range, IPv4Address [ print(n.compressed) for n in summarize_address_range(IPv4Address(u'$IP_RANGE_START'), IPv4Address(u'$IP_RANGE_END')) ] EOF -} +} \ No newline at end of file diff --git a/common/scripts/setup_env.sh b/common/scripts/setup_env.sh index ee932a946..e2c5b9211 100755 --- a/common/scripts/setup_env.sh +++ b/common/scripts/setup_env.sh @@ -75,7 +75,7 @@ fi TOOLS_REGISTRY_PROVIDER=${TOOLS_REGISTRY_PROVIDER:-gcr.io} PROJECT_ID=${PROJECT_ID:-istio-testing} if [[ "${IMAGE_VERSION:-}" == "" ]]; then - IMAGE_VERSION=master-0aa2afb4bac9a4fd1bfe50a929c077a643066b3a + IMAGE_VERSION=master-8584ca511549c1cd96d9cb8b900297de83f4cb64 fi if [[ "${IMAGE_NAME:-}" == "" ]]; then IMAGE_NAME=build-tools fi diff --git a/pkg/test/util/supportedversion/supportedversion.go b/pkg/test/util/supportedversion/supportedversion.go index 59cd44e56..58778d956 100644 --- a/pkg/test/util/supportedversion/supportedversion.go +++ b/pkg/test/util/supportedversion/supportedversion.go @@ -17,6 +17,8 @@ package supportedversion import ( "os" "path/filepath" + "regexp" + "strconv" "github.com/istio-ecosystem/sail-operator/pkg/test/project" "gopkg.in/yaml.v3" @@ -47,6 +49,12 @@ func init() { panic(err) } + // Major, Minor and Patch need to be set by parsing the version string + for i := range versions.Versions { + v := &versions.Versions[i] + v.Major, v.Minor, v.Patch = parseVersion(v.Version) + } + List = versions.Versions Default = List[0].Name if len(List) > 1 { @@ -55,6 +63,22 @@ New = List[0].Name } +func parseVersion(version string) (int, int, int) { + // The version can have these formats: "1.22.2", "1.23.0-rc.1", "1.24-alpha" + re := regexp.MustCompile(`^(\d+)\.(\d+)\.?(\d*)`) + + matches := re.FindStringSubmatch(version) + if len(matches) < 4 { + return 0, 0, 0 + } + + major, _ := strconv.Atoi(matches[1]) + minor, _ :=
strconv.Atoi(matches[2]) + patch, _ := strconv.Atoi(matches[3]) + + return major, minor, patch +} + type Versions struct { Versions []VersionInfo `json:"versions"` } @@ -62,6 +86,9 @@ type Versions struct { type VersionInfo struct { Name string `json:"name"` Version string `json:"version"` + Major int `json:"major"` + Minor int `json:"minor"` + Patch int `json:"patch"` Repo string `json:"repo"` Branch string `json:"branch,omitempty"` Commit string `json:"commit"` diff --git a/tests/e2e/common-operator-integ-suite.sh b/tests/e2e/common-operator-integ-suite.sh index d4fdedb1d..935cd1a3e 100755 --- a/tests/e2e/common-operator-integ-suite.sh +++ b/tests/e2e/common-operator-integ-suite.sh @@ -29,6 +29,7 @@ parse_flags() { SKIP_DEPLOY=${SKIP_DEPLOY:-false} OLM=${OLM:-false} DESCRIBE=false + MULTICLUSTER=false while [ $# -gt 0 ]; do case "$1" in --ocp) @@ -39,6 +40,10 @@ parse_flags() { shift OCP=false ;; + --multicluster) + shift + MULTICLUSTER=true + ;; --skip-build) shift SKIP_BUILD=true @@ -80,6 +85,10 @@ parse_flags() { echo "Running on kind" fi + if [ "${MULTICLUSTER}" == "true" ]; then + echo "Running on multicluster" + fi + if [ "${SKIP_BUILD}" == "true" ]; then echo "Skipping build" fi @@ -108,6 +117,7 @@ initialize_variables() { COMMAND="kubectl" ARTIFACTS="${ARTIFACTS:-$(mktemp -d)}" KUBECONFIG="${KUBECONFIG:-"${ARTIFACTS}/config"}" + ISTIOCTL="${ISTIOCTL:-"istioctl"}" LOCALBIN="${LOCALBIN:-${HOME}/bin}" OPERATOR_SDK=${LOCALBIN}/operator-sdk @@ -258,6 +268,6 @@ fi # Run the go test passing the env variables defined that are going to be used in the operator tests # shellcheck disable=SC2086 IMAGE="${HUB}/${IMAGE_BASE}:${TAG}" SKIP_DEPLOY="${SKIP_DEPLOY}" OCP="${OCP}" ISTIO_MANIFEST="${ISTIO_MANIFEST}" \ -NAMESPACE="${NAMESPACE}" CONTROL_PLANE_NS="${CONTROL_PLANE_NS}" DEPLOYMENT_NAME="${DEPLOYMENT_NAME}" \ -ISTIO_NAME="${ISTIO_NAME}" COMMAND="${COMMAND}" VERSIONS_YAML_FILE="${VERSIONS_YAML_FILE}" KUBECONFIG="${KUBECONFIG}" \ +NAMESPACE="${NAMESPACE}" CONTROL_PLANE_NS="${CONTROL_PLANE_NS}" DEPLOYMENT_NAME="${DEPLOYMENT_NAME}" MULTICLUSTER="${MULTICLUSTER}" ARTIFACTS="${ARTIFACTS}" \ +ISTIO_NAME="${ISTIO_NAME}" COMMAND="${COMMAND}" VERSIONS_YAML_FILE="${VERSIONS_YAML_FILE}" KUBECONFIG="${KUBECONFIG}" ISTIOCTL_PATH="${ISTIOCTL}" \ go run github.com/onsi/ginkgo/v2/ginkgo -tags e2e --timeout 30m --junit-report=report.xml ${GINKGO_FLAGS} "${WD}"/... diff --git a/tests/e2e/config/default.yaml b/tests/e2e/config/default.yaml index 58c947c9e..9f160497d 100644 --- a/tests/e2e/config/default.yaml +++ b/tests/e2e/config/default.yaml @@ -27,9 +27,4 @@ containerdConfigPatches: - |- [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"] endpoint = ["http://kind-registry:5000"] -networking: - # MAISTRA specific: - # our prow cluster uses serviceSubnet 10.96.0.0/12, so the kind cluster must use other subnet to correctly route traffic; - # in this case, address 10.224.0.0 is chosen randomly from available set of subnets. 
- serviceSubnet: "10.224.0.0/12" - ipFamily: ipv4 + diff --git a/tests/e2e/config/multicluster.json b/tests/e2e/config/multicluster.json new file mode 100644 index 000000000..247824a37 --- /dev/null +++ b/tests/e2e/config/multicluster.json @@ -0,0 +1,14 @@ +[ + { + "cluster_name": "primary", + "pod_subnet": "10.10.0.0/16", + "svc_subnet": "10.255.10.0/24", + "network_id": "0" + }, + { + "cluster_name": "remote", + "pod_subnet": "10.20.0.0/16", + "svc_subnet": "10.255.20.0/24", + "network_id": "1" + } +] \ No newline at end of file diff --git a/tests/e2e/controlplane/control_plane_suite_test.go b/tests/e2e/controlplane/control_plane_suite_test.go index 1603b380e..872a0578a 100644 --- a/tests/e2e/controlplane/control_plane_suite_test.go +++ b/tests/e2e/controlplane/control_plane_suite_test.go @@ -40,9 +40,13 @@ var ( skipDeploy = env.GetBool("SKIP_DEPLOY", false) expectedRegistry = env.Get("EXPECTED_REGISTRY", "^docker\\.io|^gcr\\.io") bookinfoNamespace = env.Get("BOOKINFO_NAMESPACE", "bookinfo") + multicluster = env.GetBool("MULTICLUSTER", false) ) func TestInstall(t *testing.T) { + if multicluster { + t.Skip("Skipping test for multicluster") + } RegisterFailHandler(Fail) setup() RunSpecs(t, "Control Plane Suite") @@ -52,6 +56,6 @@ func setup() { GinkgoWriter.Println("************ Running Setup ************") GinkgoWriter.Println("Initializing k8s client") - cl, err = k8sclient.InitK8sClient() + cl, err = k8sclient.InitK8sClient("") Expect(err).NotTo(HaveOccurred()) } diff --git a/tests/e2e/controlplane/control_plane_test.go b/tests/e2e/controlplane/control_plane_test.go index ab3007e2f..ec7a6574a 100644 --- a/tests/e2e/controlplane/control_plane_test.go +++ b/tests/e2e/controlplane/control_plane_test.go @@ -19,7 +19,6 @@ package controlplane import ( "fmt" "path/filepath" - "regexp" "strings" "time" @@ -43,12 +42,6 @@ import ( "istio.io/istio/pkg/ptr" ) -// version can have one of the following formats: -// - 1.22.2 -// - 1.23.0-rc.1 -// - 1.24-alpha -var istiodVersionRegex = regexp.MustCompile(`Version:"(\d+\.\d+(\.\d+)?(-\w+(\.\d+)?)?)`) - var _ = Describe("Control Plane Installation", Ordered, func() { SetDefaultEventuallyTimeout(180 * time.Second) SetDefaultEventuallyPollingInterval(time.Second) @@ -219,7 +212,7 @@ spec: It("deploys istiod", func(ctx SpecContext) { Eventually(common.GetObject).WithArguments(ctx, cl, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). 
Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Istiod is not Available; unexpected Condition") - Expect(getVersionFromIstiod()).To(Equal(version.Version), "Unexpected istiod version") + Expect(common.GetVersionFromIstiod()).To(Equal(version.Version), "Unexpected istiod version") Success("Istiod is deployed in the namespace and Running") }) @@ -356,19 +349,6 @@ func ImageFromRegistry(regexp string) types.GomegaMatcher { return HaveField("Image", MatchRegexp(regexp)) } -func getVersionFromIstiod() (string, error) { - output, err := kubectl.Exec(controlPlaneNamespace, "deploy/istiod", "", "pilot-discovery version") - if err != nil { - return "", fmt.Errorf("error getting version from istiod: %w", err) - } - - matches := istiodVersionRegex.FindStringSubmatch(output) - if len(matches) > 1 && matches[1] != "" { - return matches[1], nil - } - return "", fmt.Errorf("error getting version from istiod: version not found in output: %s", output) -} - func indent(level int, str string) string { indent := strings.Repeat(" ", level) return indent + strings.ReplaceAll(str, "\n", "\n"+indent) diff --git a/tests/e2e/integ-suite-kind.sh b/tests/e2e/integ-suite-kind.sh index 39a5b415e..90a939851 100755 --- a/tests/e2e/integ-suite-kind.sh +++ b/tests/e2e/integ-suite-kind.sh @@ -29,9 +29,19 @@ export KIND_REGISTRY="localhost:${KIND_REGISTRY_PORT}" export DEFAULT_CLUSTER_YAML="${SCRIPTPATH}/config/default.yaml" export IP_FAMILY="${IP_FAMILY:-ipv4}" export ARTIFACTS="${ARTIFACTS:-$(mktemp -d)}" +export MULTICLUSTER="${MULTICLUSTER:-false}" +# Set this variable to exclude kind clusters from the kubectl node annotations. +# Set the kind cluster names separated by commas. +export KIND_EXCLUDE_CLUSTERS="${KIND_EXCLUDE_CLUSTERS:-}" +export ISTIOCTL="${ISTIOCTL:-${ROOT}/bin/istioctl}" + # Set variable for cluster kind name export KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-operator-integration-tests}" +if [ "${MULTICLUSTER}" == "true" ]; then + export KIND_CLUSTER_NAME_2="${KIND_CLUSTER_NAME}-2" + export KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME}-1" +fi # Use the local registry instead of the default HUB export HUB="${KIND_REGISTRY}" @@ -52,16 +62,44 @@ function setup_kind_registry() { fi # https://docs.tilt.dev/choosing_clusters.html#discovering-the-registry - # TODO get context/config from existing variables - kind export kubeconfig --name="${KIND_CLUSTER_NAME}" - for node in $(kind get nodes --name="${KIND_CLUSTER_NAME}"); do - kubectl annotate node "${node}" "kind.x-k8s.io/registry=localhost:${KIND_REGISTRY_PORT}" --overwrite; + for cluster in $(kind get clusters); do + # TODO get context/config from existing variables + # Avoid adding the registry to excluded clusters. Use this when you have multiple clusters running.
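+ # e.g. KIND_EXCLUDE_CLUSTERS="external-kind-1,external-kind-2" (hypothetical cluster names) makes this loop skip those clusters.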
+ if [[ "${KIND_EXCLUDE_CLUSTERS}" == *"${cluster}"* ]]; then + continue + fi + + kind export kubeconfig --name="${cluster}" + for node in $(kind get nodes --name="${cluster}"); do + kubectl annotate node "${node}" "kind.x-k8s.io/registry=localhost:${KIND_REGISTRY_PORT}" --overwrite; + done + done } -KUBECONFIG="${ARTIFACTS}/config" setup_kind_cluster "${KIND_CLUSTER_NAME}" "" "" "true" "true" -setup_kind_registry +if [ "${MULTICLUSTER}" == "true" ]; then + CLUSTER_TOPOLOGY_CONFIG_FILE="${SCRIPTPATH}/config/multicluster.json" + load_cluster_topology "${CLUSTER_TOPOLOGY_CONFIG_FILE}" + setup_kind_clusters "" "" + setup_kind_registry + + export KUBECONFIG="${KUBECONFIGS[0]}" + export KUBECONFIG2="${KUBECONFIGS[1]}" +else + KUBECONFIG="${ARTIFACTS}/config" setup_kind_cluster "${KIND_CLUSTER_NAME}" "" "" "true" "true" + setup_kind_registry +fi + + +# Check that istioctl is present using ${ISTIOCTL} +if ! command -v "${ISTIOCTL}" &> /dev/null; then + echo "istioctl not found. Please set the ISTIOCTL environment variable to the path of the istioctl binary" + exit 1 +fi # Run the integration tests echo "Running integration tests" +if [ "${MULTICLUSTER}" == "true" ]; then + ARTIFACTS="${ARTIFACTS}" ISTIOCTL="${ISTIOCTL}" ./tests/e2e/common-operator-integ-suite.sh --kind --multicluster +else ARTIFACTS="${ARTIFACTS}" ./tests/e2e/common-operator-integ-suite.sh --kind +fi \ No newline at end of file diff --git a/tests/e2e/multicluster/multicluster_multiprimary_test.go b/tests/e2e/multicluster/multicluster_multiprimary_test.go new file mode 100644 index 000000000..97c404c52 --- /dev/null +++ b/tests/e2e/multicluster/multicluster_multiprimary_test.go @@ -0,0 +1,344 @@ +//go:build e2e + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package multicluster + +import ( + "context" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/istio-ecosystem/sail-operator/api/v1alpha1" + "github.com/istio-ecosystem/sail-operator/pkg/kube" + "github.com/istio-ecosystem/sail-operator/pkg/test/project" + . "github.com/istio-ecosystem/sail-operator/pkg/test/util/ginkgo" + "github.com/istio-ecosystem/sail-operator/pkg/test/util/supportedversion" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/certs" + common "github.com/istio-ecosystem/sail-operator/tests/e2e/util/common" + . "github.com/istio-ecosystem/sail-operator/tests/e2e/util/gomega" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/helm" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/istioctl" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Multicluster deployment models", Ordered, func() { + SetDefaultEventuallyTimeout(180 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + BeforeAll(func(ctx SpecContext) { + if !skipDeploy { + // Deploy the Sail Operator on both clusters + Expect(kubectl.CreateNamespace(namespace, kubeconfig)).To(Succeed(), "Namespace failed to be created on Cluster #1") + Expect(kubectl.CreateNamespace(namespace, kubeconfig2)).To(Succeed(), "Namespace failed to be created on Cluster #2") + + Expect(helm.Install("sail-operator", filepath.Join(project.RootDir, "chart"), "--namespace "+namespace, "--set=image="+image, "--kubeconfig "+kubeconfig)). + To(Succeed(), "Operator failed to be deployed in Cluster #1") + + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key(deploymentName, namespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Operator deployment is not Available on Cluster #1; unexpected Condition") + Success("Operator is deployed in the Cluster #1 namespace and Running") + + Expect(helm.Install("sail-operator", filepath.Join(project.RootDir, "chart"), "--namespace "+namespace, "--set=image="+image, "--kubeconfig "+kubeconfig2)). + To(Succeed(), "Operator failed to be deployed in Cluster #2") + + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key(deploymentName, namespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Operator deployment is not Available on Cluster #2; unexpected Condition") + Success("Operator is deployed in the Cluster #2 namespace and Running") + } + }) + + Describe("Multi-Primary Multi-Network configuration", func() { + // Test the Multi-Primary Multi-Network configuration for each supported Istio version + for _, version := range supportedversion.List { + Context("Istio version is: "+version.Version, func() { + When("Istio resources are created in both clusters with multicluster configuration", func() { + BeforeAll(func(ctx SpecContext) { + Expect(kubectl.CreateNamespace(controlPlaneNamespace, kubeconfig)).To(Succeed(), "Namespace failed to be created") + Expect(kubectl.CreateNamespace(controlPlaneNamespace, kubeconfig2)).To(Succeed(), "Namespace failed to be created") + + // Push the intermediate CA to both clusters + Expect(certs.PushIntermediateCA(controlPlaneNamespace, kubeconfig, "east", "network1", artifacts, clPrimary)). + To(Succeed(), "Error pushing intermediate CA to Cluster #1") + Expect(certs.PushIntermediateCA(controlPlaneNamespace, kubeconfig2, "west", "network2", artifacts, clRemote)). + To(Succeed(), "Error pushing intermediate CA to Cluster #2") + + // Wait for the secret to be created in both clusters + Eventually(func() error { + _, err := common.GetObject(context.Background(), clPrimary, kube.Key("cacerts", controlPlaneNamespace), &corev1.Secret{}) + return err + }).ShouldNot(HaveOccurred(), "Secret is not created on Cluster #1") + + Eventually(func() error { + _, err := common.GetObject(context.Background(), clRemote, kube.Key("cacerts", controlPlaneNamespace), &corev1.Secret{}) + return err + }).ShouldNot(HaveOccurred(), "Secret is not created on Cluster #2") + + multiclusterYAML := ` +apiVersion: sailoperator.io/v1alpha1 +kind: Istio +metadata: + name: default +spec: + version: %s + namespace: %s + values: + global: + meshID: %s + multiCluster: + clusterName: %s + network: %s` + multiclusterCluster1YAML := fmt.Sprintf(multiclusterYAML, version.Name, controlPlaneNamespace, "mesh1", "cluster1", "network1") + Log("Istio CR Cluster #1: ", multiclusterCluster1YAML) + Expect(kubectl.CreateFromString(multiclusterCluster1YAML, kubeconfig)).To(Succeed(), "Istio Resource creation failed on Cluster #1") + + multiclusterCluster2YAML := fmt.Sprintf(multiclusterYAML, version.Name, controlPlaneNamespace, "mesh1", "cluster2", "network2") + Log("Istio CR Cluster #2: ", multiclusterCluster2YAML) + Expect(kubectl.CreateFromString(multiclusterCluster2YAML, kubeconfig2)).To(Succeed(), "Istio Resource creation failed on Cluster #2") + }) + + It("updates both Istio CR status to Ready", func(ctx SpecContext) { + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key(istioName), &v1alpha1.Istio{}). + Should(HaveCondition(v1alpha1.IstioConditionReady, metav1.ConditionTrue), "Istio is not Ready on Cluster #1; unexpected Condition") + Success("Istio CR is Ready on Cluster #1") + + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key(istioName), &v1alpha1.Istio{}). + Should(HaveCondition(v1alpha1.IstioConditionReady, metav1.ConditionTrue), "Istio is not Ready on Cluster #2; unexpected Condition") + Success("Istio CR is Ready on Cluster #2") + }) + + It("deploys istiod", func(ctx SpecContext) { + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Istiod is not Available on Cluster #1; unexpected Condition") + Expect(common.GetVersionFromIstiod()).To(Equal(version.Version), "Unexpected istiod version") + Success("Istiod is deployed in the namespace and Running on Cluster #1") + + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Istiod is not Available on Cluster #2; unexpected Condition") + Expect(common.GetVersionFromIstiod()).To(Equal(version.Version), "Unexpected istiod version") + Success("Istiod is deployed in the namespace and Running on Cluster #2") + }) + }) + + When("Gateway is created in both clusters", func() { + BeforeAll(func(ctx SpecContext) { + Expect(kubectl.Apply(controlPlaneNamespace, eastGatewayYAML, kubeconfig)).To(Succeed(), "Gateway creation failed on Cluster #1") + + Expect(kubectl.Apply(controlPlaneNamespace, westGatewayYAML, kubeconfig2)).To(Succeed(), "Gateway creation failed on Cluster #2") + + // Expose the Gateway service in both clusters + Expect(kubectl.Apply(controlPlaneNamespace, exposeServiceYAML, kubeconfig)).To(Succeed(), "Expose Service creation failed on Cluster #1") + Expect(kubectl.Apply(controlPlaneNamespace, exposeServiceYAML, kubeconfig2)).To(Succeed(), "Expose Service creation failed on Cluster #2") + }) + + It("updates both Gateway status to Available", func(ctx SpecContext) { + Eventually((common.GetObject)). + WithArguments(ctx, clPrimary, kube.Key("istio-eastwestgateway", controlPlaneNamespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Gateway is not Ready on Cluster #1; unexpected Condition") + + Eventually((common.GetObject)). + WithArguments(ctx, clRemote, kube.Key("istio-eastwestgateway", controlPlaneNamespace), &appsv1.Deployment{}).
+ Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Gateway is not Ready on Cluster #2; unexpected Condition") + Success("Gateway is created and available in both clusters") + }) + }) + + When("remote secrets are installed on each cluster", func() { + BeforeAll(func(ctx SpecContext) { + // Get the internal IP of the control plane node in both clusters + internalIPCluster1, err := kubectl.GetInternalIP("node-role.kubernetes.io/control-plane", kubeconfig) + Expect(err).NotTo(HaveOccurred()) + Expect(internalIPCluster1).NotTo(BeEmpty(), "Internal IP is empty for Cluster #1") + + internalIPCluster2, err := kubectl.GetInternalIP("node-role.kubernetes.io/control-plane", kubeconfig2) + Expect(internalIPCluster2).NotTo(BeEmpty(), "Internal IP is empty for Cluster #2") + Expect(err).NotTo(HaveOccurred()) + + // Install a remote secret in Cluster #1 that provides access to the Cluster #2 API server. + secret, err := istioctl.CreateRemoteSecret(kubeconfig2, "cluster2", internalIPCluster2) + Expect(err).NotTo(HaveOccurred()) + Expect(kubectl.ApplyString("", secret, kubeconfig)).To(Succeed(), "Remote secret creation failed on Cluster #1") + + // Install a remote secret in Cluster #2 that provides access to the Cluster #1 API server. + secret, err = istioctl.CreateRemoteSecret(kubeconfig, "cluster1", internalIPCluster1) + Expect(err).NotTo(HaveOccurred()) + Expect(kubectl.ApplyString("", secret, kubeconfig2)).To(Succeed(), "Remote secret creation failed on Cluster #2") + }) + + It("remote secrets are created", func(ctx SpecContext) { + secret, err := common.GetObject(ctx, clPrimary, kube.Key("istio-remote-secret-cluster2", controlPlaneNamespace), &corev1.Secret{}) + Expect(err).NotTo(HaveOccurred()) + Expect(secret).NotTo(BeNil(), "Secret is not created on Cluster #1") + + secret, err = common.GetObject(ctx, clRemote, kube.Key("istio-remote-secret-cluster1", controlPlaneNamespace), &corev1.Secret{}) + Expect(err).NotTo(HaveOccurred()) + Expect(secret).NotTo(BeNil(), "Secret is not created on Cluster #2") + Success("Remote secrets are created in both clusters") + }) + }) + + When("sample apps are deployed in both clusters", func() { + BeforeAll(func(ctx SpecContext) { + // Deploy the sample app in both clusters + deploySampleApp("sample", version, kubeconfig, kubeconfig2) + Success("Sample app is deployed in both clusters") + }) + + It("updates the pods status to Ready", func(ctx SpecContext) { + samplePodsCluster1 := &corev1.PodList{} + + clPrimary.List(ctx, samplePodsCluster1, client.InNamespace("sample")) + Expect(samplePodsCluster1.Items).ToNot(BeEmpty(), "No pods found in sample namespace") + + for _, pod := range samplePodsCluster1.Items { + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key(pod.Name, "sample"), &corev1.Pod{}). + Should(HaveCondition(corev1.PodReady, metav1.ConditionTrue), "Pod is not Ready on Cluster #1; unexpected Condition") + } + + samplePodsCluster2 := &corev1.PodList{} + clRemote.List(ctx, samplePodsCluster2, client.InNamespace("sample")) + Expect(samplePodsCluster2.Items).ToNot(BeEmpty(), "No pods found in sample namespace") + + for _, pod := range samplePodsCluster2.Items { + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key(pod.Name, "sample"), &corev1.Pod{}). + Should(HaveCondition(corev1.PodReady, metav1.ConditionTrue), "Pod is not Ready on Cluster #2; unexpected Condition") + } + Success("Sample app is created in both clusters and Running") + }) + + It("can access the sample app from both clusters", func(ctx SpecContext) { + sleepPodNameCluster1, err := common.GetPodNameByLabel(ctx, clPrimary, "sample", "app", "sleep") + Expect(sleepPodNameCluster1).NotTo(BeEmpty(), "Sleep pod not found on Cluster #1") + Expect(err).NotTo(HaveOccurred(), "Error getting sleep pod name on Cluster #1") + + sleepPodNameCluster2, err := common.GetPodNameByLabel(ctx, clRemote, "sample", "app", "sleep") + Expect(sleepPodNameCluster2).NotTo(BeEmpty(), "Sleep pod not found on Cluster #2") + Expect(err).NotTo(HaveOccurred(), "Error getting sleep pod name on Cluster #2") + + // Run the curl command from the sleep pod in Cluster #2 and get the response list to validate that we get responses from both clusters + Cluster2Responses := strings.Join(getListCurlResponses(sleepPodNameCluster2, kubeconfig2), "\n") + Expect(Cluster2Responses).To(ContainSubstring("Hello version: v1"), "Responses from Cluster #2 are not as expected") + Expect(Cluster2Responses).To(ContainSubstring("Hello version: v2"), "Responses from Cluster #2 are not as expected") + + // Run the curl command from the sleep pod in Cluster #1 and get the response list to validate that we get responses from both clusters + Cluster1Responses := strings.Join(getListCurlResponses(sleepPodNameCluster1, kubeconfig), "\n") + Expect(Cluster1Responses).To(ContainSubstring("Hello version: v1"), "Responses from Cluster #1 are not as expected") + Expect(Cluster1Responses).To(ContainSubstring("Hello version: v2"), "Responses from Cluster #1 are not as expected") + Success("Sample app is accessible from both clusters") + }) + }) + + When("Istio CR is deleted in both clusters", func() { + BeforeEach(func() { + // Delete the Istio CR in both clusters + Expect(kubectl.Delete(controlPlaneNamespace, "istio", istioName, kubeconfig)).To(Succeed(), "Istio CR failed to be deleted") + Expect(kubectl.Delete(controlPlaneNamespace, "istio", istioName, kubeconfig2)).To(Succeed(), "Istio CR failed to be deleted") + Success("Istio CR is deleted in both clusters") + }) + + It("removes istiod pod", func(ctx SpecContext) { + // Check istiod pod is deleted in both clusters + Eventually(clPrimary.Get).WithArguments(ctx, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). + Should(ReturnNotFoundError(), "Istiod should not exist anymore on Cluster #1") + Eventually(clRemote.Get).WithArguments(ctx, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). + Should(ReturnNotFoundError(), "Istiod should not exist anymore on Cluster #2") + }) + }) + + AfterAll(func(ctx SpecContext) { + // Delete namespace to ensure clean up for new tests iteration + Expect(kubectl.DeleteNamespace(controlPlaneNamespace, kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Cluster #1") + Expect(kubectl.DeleteNamespace(controlPlaneNamespace, kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Cluster #2") + + common.CheckNamespaceEmpty(ctx, clPrimary, controlPlaneNamespace) + common.CheckNamespaceEmpty(ctx, clRemote, controlPlaneNamespace) + Success("ControlPlane Namespaces are empty") + + // Delete the entire sample namespace in both clusters + Expect(kubectl.DeleteNamespace("sample", kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Cluster #1") + Expect(kubectl.DeleteNamespace("sample", kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Cluster #2") + + common.CheckNamespaceEmpty(ctx, clPrimary, "sample") + common.CheckNamespaceEmpty(ctx, clRemote, "sample") + Success("Sample app is deleted in both clusters") + }) + }) + } + }) + + AfterAll(func(ctx SpecContext) { + // Delete the Sail Operator from both clusters + Expect(kubectl.DeleteNamespace(namespace, kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Cluster #1") + Expect(kubectl.DeleteNamespace(namespace, kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Cluster #2") + + // Check that the operator namespace is empty in both clusters + common.CheckNamespaceEmpty(ctx, clPrimary, namespace) + common.CheckNamespaceEmpty(ctx, clRemote, namespace) +}) + +// deploySampleApp deploys the sample app in both clusters +func deploySampleApp(ns string, istioVersion supportedversion.VersionInfo, kubeconfig string, kubeconfig2 string) { + // Create the namespace + Expect(kubectl.CreateNamespace(ns, kubeconfig)).To(Succeed(), "Namespace failed to be created") + Expect(kubectl.CreateNamespace(ns, kubeconfig2)).To(Succeed(), "Namespace failed to be created") + + // Label the namespace + Expect(kubectl.Patch("", "namespace", ns, "merge", `{"metadata":{"labels":{"istio-injection":"enabled"}}}`, kubeconfig)). + To(Succeed(), "Error patching sample namespace") + Expect(kubectl.Patch("", "namespace", ns, "merge", `{"metadata":{"labels":{"istio-injection":"enabled"}}}`, kubeconfig2)). + To(Succeed(), "Error patching sample namespace") + + version := istioVersion.Version + // Deploy the sample app from upstream URL in both clusters + if istioVersion.Name == "latest" { + version = "master" + } + helloWorldURL := fmt.Sprintf("https://raw.githubusercontent.com/istio/istio/%s/samples/helloworld/helloworld.yaml", version) + Expect(kubectl.ApplyWithLabels(ns, helloWorldURL, "service=helloworld", kubeconfig)).To(Succeed(), "Sample service deploy failed on Cluster #1") + Expect(kubectl.ApplyWithLabels(ns, helloWorldURL, "service=helloworld", kubeconfig2)).To(Succeed(), "Sample service deploy failed on Cluster #2") + + Expect(kubectl.ApplyWithLabels(ns, helloWorldURL, "version=v1", kubeconfig)).To(Succeed(), "Sample service deploy failed on Cluster #1") + Expect(kubectl.ApplyWithLabels(ns, helloWorldURL, "version=v2", kubeconfig2)).To(Succeed(), "Sample service deploy failed on Cluster #2") + + sleepURL := fmt.Sprintf("https://raw.githubusercontent.com/istio/istio/%s/samples/sleep/sleep.yaml", version) + Expect(kubectl.Apply(ns, sleepURL, kubeconfig)).To(Succeed(), "Sample sleep deploy failed on Cluster #1") + Expect(kubectl.Apply(ns, sleepURL, kubeconfig2)).To(Succeed(), "Sample sleep deploy failed on Cluster #2") +} + +// getListCurlResponses runs the curl command 10 times from the sleep pod in the given cluster and returns the list of responses +func getListCurlResponses(podName, kubeconfig string) []string { + var responses []string + for i := 0; i < 10; i++ { + response, err := kubectl.Exec("sample", podName, "sleep", "curl -sS helloworld.sample:5000/hello", kubeconfig) + Expect(err).NotTo(HaveOccurred()) + responses = append(responses, response) + } + return responses +} diff --git a/tests/e2e/multicluster/multicluster_primaryremote_test.go b/tests/e2e/multicluster/multicluster_primaryremote_test.go new file mode 100644 index 000000000..798db3c2c --- /dev/null +++ b/tests/e2e/multicluster/multicluster_primaryremote_test.go @@ -0,0 +1,346 @@ +//go:build e2e + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package multicluster + +import ( + "context" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/istio-ecosystem/sail-operator/api/v1alpha1" + "github.com/istio-ecosystem/sail-operator/pkg/kube" + "github.com/istio-ecosystem/sail-operator/pkg/test/project" + . "github.com/istio-ecosystem/sail-operator/pkg/test/util/ginkgo" + "github.com/istio-ecosystem/sail-operator/pkg/test/util/supportedversion" + certs "github.com/istio-ecosystem/sail-operator/tests/e2e/util/certs" + common "github.com/istio-ecosystem/sail-operator/tests/e2e/util/common" + . "github.com/istio-ecosystem/sail-operator/tests/e2e/util/gomega" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/helm" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/istioctl" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Multicluster deployment models", Ordered, func() { + SetDefaultEventuallyTimeout(180 * time.Second) + SetDefaultEventuallyPollingInterval(time.Second) + + BeforeAll(func(ctx SpecContext) { + if !skipDeploy { + // Deploy the Sail Operator on both clusters + Expect(kubectl.CreateNamespace(namespace, kubeconfig)).To(Succeed(), "Namespace failed to be created on Primary Cluster") + Expect(kubectl.CreateNamespace(namespace, kubeconfig2)).To(Succeed(), "Namespace failed to be created on Remote Cluster") + + Expect(helm.Install("sail-operator", filepath.Join(project.RootDir, "chart"), "--namespace "+namespace, "--set=image="+image, "--kubeconfig "+kubeconfig)). + To(Succeed(), "Operator failed to be deployed in Primary Cluster") + + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key(deploymentName, namespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Operator deployment is not Available on Primary Cluster; unexpected Condition") + Success("Operator is deployed in the Primary namespace and Running") + + Expect(helm.Install("sail-operator", filepath.Join(project.RootDir, "chart"), "--namespace "+namespace, "--set=image="+image, "--kubeconfig "+kubeconfig2)). + To(Succeed(), "Operator failed to be deployed in Remote Cluster") + + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key(deploymentName, namespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Operator deployment is not Available on Remote Cluster; unexpected Condition") + Success("Operator is deployed in the Remote namespace and Running") + } + }) + + Describe("Primary-Remote - Multi-Network configuration", func() { + // Test the Primary-Remote - Multi-Network configuration for each supported Istio version + for _, version := range supportedversion.List { + // The Primary-Remote - Multi-Network configuration is only supported in Istio 1.23 and later + if version.Major < 1 || (version.Major == 1 && version.Minor < 23) { + continue + } + + Context("Istio version is: "+version.Version, func() { + When("Istio resources are created in both clusters", func() { + BeforeAll(func(ctx SpecContext) { + Expect(kubectl.CreateNamespace(controlPlaneNamespace, kubeconfig)).To(Succeed(), "Namespace failed to be created") + Expect(kubectl.CreateNamespace(controlPlaneNamespace, kubeconfig2)).To(Succeed(), "Namespace failed to be created") + + // Push the intermediate CA to both clusters + Expect(certs.PushIntermediateCA(controlPlaneNamespace, kubeconfig, "east", "network1", artifacts, clPrimary)). + To(Succeed(), "Error pushing intermediate CA to Primary Cluster") + Expect(certs.PushIntermediateCA(controlPlaneNamespace, kubeconfig2, "west", "network2", artifacts, clRemote)).
+ To(Succeed(), "Error pushing intermediate CA to Remote Cluster") + + // Wait for the secret to be created in both clusters + Eventually(func() error { + _, err := common.GetObject(context.Background(), clPrimary, kube.Key("cacerts", controlPlaneNamespace), &corev1.Secret{}) + return err + }).ShouldNot(HaveOccurred(), "Secret is not created on Primary Cluster") + + Eventually(func() error { + _, err := common.GetObject(context.Background(), clRemote, kube.Key("cacerts", controlPlaneNamespace), &corev1.Secret{}) + return err + }).ShouldNot(HaveOccurred(), "Secret is not created on Remote Cluster") + + PrimaryYAML := ` +apiVersion: sailoperator.io/v1alpha1 +kind: Istio +metadata: + name: default +spec: + version: %s + namespace: %s + values: + pilot: + env: + EXTERNAL_ISTIOD: "true" + global: + meshID: %s + multiCluster: + clusterName: %s + network: %s` + multiclusterPrimaryYAML := fmt.Sprintf(PrimaryYAML, version.Name, controlPlaneNamespace, "mesh1", "cluster1", "network1") + Log("Istio CR Primary: ", multiclusterPrimaryYAML) + Expect(kubectl.CreateFromString(multiclusterPrimaryYAML, kubeconfig)).To(Succeed(), "Istio Resource creation failed on Primary Cluster") + }) + + It("updates Istio CR on Primary cluster status to Ready", func(ctx SpecContext) { + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key(istioName), &v1alpha1.Istio{}). + Should(HaveCondition(v1alpha1.IstioConditionReady, metav1.ConditionTrue), "Istio is not Ready on Primary; unexpected Condition") + Success("Istio CR is Ready on Primary Cluster") + }) + + It("deploys istiod", func(ctx SpecContext) { + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Istiod is not Available on Primary; unexpected Condition") + Expect(common.GetVersionFromIstiod()).To(Equal(version.Version), "Unexpected istiod version") + Success("Istiod is deployed in the namespace and Running on Primary Cluster") + }) + }) + + When("Gateway is created on Primary cluster", func() { + BeforeAll(func(ctx SpecContext) { + Expect(kubectl.Apply(controlPlaneNamespace, eastGatewayYAML, kubeconfig)).To(Succeed(), "Gateway creation failed on Primary Cluster") + + // Expose istiod service in Primary cluster + Expect(kubectl.Apply(controlPlaneNamespace, exposeIstiodYAML, kubeconfig)).To(Succeed(), "Expose Istiod creation failed on Primary Cluster") + + // Expose the Gateway service in both clusters + Expect(kubectl.Apply(controlPlaneNamespace, exposeServiceYAML, kubeconfig)).To(Succeed(), "Expose Service creation failed on Primary Cluster") + }) + + It("updates Gateway status to Available", func(ctx SpecContext) { + Eventually((common.GetObject)). + WithArguments(ctx, clPrimary, kube.Key("istio-eastwestgateway", controlPlaneNamespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Gateway is not Ready on Primary; unexpected Condition") + }) + }) + + When("RemoteIstio is created in Remote cluster", func() { + BeforeAll(func(ctx SpecContext) { + RemoteYAML := ` +apiVersion: sailoperator.io/v1alpha1 +kind: RemoteIstio +metadata: + name: default +spec: + version: %s + namespace: istio-system + values: + istiodRemote: + injectionPath: /inject/cluster/remote/net/network2 + global: + remotePilotAddress: %s` + + remotePilotAddress, err := common.GetSVCLoadBalancerAddress(ctx, clPrimary, controlPlaneNamespace, "istio-eastwestgateway") + Expect(remotePilotAddress).NotTo(BeEmpty(), "Remote Pilot Address is empty") + Expect(err).NotTo(HaveOccurred(), "Error getting Remote Pilot Address") + remoteIstioYAML := fmt.Sprintf(RemoteYAML, version.Name, remotePilotAddress) + Log("RemoteIstio CR: ", remoteIstioYAML) + By("Creating RemoteIstio CR on Remote Cluster") + Expect(kubectl.CreateFromString(remoteIstioYAML, kubeconfig2)).To(Succeed(), "RemoteIstio Resource creation failed on Remote Cluster") + + // Set the controlplane cluster and network for Remote namespace + By("Patching the istio-system namespace on Remote Cluster") + Expect( + kubectl.Patch("", + "namespace", + controlPlaneNamespace, + "merge", + `{"metadata":{"annotations":{"topology.istio.io/controlPlaneClusters":"cluster1"}}}`, + kubeconfig2)). + To(Succeed(), "Error patching istio-system namespace") + Expect( + kubectl.Patch("", + "namespace", + controlPlaneNamespace, + "merge", + `{"metadata":{"labels":{"topology.istio.io/network":"network2"}}}`, + kubeconfig2)). + To(Succeed(), "Error patching istio-system namespace") + + // To be able to access the remote cluster from the primary cluster, we need to create a secret in the primary cluster + // RemoteIstio resource will not be Ready until the secret is created + // Get the internal IP of the control plane node in Remote cluster + internalIPRemote, err := kubectl.GetInternalIP("node-role.kubernetes.io/control-plane", kubeconfig2) + Expect(internalIPRemote).NotTo(BeEmpty(), "Internal IP is empty for Remote Cluster") + Expect(err).NotTo(HaveOccurred()) + + // Wait for the RemoteIstio CR to be created; this could be replaced with a condition check, but the resource will not be Ready at this point + time.Sleep(5 * time.Second) + + // Install a remote secret in Primary cluster that provides access to the Remote cluster API server. + By("Creating Remote Secret on Primary Cluster") + secret, err := istioctl.CreateRemoteSecret(kubeconfig2, "remote", internalIPRemote) + Expect(err).NotTo(HaveOccurred()) + Expect(kubectl.ApplyString("", secret, kubeconfig)).To(Succeed(), "Remote secret creation failed on Primary Cluster") + }) + + It("secret is created", func(ctx SpecContext) { + secret, err := common.GetObject(ctx, clPrimary, kube.Key("istio-remote-secret-remote", controlPlaneNamespace), &corev1.Secret{}) + Expect(err).NotTo(HaveOccurred()) + Expect(secret).NotTo(BeNil(), "Secret is not created on Primary Cluster") + Success("Remote secret is created in Primary cluster") + }) + + It("updates RemoteIstio CR status to Ready", func(ctx SpecContext) { + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key(istioName), &v1alpha1.RemoteIstio{}). + Should(HaveCondition(v1alpha1.IstioConditionReady, metav1.ConditionTrue), "RemoteIstio is not Ready on Remote; unexpected Condition") + Success("RemoteIstio CR is Ready on Remote Cluster") + }) + }) + + When("gateway is created in Remote cluster", func() { + BeforeAll(func(ctx SpecContext) { + Expect(kubectl.Apply(controlPlaneNamespace, westGatewayYAML, kubeconfig2)).To(Succeed(), "Gateway creation failed on Remote Cluster") + Success("Gateway is created in Remote cluster") + }) + + It("updates Gateway status to Available", func(ctx SpecContext) { + Eventually((common.GetObject)). + WithArguments(ctx, clRemote, kube.Key("istio-eastwestgateway", controlPlaneNamespace), &appsv1.Deployment{}). + Should(HaveCondition(appsv1.DeploymentAvailable, metav1.ConditionTrue), "Gateway is not Ready on Remote; unexpected Condition") + Success("Gateway is created and available in Remote cluster") + }) + }) + + When("sample apps are deployed in both clusters", func() { + BeforeAll(func(ctx SpecContext) { + // Deploy the sample app in both clusters + deploySampleApp("sample", version, kubeconfig, kubeconfig2) + Success("Sample app is deployed in both clusters") + }) + + It("updates the pods status to Ready", func(ctx SpecContext) { + samplePodsPrimary := &corev1.PodList{} + + clPrimary.List(ctx, samplePodsPrimary, client.InNamespace("sample")) + Expect(samplePodsPrimary.Items).ToNot(BeEmpty(), "No pods found in sample namespace") + + for _, pod := range samplePodsPrimary.Items { + Eventually(common.GetObject). + WithArguments(ctx, clPrimary, kube.Key(pod.Name, "sample"), &corev1.Pod{}). + Should(HaveCondition(corev1.PodReady, metav1.ConditionTrue), "Pod is not Ready on Primary; unexpected Condition") + } + + samplePodsRemote := &corev1.PodList{} + clRemote.List(ctx, samplePodsRemote, client.InNamespace("sample")) + Expect(samplePodsRemote.Items).ToNot(BeEmpty(), "No pods found in sample namespace") + + for _, pod := range samplePodsRemote.Items { + Eventually(common.GetObject). + WithArguments(ctx, clRemote, kube.Key(pod.Name, "sample"), &corev1.Pod{}). + Should(HaveCondition(corev1.PodReady, metav1.ConditionTrue), "Pod is not Ready on Remote; unexpected Condition") + } + Success("Sample app is created in both clusters and Running") + }) + + It("can access the sample app from both clusters", func(ctx SpecContext) { + sleepPodNamePrimary, err := common.GetPodNameByLabel(ctx, clPrimary, "sample", "app", "sleep") + Expect(sleepPodNamePrimary).NotTo(BeEmpty(), "Sleep pod not found on Primary Cluster") + Expect(err).NotTo(HaveOccurred(), "Error getting sleep pod name on Primary Cluster") + + sleepPodNameRemote, err := common.GetPodNameByLabel(ctx, clRemote, "sample", "app", "sleep") + Expect(sleepPodNameRemote).NotTo(BeEmpty(), "Sleep pod not found on Remote Cluster") + Expect(err).NotTo(HaveOccurred(), "Error getting sleep pod name on Remote Cluster") + + // Run the curl command from the sleep pod in the Remote Cluster and get the response list to validate that we get responses from both clusters + remoteResponses := strings.Join(getListCurlResponses(sleepPodNameRemote, kubeconfig2), "\n") + Expect(remoteResponses).To(ContainSubstring("Hello version: v1"), "Responses from Remote Cluster are not as expected") + Expect(remoteResponses).To(ContainSubstring("Hello version: v2"), "Responses from Remote Cluster are not as expected") + + // Run the curl command from the sleep pod in the Primary Cluster and get the response list to validate that we get responses from both clusters + primaryResponses := strings.Join(getListCurlResponses(sleepPodNamePrimary, kubeconfig), "\n") + Expect(primaryResponses).To(ContainSubstring("Hello version: v1"), "Responses from Primary Cluster are not as expected") + Expect(primaryResponses).To(ContainSubstring("Hello version: v2"), "Responses from Primary Cluster are not as expected") + Success("Sample app is accessible from both clusters") + }) + }) + + When("Istio CR and RemoteIstio CR are deleted in both clusters", func() { + BeforeEach(func() { + Expect(kubectl.Delete(controlPlaneNamespace, "istio", istioName, kubeconfig)).To(Succeed(), "Istio CR failed to be deleted") + Expect(kubectl.Delete(controlPlaneNamespace, "remoteistio", istioName, kubeconfig2)).To(Succeed(), "RemoteIstio CR failed to be deleted") + Success("Istio and RemoteIstio are deleted") + }) + + It("removes istiod on Primary", func(ctx SpecContext) { + Eventually(clPrimary.Get).WithArguments(ctx, kube.Key("istiod", controlPlaneNamespace), &appsv1.Deployment{}).
+ Should(ReturnNotFoundError(), "Istiod should not exist anymore") + Success("Istiod is deleted on Primary Cluster") + }) + }) + + AfterAll(func(ctx SpecContext) { + // Delete namespace to ensure clean up for new tests iteration + Expect(kubectl.DeleteNamespace(controlPlaneNamespace, kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Primary Cluster") + Expect(kubectl.DeleteNamespace(controlPlaneNamespace, kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Remote Cluster") + + common.CheckNamespaceEmpty(ctx, clPrimary, controlPlaneNamespace) + common.CheckNamespaceEmpty(ctx, clRemote, controlPlaneNamespace) + Success("ControlPlane Namespaces are empty") + + // Delete the entire sample namespace in both clusters + Expect(kubectl.DeleteNamespace("sample", kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Primary Cluster") + Expect(kubectl.DeleteNamespace("sample", kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Remote Cluster") + + common.CheckNamespaceEmpty(ctx, clPrimary, "sample") + common.CheckNamespaceEmpty(ctx, clRemote, "sample") + Success("Sample app is deleted in both clusters") + }) + }) + } + }) + + AfterAll(func(ctx SpecContext) { + // Delete the Sail Operator from both clusters + Expect(kubectl.DeleteNamespace(namespace, kubeconfig)).To(Succeed(), "Namespace failed to be deleted on Primary Cluster") + Expect(kubectl.DeleteNamespace(namespace, kubeconfig2)).To(Succeed(), "Namespace failed to be deleted on Remote Cluster") + + // Check that the namespace is empty + common.CheckNamespaceEmpty(ctx, clPrimary, namespace) + common.CheckNamespaceEmpty(ctx, clRemote, namespace) + }) +}) diff --git a/tests/e2e/multicluster/multicluster_suite_test.go b/tests/e2e/multicluster/multicluster_suite_test.go new file mode 100644 index 000000000..5c0cd061a --- /dev/null +++ b/tests/e2e/multicluster/multicluster_suite_test.go @@ -0,0 +1,96 @@ +//go:build e2e + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package multicluster + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/certs" + k8sclient "github.com/istio-ecosystem/sail-operator/tests/e2e/util/client" + env "github.com/istio-ecosystem/sail-operator/tests/e2e/util/env" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + clPrimary client.Client + clRemote client.Client + err error + ocp = env.GetBool("OCP", false) + namespace = env.Get("NAMESPACE", "sail-operator") + deploymentName = env.Get("DEPLOYMENT_NAME", "sail-operator") + controlPlaneNamespace = env.Get("CONTROL_PLANE_NS", "istio-system") + istioName = env.Get("ISTIO_NAME", "default") + image = env.Get("IMAGE", "quay.io/maistra-dev/sail-operator:latest") + skipDeploy = env.GetBool("SKIP_DEPLOY", false) + multicluster = env.GetBool("MULTICLUSTER", false) + kubeconfig = env.Get("KUBECONFIG", "") + kubeconfig2 = env.Get("KUBECONFIG2", "") + artifacts = env.Get("ARTIFACTS", "/tmp/artifacts") + + eastGatewayYAML string + westGatewayYAML string + exposeServiceYAML string + exposeIstiodYAML string +) + +func TestInstall(t *testing.T) { + if !multicluster { + t.Skip("Skipping test. Only valid for multicluster") + } + if ocp { + t.Skip("Skipping test. Not valid for OCP") + // TODO: Implement the steps to run the test on OCP + } + RegisterFailHandler(Fail) + setup(t) + RunSpecs(t, "Multicluster Suite") +} + +func setup(t *testing.T) { + GinkgoWriter.Println("************ Running Setup ************") + + GinkgoWriter.Println("Initializing k8s client") + if clPrimary, err = k8sclient.InitK8sClient(kubeconfig); err != nil { + t.Fatalf("Error initializing k8s client for the primary cluster: %v", err) + } + if clRemote, err = k8sclient.InitK8sClient(kubeconfig2); err != nil { + t.Fatalf("Error initializing k8s client for the remote cluster: %v", err) + } + + err := certs.CreateIntermediateCA(artifacts) + if err != nil { + t.Fatalf("Error creating intermediate CA: %v", err) + } + + // Set the path for the multicluster YAML files to be used + workDir, err := os.Getwd() + if err != nil { + t.Fatalf("Error getting working directory: %v", err) + } + + // Set base path + baseRepoDir := filepath.Join(workDir, "../../..") + eastGatewayYAML = fmt.Sprintf("%s/docs/multicluster/east-west-gateway-net1.yaml", baseRepoDir) + westGatewayYAML = fmt.Sprintf("%s/docs/multicluster/east-west-gateway-net2.yaml", baseRepoDir) + exposeServiceYAML = fmt.Sprintf("%s/docs/multicluster/expose-services.yaml", baseRepoDir) + exposeIstiodYAML = fmt.Sprintf("%s/docs/multicluster/expose-istiod.yaml", baseRepoDir) +} diff --git a/tests/e2e/operator/operator_suite_test.go b/tests/e2e/operator/operator_suite_test.go index ebe1c972d..32b737594 100644 --- a/tests/e2e/operator/operator_suite_test.go +++ b/tests/e2e/operator/operator_suite_test.go @@ -33,9 +33,13 @@ var ( image = env.Get("IMAGE", "quay.io/maistra-dev/sail-operator:latest") namespace = env.Get("NAMESPACE", "sail-operator") deploymentName = env.Get("DEPLOYMENT_NAME", "sail-operator") + multicluster = env.GetBool("MULTICLUSTER", false) ) func TestInstall(t *testing.T) { + if multicluster { + t.Skip("Skipping test for multicluster") + } RegisterFailHandler(Fail) setup() RunSpecs(t, "Install Operator Suite") @@ -46,7 +50,7 @@ func setup() { GinkgoWriter.Println("Initializing k8s client") var err error - cl, err = k8sclient.InitK8sClient() + cl, err = k8sclient.InitK8sClient("") Expect(err).NotTo(HaveOccurred()) if ocp { diff --git a/tests/e2e/util/certs/certs.go b/tests/e2e/util/certs/certs.go new file mode 100644 index 000000000..78aaaa004 --- /dev/null +++ b/tests/e2e/util/certs/certs.go @@ -0,0 +1,280 @@ +//go:build e2e + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package certs + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/istio-ecosystem/sail-operator/pkg/kube" + common "github.com/istio-ecosystem/sail-operator/tests/e2e/util/common" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/kubectl" + "github.com/istio-ecosystem/sail-operator/tests/e2e/util/shell" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// CreateIntermediateCA creates the intermediate CA +func CreateIntermediateCA(basePath string) error { + certsDir := filepath.Join(basePath, "certs") + + // Create the certs directory + err := os.MkdirAll(certsDir, 0o755) + if err != nil { + return fmt.Errorf("failed to create certs directory: %w", err) + } + + // Create the root CA configuration file + err = createRootCAConf(certsDir) + if err != nil { + return fmt.Errorf("failed to create root-ca.conf: %w", err) + } + + // Step 1: Generate root-key.pem + rootKey := filepath.Join(certsDir, "root-key.pem") + _, err = shell.ExecuteCommand(fmt.Sprintf("openssl genrsa -out %s 4096", rootKey)) + if err != nil { + return fmt.Errorf("failed to generate root-key.pem: %w", err) + } + + // Step 2: Generate root-cert.csr using root-key.pem and root-ca.conf + rootCSR := filepath.Join(certsDir, "root-cert.csr") + rootConf := filepath.Join(certsDir, "root-ca.conf") // You'll need to ensure root-ca.conf exists + _, err = shell.ExecuteCommand(fmt.Sprintf("openssl req -sha256 -new -key %s -config %s -out %s", rootKey, rootConf, rootCSR)) + if err != nil { + return fmt.Errorf("failed to generate root-cert.csr: %w", err) + } + + // Step 3: Generate root-cert.pem + rootCert := filepath.Join(certsDir, "root-cert.pem") + _, err = shell.ExecuteCommand( + fmt.Sprintf("openssl x509 -req -sha256 -days 3650 -signkey %s -extensions req_ext -extfile %s -in %s -out %s", + rootKey, rootConf, rootCSR, rootCert)) + if err != nil { + return fmt.Errorf("failed to generate root-cert.pem: %w", err) + } + + // Step 4: Generate east-cacerts (self-signed intermediate certificates) + // Create directories for east and west if needed + eastDir := filepath.Join(certsDir, "east") + westDir := filepath.Join(certsDir, "west") + + // Create the east and west directories + err = os.MkdirAll(eastDir, 0o755) + if err != nil { + return fmt.Errorf("failed to create east directory: %w", err) + } + err = os.MkdirAll(westDir, 0o755) + if err != nil { + return fmt.Errorf("failed to create west directory: %w", err) + } + + // Create the intermediate CA configuration file + err = createIntermediateCAConf(eastDir) + if err != nil { + return fmt.Errorf("failed to create ca.conf on east dir: %w", err) + } + + err = createIntermediateCAConf(westDir) + if err != nil { + return fmt.Errorf("failed to create ca.conf on west dir: %w", err) + } + + err = generateIntermediateCACertificates(eastDir, rootCert, rootKey) + if err != nil { + return fmt.Errorf("failed to generate east intermediate CA certificates: %w", err) + } + + err = generateIntermediateCACertificates(westDir, rootCert, rootKey) + if err != nil { + return 
fmt.Errorf("failed to generate west intermediate CA certificates: %w", err) + } + + return nil +} + +func generateIntermediateCACertificates(dir string, rootCert string, rootKey string) error { + caKey := filepath.Join(dir, "ca-key.pem") + _, err := shell.ExecuteCommand(fmt.Sprintf("openssl genrsa -out %s 4096", caKey)) + if err != nil { + return fmt.Errorf("failed to generate east-ca-key.pem: %w", err) + } + + caCSR := filepath.Join(dir, "ca-cert.csr") + caConf := filepath.Join(dir, "ca.conf") + _, err = shell.ExecuteCommand(fmt.Sprintf("openssl req -sha256 -new -config %s -key %s -out %s", caConf, caKey, caCSR)) + if err != nil { + return fmt.Errorf("failed to generate east-ca-cert.csr: %w", err) + } + + caCert := filepath.Join(dir, "ca-cert.pem") + _, err = shell.ExecuteCommand( + fmt.Sprintf("openssl x509 -req -sha256 -days 3650 -CA %s -CAkey %s -CAcreateserial -extensions req_ext -extfile %s -in %s -out %s", + rootCert, rootKey, caConf, caCSR, caCert)) + if err != nil { + return fmt.Errorf("failed to generate east-ca-cert.pem: %w", err) + } + + certChain := filepath.Join(dir, "cert-chain.pem") + _, err = shell.ExecuteCommand(fmt.Sprintf("cat %s %s > %s", caCert, rootCert, certChain)) + if err != nil { + return fmt.Errorf("failed to generate east-cert-chain.pem: %w", err) + } + + return nil +} + +// createRootCAConf creates the root CA configuration file +func createRootCAConf(certsDir string) error { + confPath := filepath.Join(certsDir, "root-ca.conf") + confContent := ` +[ req ] +encrypt_key = no +prompt = no +utf8 = yes +default_md = sha256 +default_bits = 4096 +req_extensions = req_ext +x509_extensions = req_ext +distinguished_name = req_dn + +[ req_ext ] +subjectKeyIdentifier = hash +basicConstraints = critical, CA:true +keyUsage = critical, digitalSignature, nonRepudiation, keyEncipherment, keyCertSign + +[ req_dn ] +O = Istio +CN = Root CA +` + + // Write the configuration file to the directory + return writeFile(confPath, confContent) +} + +// createIntermediateCAConf creates the intermediate CA configuration file +func createIntermediateCAConf(certsDir string) error { + confPath := filepath.Join(certsDir, "ca.conf") + confContent := fmt.Sprintf(` +[ req ] +encrypt_key = no +prompt = no +utf8 = yes +default_md = sha256 +default_bits = 4096 +req_extensions = req_ext +x509_extensions = req_ext +distinguished_name = req_dn + +[ req_ext ] +subjectKeyIdentifier = hash +basicConstraints = critical, CA:true, pathlen:0 +keyUsage = critical, digitalSignature, nonRepudiation, keyEncipherment, keyCertSign +subjectAltName=@san + +[ san ] +DNS.1 = istiod.istio-system.svc + +[ req_dn ] +O = Istio +CN = Intermediate CA +L = %s +`, confPath) + + // Write the configuration file to the directory + return writeFile(confPath, confContent) +} + +// writeFile writes the content to the file +func writeFile(confPath string, confContent string) error { + file, err := os.Create(confPath) + if err != nil { + return fmt.Errorf("failed to create %s: %v", confPath, err) + } + defer file.Close() + + _, err = file.WriteString(confContent) + if err != nil { + return fmt.Errorf("failed to write to %s: %v", confPath, err) + } + + return nil +} + +// PushIntermediateCA pushes the intermediate CA to the cluster +func PushIntermediateCA(ns, kubeconfig, zone, network, basePath string, cl client.Client) error { + // Set cert dir + certDir := filepath.Join(basePath, "certs") + + // Check if the secret exists in the cluster + _, err := common.GetObject(context.Background(), cl, kube.Key("cacerts", ns), 
+func PushIntermediateCA(ns, kubeconfig, zone, network, basePath string, cl client.Client) error {
+	// Set cert dir
+	certDir := filepath.Join(basePath, "certs")
+
+	// Check if the secret already exists in the cluster; if the lookup fails, create it
+	_, err := common.GetObject(context.Background(), cl, kube.Key("cacerts", ns), &corev1.Secret{})
+	if err != nil {
+		// Label the namespace with the network
+		err = kubectl.Patch("", "namespace", ns, "merge", `{"metadata":{"labels":{"topology.istio.io/network":"`+network+`"}}}`, kubeconfig)
+		if err != nil {
+			return fmt.Errorf("failed to label namespace: %w", err)
+		}
+
+		// Set the paths of the pem files created in the setup
+		caCertPath := filepath.Join(certDir, zone, "ca-cert.pem")
+		caKeyPath := filepath.Join(certDir, zone, "ca-key.pem")
+		rootCertPath := filepath.Join(certDir, "root-cert.pem")
+		certChainPath := filepath.Join(certDir, zone, "cert-chain.pem")
+
+		// Read the pem content from the files to create the secret
+		caCert, err := os.ReadFile(caCertPath)
+		if err != nil {
+			return fmt.Errorf("failed to read ca-cert.pem: %w", err)
+		}
+		caKey, err := os.ReadFile(caKeyPath)
+		if err != nil {
+			return fmt.Errorf("failed to read ca-key.pem: %w", err)
+		}
+		rootCert, err := os.ReadFile(rootCertPath)
+		if err != nil {
+			return fmt.Errorf("failed to read root-cert.pem: %w", err)
+		}
+		certChain, err := os.ReadFile(certChainPath)
+		if err != nil {
+			return fmt.Errorf("failed to read cert-chain.pem: %w", err)
+		}
+
+		// Create the secret by using the client in the cluster and the files created in the setup
+		secret := &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "cacerts",
+				Namespace: ns,
+			},
+			Data: map[string][]byte{
+				"ca-cert.pem":    caCert,
+				"ca-key.pem":     caKey,
+				"root-cert.pem":  rootCert,
+				"cert-chain.pem": certChain,
+			},
+		}
+
+		err = cl.Create(context.Background(), secret)
+		if err != nil {
+			return fmt.Errorf("failed to create secret: %w", err)
+		}
+	}
+
+	return nil
+}
diff --git a/tests/e2e/util/client/client.go b/tests/e2e/util/client/client.go
index 9ab45ee74..efebdad0f 100644
--- a/tests/e2e/util/client/client.go
+++ b/tests/e2e/util/client/client.go
@@ -27,8 +27,17 @@ import (
 )
 
 // getConfig returns the configuration of the kubernetes go-client
-func getConfig() (*rest.Config, error) {
-	// use the current context in kubeconfig
+func getConfig(kubeconfig string) (*rest.Config, error) {
+	// If a kubeconfig path is provided, use it
+	if kubeconfig != "" {
+		config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+		if err != nil {
+			return nil, fmt.Errorf("error building config: %w", err)
+		}
+
+		return config, nil
+	}
+	// If no kubeconfig is provided, use the current context from the KUBECONFIG environment variable
 	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
 	if err != nil {
 		return nil, fmt.Errorf("error building config: %w", err)
@@ -38,8 +47,11 @@ func getConfig() (*rest.Config, error) {
 }
 
 // InitK8sClient returns the kubernetes clientset
-func InitK8sClient() (client.Client, error) {
-	config, err := getConfig()
+// Arguments:
+// - kubeconfig: path to the kubeconfig file; pass "" to use the current
+//   context from the KUBECONFIG environment variable
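+// Example (the explicit path is hypothetical):
+//
+//	clPrimary, err := InitK8sClient("")                      // current context via $KUBECONFIG
+//	clRemote, err := InitK8sClient("/tmp/kubeconfig-remote") // explicit kubeconfig file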
+func InitK8sClient(kubeconfig string) (client.Client, error) {
+	config, err := getConfig(kubeconfig)
 	if err != nil {
 		return nil, fmt.Errorf("error getting config for k8s client: %w", err)
 	}
diff --git a/tests/e2e/util/common/e2e_utils.go b/tests/e2e/util/common/e2e_utils.go
index a787a4b49..e17e07686 100644
--- a/tests/e2e/util/common/e2e_utils.go
+++ b/tests/e2e/util/common/e2e_utils.go
@@ -18,6 +18,8 @@ package common
 
 import (
 	"context"
+	"fmt"
+	"regexp"
 	"strings"
 	"time"
 
@@ -39,6 +41,12 @@ var (
 	istioName         = env.Get("ISTIO_NAME", "default")
 	istioCniName      = env.Get("ISTIOCNI_NAME", "default")
 	istioCniNamespace = env.Get("ISTIOCNI_NAMESPACE", "istio-cni")
+
+	// the version reported by istiod can have one of the following formats:
+	// - 1.22.2
+	// - 1.23.0-rc.1
+	// - 1.24-alpha
+	istiodVersionRegex = regexp.MustCompile(`Version:"(\d+\.\d+(\.\d+)?(-\w+(\.\d+)?)?)`)
 )
 
 // getObject returns the object with the given key
@@ -53,6 +61,36 @@ func GetList(ctx context.Context, cl client.Client, list client.ObjectList, opts
 	return list, err
 }
 
+// GetPodNameByLabel returns the name of the first pod that matches the given label
+func GetPodNameByLabel(ctx context.Context, cl client.Client, ns, labelKey, labelValue string) (string, error) {
+	podList := &corev1.PodList{}
+	err := cl.List(ctx, podList, client.InNamespace(ns), client.MatchingLabels{labelKey: labelValue})
+	if err != nil {
+		return "", err
+	}
+	if len(podList.Items) == 0 {
+		return "", fmt.Errorf("no pod found with label %s=%s", labelKey, labelValue)
+	}
+	return podList.Items[0].Name, nil
+}
+
+// GetSVCLoadBalancerAddress returns the LoadBalancer address of the service with the given name
+func GetSVCLoadBalancerAddress(ctx context.Context, cl client.Client, ns, svcName string) (string, error) {
+	svc := &corev1.Service{}
+	err := cl.Get(ctx, client.ObjectKey{Namespace: ns, Name: svcName}, svc)
+	if err != nil {
+		return "", err
+	}
+
+	// To avoid flakiness, wait for the LoadBalancer to be ready
+	Eventually(func() ([]corev1.LoadBalancerIngress, error) {
+		err := cl.Get(ctx, client.ObjectKey{Namespace: ns, Name: svcName}, svc)
+		return svc.Status.LoadBalancer.Ingress, err
+	}, "1m", "1s").ShouldNot(BeEmpty(), "LoadBalancer should be ready")
+
+	// Note: this assumes the LoadBalancer publishes an IP (as MetalLB does on KinD);
+	// some providers set a hostname instead
+	return svc.Status.LoadBalancer.Ingress[0].IP, nil
+}
+
 // checkNamespaceEmpty checks if the given namespace is empty
 func CheckNamespaceEmpty(ctx SpecContext, cl client.Client, ns string) {
 	// TODO: Check to add more validations
@@ -107,7 +145,7 @@ func logOperatorDebugInfo() {
 	logDebugElement("Events in "+namespace, events, err)
 
 	// Temporaty information to gather more details about failure
-	pods, err := kubectl.GetPods(namespace, "-o wide")
+	pods, err := kubectl.GetPods(namespace, "", "-o wide")
 	logDebugElement("Pods in "+namespace, pods, err)
 
 	describe, err := kubectl.Describe(namespace, "deployment", deploymentName)
@@ -118,7 +156,7 @@ func logIstioDebugInfo() {
 	resource, err := kubectl.GetYAML("", "istio", istioName)
 	logDebugElement("Istio YAML", resource, err)
 
-	output, err := kubectl.GetPods(controlPlaneNamespace, "-o wide")
+	output, err := kubectl.GetPods(controlPlaneNamespace, "", "-o wide")
 	logDebugElement("Pods in "+controlPlaneNamespace, output, err)
 
 	logs, err := kubectl.Logs(controlPlaneNamespace, "deploy/istiod", ptr.Of(120*time.Second))
@@ -139,7 +177,7 @@ func logCNIDebugInfo() {
 	logDebugElement("Events in "+istioCniNamespace, events, err)
 
 	// Temporaty information to gather more details about failure
-	pods, err := kubectl.GetPods(istioCniNamespace, "-o wide")
+	pods, err := kubectl.GetPods(istioCniNamespace, "", "-o wide")
 	logDebugElement("Pods in "+istioCniNamespace, pods, err)
 
 	describe, err := kubectl.Describe(istioCniNamespace, "daemonset", "istio-cni-node")
@@ -155,3 +193,16 @@ func logDebugElement(caption string, info string, err error) {
 		GinkgoWriter.Println(indent + strings.ReplaceAll(strings.TrimSpace(info), "\n", "\n"+indent))
 	}
 }
+
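+// GetVersionFromIstiod runs `pilot-discovery version` inside the istiod
+// deployment and extracts the version via istiodVersionRegex. The command
+// prints output of the form (values are hypothetical):
+//
+//	version.BuildInfo{Version:"1.23.0-rc.1", GitRevision:"...", ...}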
+func GetVersionFromIstiod() (string, error) {
+	output, err := kubectl.Exec(controlPlaneNamespace, "deploy/istiod", "", "pilot-discovery version")
+	if err != nil {
+		return "", fmt.Errorf("error getting version from istiod: %w", err)
+	}
+
+	matches := istiodVersionRegex.FindStringSubmatch(output)
+	if len(matches) > 1 && matches[1] != "" {
+		return matches[1], nil
+	}
+	return "", fmt.Errorf("error getting version from istiod: version not found in output: %s", output)
+}
diff --git a/tests/e2e/util/istioctl/istioctl.go b/tests/e2e/util/istioctl/istioctl.go
new file mode 100644
index 000000000..e055f5828
--- /dev/null
+++ b/tests/e2e/util/istioctl/istioctl.go
@@ -0,0 +1,55 @@
+//go:build e2e
+
+// Copyright Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package istioctl
+
+import (
+	"fmt"
+
+	env "github.com/istio-ecosystem/sail-operator/tests/e2e/util/env"
+	"github.com/istio-ecosystem/sail-operator/tests/e2e/util/shell"
+)
+
+var istioctlBinary = env.Get("ISTIOCTL_PATH", "istioctl")
+
+// istioctl builds an istioctl command line
+// If the environment variable ISTIOCTL_PATH is set, its value is used as the
+// binary; otherwise "istioctl" is expected to be on the PATH
+// Arguments:
+// - format: format string of the command, without the leading istioctl
+// - args: arguments for the format string
+func istioctl(format string, args ...interface{}) string {
+	binary := istioctlBinary
+	if binary == "" {
+		binary = "istioctl"
+	}
+
+	cmd := fmt.Sprintf(format, args...)
+
+	return fmt.Sprintf("%s %s", binary, cmd)
+}
+
+// CreateRemoteSecret creates the YAML of a secret with the credentials needed to access the remote cluster
+// Arguments:
+// - remoteKubeconfig: kubeconfig of the remote cluster
+// - secretName: name of the secret
+// - internalIP: internal IP of the remote cluster's API server node
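+// The assembled command looks like (values are hypothetical):
+//
+//	istioctl create-remote-secret --kubeconfig /tmp/kubeconfig2 --name remote-cluster --server=https://172.18.0.4:6443
+//
+// and the returned YAML is meant to be applied on the other cluster.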
+func CreateRemoteSecret(remoteKubeconfig string, secretName string, internalIP string) (string, error) {
+	cmd := istioctl("create-remote-secret --kubeconfig %s --name %s --server=https://%s:6443", remoteKubeconfig, secretName, internalIP)
+	return shell.ExecuteCommand(cmd)
+}
diff --git a/tests/e2e/util/kubectl/kubectl.go b/tests/e2e/util/kubectl/kubectl.go
index 7d4e8f054..931470851 100644
--- a/tests/e2e/util/kubectl/kubectl.go
+++ b/tests/e2e/util/kubectl/kubectl.go
@@ -25,6 +25,14 @@ import (
 
 const DefaultBinary = "kubectl"
 
+// optionalKubeconfig adds the --kubeconfig flag when a kubeconfig path is
+// passed as the first (and only) optional argument
+func optionalKubeconfig(kubeconfig []string) string {
+	if len(kubeconfig) > 0 && kubeconfig[0] != "" {
+		return fmt.Sprintf("--kubeconfig %s", kubeconfig[0])
+	}
+	return ""
+}
+
 // kubectl return the kubectl command
 // If the environment variable COMMAND is set, it will return the value of COMMAND
 // Otherwise, it will return the default value "kubectl" as default
@@ -41,8 +49,8 @@
 }
 
 // CreateFromString creates a resource from the given yaml string
-func CreateFromString(yamlString string) error {
-	cmd := kubectl("create -f -")
+func CreateFromString(yamlString string, kubeconfig ...string) error {
+	cmd := kubectl("create %s -f -", optionalKubeconfig(kubeconfig))
 	_, err := shell.ExecuteCommandWithInput(cmd, yamlString)
 	if err != nil {
 		return fmt.Errorf("error creating resource from yaml: %w", err)
 }
 
 // ApplyString applies the given yaml string to the cluster
-func ApplyString(ns, yamlString string) error {
-	cmd := kubectl("apply -n %s --server-side -f -", ns)
+func ApplyString(ns, yamlString string, kubeconfig ...string) error {
+	nsFlag := nsflag(ns)
+	// If the namespace is empty, drop the flag entirely; an empty -n value would make kubectl fail
+	// TODO: improve the nsflag function to handle this case
+	if ns == "" {
+		nsFlag = ""
+	}
+
+	cmd := kubectl("apply %s %s --server-side -f -", nsFlag, optionalKubeconfig(kubeconfig))
 	_, err := shell.ExecuteCommandWithInput(cmd, yamlString)
 	if err != nil {
 		return fmt.Errorf("error applying yaml: %w", err)
@@ -62,8 +77,14 @@
 }
 
 // Apply applies the given yaml file to the cluster
-func Apply(ns, yamlFile string) error {
-	cmd := kubectl("apply -n %s -f %s", ns, yamlFile)
+func Apply(ns, yamlFile string, kubeconfig ...string) error {
+	return ApplyWithLabels(ns, yamlFile, "", kubeconfig...)
+}
+
+// ApplyWithLabels applies the given yaml file to the cluster, filtered by the given label selector
+func ApplyWithLabels(ns, yamlFile string, label string, kubeconfig ...string) error {
+	cmd := kubectl("apply -n %s %s -f %s %s", ns, labelFlag(label), yamlFile, optionalKubeconfig(kubeconfig))
 	_, err := shell.ExecuteCommand(cmd)
 	if err != nil {
 		return fmt.Errorf("error applying yaml: %w", err)
@@ -72,10 +93,24 @@
 	return nil
 }
 
+// DeleteFromFile deletes a resource from the given yaml file
+func DeleteFromFile(yamlFile string, kubeconfig ...string) error {
+	cmd := kubectl("delete -f %s %s", yamlFile, optionalKubeconfig(kubeconfig))
+	_, err := shell.ExecuteCommand(cmd)
+	if err != nil {
+		return fmt.Errorf("error deleting resource from yaml: %w", err)
+	}
+
+	return nil
+}
+
 // CreateNamespace creates a namespace
 // If the namespace already exists, it will return nil
-func CreateNamespace(ns string) error {
-	cmd := kubectl("create namespace %s", ns)
+// Arguments:
+// - ns: namespace
+// - kubeconfig: optional path to the kubeconfig file of the target cluster
+func CreateNamespace(ns string, kubeconfig ...string) error {
+	cmd := kubectl("create namespace %s %s", ns, optionalKubeconfig(kubeconfig))
 	output, err := shell.ExecuteCommand(cmd)
 	if err != nil {
 		if strings.Contains(output, "AlreadyExists") {
@@ -89,8 +124,11 @@
 }
 
 // DeleteNamespace deletes a namespace
-func DeleteNamespace(ns string) error {
-	cmd := kubectl("delete namespace %s", ns)
+// Arguments:
+// - ns: namespace
+// - kubeconfig: optional path to the kubeconfig file of the target cluster
+func DeleteNamespace(ns string, kubeconfig ...string) error {
+	cmd := kubectl("delete namespace %s %s", ns, optionalKubeconfig(kubeconfig))
 	_, err := shell.ExecuteCommand(cmd)
 	if err != nil {
 		return fmt.Errorf("error deleting namespace: %w", err)
@@ -99,9 +137,9 @@
 	return nil
 }
 
-// Delete deletes a resource based on the namespace, kind and the name
-func Delete(ns, kind, name string) error {
-	cmd := kubectl("delete %s %s %s", kind, name, nsflag(ns))
+// Delete deletes a resource based on the namespace, kind and the name. Optionally, you can provide a kubeconfig
+func Delete(ns, kind, name string, kubeconfig ...string) error {
+	cmd := kubectl("delete %s %s %s %s", kind, name, nsflag(ns), optionalKubeconfig(kubeconfig))
 	_, err := shell.ExecuteCommand(cmd)
 	if err != nil {
 		return fmt.Errorf("error deleting deployment: %w", err)
@@ -124,8 +162,8 @@
 }
 
 // Patch patches a resource.
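+// Example (mirrors the namespace-labeling call in the certs util; values are hypothetical):
+//
+//	err := kubectl.Patch("", "namespace", "istio-system", "merge",
+//		`{"metadata":{"labels":{"topology.istio.io/network":"network1"}}}`, kubeconfig)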
-func Patch(ns, kind, name, patchType, patch string) error {
-	cmd := kubectl(`patch %s %s %s --type=%s -p=%q`, kind, name, prepend("-n", ns), patchType, patch)
+func Patch(ns, kind, name, patchType, patch string, kubeconfig ...string) error {
+	cmd := kubectl(`patch %s %s %s %s --type=%s -p=%q`, kind, name, prepend("-n", ns), optionalKubeconfig(kubeconfig), patchType, patch)
 	_, err := shell.ExecuteCommand(cmd)
 	if err != nil {
 		return fmt.Errorf("error patching resource: %w", err)
@@ -152,8 +190,13 @@
 }
 
 // GetPods returns the pods of a namespace
-func GetPods(ns string, args ...string) (string, error) {
-	cmd := kubectl("get pods %s %s", nsflag(ns), strings.Join(args, " "))
+// Pass kubeconfig "" to target the cluster of the current context
+func GetPods(ns string, kubeconfig string, args ...string) (string, error) {
+	kubeconfigFlag := ""
+	if kubeconfig != "" {
+		kubeconfigFlag = fmt.Sprintf("--kubeconfig %s", kubeconfig)
+	}
+
+	cmd := kubectl("get pods %s %s %s", nsflag(ns), strings.Join(args, " "), kubeconfigFlag)
 	output, err := shell.ExecuteCommand(cmd)
 	if err != nil {
 		return "", fmt.Errorf("error getting pods: %w, output: %s", err, output)
@@ -188,6 +231,20 @@
 	return output, nil
 }
 
+// GetInternalIP returns the internal IP of the first node matching the given label
+// Arguments:
+// - label: label selector used to pick the node
+// - kubeconfig: optional path to the kubeconfig file of the target cluster
+func GetInternalIP(label string, kubeconfig ...string) (string, error) {
+	cmd := kubectl("get nodes -l %s -o jsonpath='{.items[0].status.addresses[?(@.type==\"InternalIP\")].address}' %s", label, optionalKubeconfig(kubeconfig))
+	output, err := shell.ExecuteCommand(cmd)
+	if err != nil {
+		return "", fmt.Errorf("error getting internal IP: %w, output: %s", err, output)
+	}
+
+	return output, nil
+}
+
 // Logs returns the logs of a deployment
 // Arguments:
 // - ns: namespace
@@ -210,8 +267,8 @@ func sinceFlag(since *time.Duration) string {
 }
 
 // Exec executes a command in the pod or specific container
-func Exec(ns, pod, container, command string) (string, error) {
-	cmd := kubectl("exec %s %s %s -- %s", pod, containerflag(container), nsflag(ns), command)
+func Exec(ns, pod, container, command string, kubeconfig ...string) (string, error) {
+	cmd := kubectl("exec %s %s %s %s -- %s", pod, containerflag(container), nsflag(ns), optionalKubeconfig(kubeconfig), command)
 	output, err := shell.ExecuteCommand(cmd)
 	if err != nil {
 		return "", err
@@ -234,6 +291,13 @@ func nsflag(ns string) string {
 	return "-n " + ns
 }
 
+// labelFlag returns the -l flag when a label selector is provided
+func labelFlag(label string) string {
+	if label == "" {
+		return ""
+	}
+	return "-l " + label
+}
+
 func containerflag(container string) string {
 	if container == "" {
 		return ""