diff --git a/operators/o2ims-operator/Dockerfile b/operators/o2ims-operator/Dockerfile
new file mode 100644
index 00000000..118735fe
--- /dev/null
+++ b/operators/o2ims-operator/Dockerfile
@@ -0,0 +1,27 @@
+###########################################################################
+# Copyright 2025 The Nephio Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##########################################################################
+
+FROM python:3.12.9-alpine3.21 AS builder
+COPY controllers/ /src/
+COPY requirements.txt /
+RUN pip install --user -r /requirements.txt --no-cache-dir
+############### Target
+FROM python:3.12.9-alpine3.21 AS target
+# Copy both the pip --user install and the controller sources into /root/.local
+COPY --from=builder /root/.local \
+    /src/ \
+    /root/.local
+ENV PATH=/root/.local/bin:$PATH
+CMD ["kopf", "run", "/root/.local/manager.py", "--all-namespaces"]
diff --git a/operators/o2ims-operator/README.md b/operators/o2ims-operator/README.md
new file mode 100644
index 00000000..a6ca07eb
--- /dev/null
+++ b/operators/o2ims-operator/README.md
@@ -0,0 +1,279 @@
+# Nephio O-RAN O2 IMS Operator
+
+This operator implements O-RAN O2 IMS for Kubernetes-based cloud management.
+
+## How to start
+
+### Development Requirements:
+
+- Python 3.11
+- requirements.txt installed in the development environment
+
+### Nephio Management Cluster Requirements:
+
+- 6 vCPU
+- 10Gi RAM
+
+## Create Development Environment
+
+### Including Nephio mgmt Cluster
+
+The following will create a kind cluster and install required components such as:
+- Porch
+- ConfigSync
+- Gitea (available at `172.18.0.200:3000`)
+- MetalLB and MetalLB Sandbox Environment
+- CAPI
+- ConfigSync and RootSync objects to create clusters
+
+It will also configure a secret which the operator can use for development purposes (when running the operator in non-containerized environments). It creates a pod, reads the `porch-controllers` service account token from `/var/run/secrets/kubernetes.io/serviceaccount/token`, and redirects it to `/tmp/porch-token`.
+
+```bash
+# Get the repository
+git clone https://github.com/nephio-project/nephio.git
+cd operators/o2ims-operator
+# Create a virtual environment
+virtualenv venv -p python3
+source venv/bin/activate
+# Install requirements
+pip install -r requirements.txt
+# Set kernel parameters (run these commands after system restart or when new VM/system is created)
+sudo sysctl -w fs.inotify.max_user_watches=524288
+sudo sysctl -w fs.inotify.max_user_instances=512
+sudo sysctl -w kernel.keys.maxkeys=500000
+sudo sysctl -w kernel.keys.maxbytes=1000000
+# Run the create-cluster.sh script to create the mgmt cluster and development environment
+./tests/create-cluster.sh
+```
+
+The operator CRD can be fetched via the command below, though the cluster creation script above automatically fetches and applies it.
+
+```bash
+curl --create-dirs -O --output-dir ./config/crd/bases/ https://raw.githubusercontent.com/nephio-project/api/refs/heads/main/config/crd/bases/o2ims.provisioning.oran.org_provisioningrequests.yaml
+```
+
+### Existing Nephio mgmt Cluster
+
+#### Non-containerized Development Environment
+
+```bash
+kubectl create -f tests/sa-test-pod.yaml
+kubectl exec -it -n porch-system porch-sa-test -- cat /var/run/secrets/kubernetes.io/serviceaccount/token &> /tmp/porch-token
+# Create the CRD from the Nephio API repo
+kubectl create -f https://raw.githubusercontent.com/nephio-project/api/refs/heads/main/config/crd/bases/o2ims.provisioning.oran.org_provisioningrequests.yaml
+export TOKEN=/tmp/porch-token
+# Expose the kube proxy for development, after killing previous proxy sessions
+pkill kubectl
+nohup kubectl proxy --port 8080 &>/dev/null &
+```
+
+#### Containerized Development Environment
+
+Build a Docker image:
+
+```bash
+docker build -t o2ims:latest -f Dockerfile .
+```
+
+Push this image into your cluster. Here we are using a `kind` cluster, so we load the image with the command below:
+
+```bash
+kind load docker-image o2ims:latest -n o2ims-mgmt
+```
+
+`NOTE`: `o2ims-mgmt` is the name of the kind cluster. It is good to specify the cluster name if you have multiple clusters.
+
+Deploy the O2 IMS operator:
+
+```bash
+kpt pkg get --for-deployment https://github.com/nephio-project/catalog.git/nephio/optional/o2ims@origin/main /tmp/o2ims
+kpt fn render /tmp/o2ims
+kpt live init /tmp/o2ims
+kpt live apply /tmp/o2ims --reconcile-timeout=15m --output=table
+```
+
+### To Start the Operator:
+
+Note that there are some constants in manager.py that can be tuned before running the operator.
+
+```bash
+## To run in debug mode use the "--debug" flag or "-v --log-format=full"
+kopf run controllers/manager.py
+```
+
+Open another terminal and create a sample provisioning request to provision a cluster:
+
+```bash
+kubectl create -f tests/sample_provisioning_request.yaml
+```
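+
+The same request can also be created programmatically, which is handy when scripting development setups. A minimal sketch, assuming the development proxy from the steps above is listening on `127.0.0.1:8080` and the CRD is installed (field values are illustrative and mirror `tests/sample_provisioning_request.yaml`):
+
+```python
+import requests
+
+# ProvisioningRequest is cluster-scoped, so there is no namespace in the path
+PR_API = "http://127.0.0.1:8080/apis/o2ims.provisioning.oran.org/v1alpha1/provisioningrequests"
+
+body = {
+    "apiVersion": "o2ims.provisioning.oran.org/v1alpha1",
+    "kind": "ProvisioningRequest",
+    "metadata": {"name": "provisioning-request-sample"},
+    "spec": {
+        "name": "test-env-Provisioning",
+        "description": "Provisioning request for setting up a test kind cluster.",
+        "templateName": "nephio-workload-cluster",
+        "templateVersion": "v3.0.0",
+        "templateParameters": {
+            "clusterName": "edge",
+            "labels": {"nephio.org/site-type": "edge"},
+        },
+    },
+}
+resp = requests.post(PR_API, json=body)
+print(resp.status_code)  # expect 201 Created
+```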
+
+### Redeploying
+
+To redeploy the cluster, or to recreate the development environment, one must delete the created cluster. The Nephio mgmt cluster is deleted automatically when running `create-cluster.sh`, but the cluster deployed by this operator has the name given in the `clusterName` field. For example, if it is `edge`:
+
+```bash
+kind delete cluster -n edge
+```
+
+## Operator logic
+
+The O2IMS operator listens for the ProvisioningRequest CR; once one is created, it goes through the following stages:
+
+1. `ProvisioningRequest validation`: The controller [provisioning_request_validation_controller.py](./controllers/provisioning_request_validation_controller.py) validates the provisioning requests. Currently it checks the fields `clusterName` and `clusterProvisioner`. At the moment only `capi`-handled clusters are supported.
+2. `ProvisioningRequest creation`: The controller [provisioning_request_controller.py](./controllers/provisioning_request_controller.py) takes care of creating a package variant for Porch, which can be applied to the cluster where Porch is running. After applying the package variant it waits for the cluster to be created, following the creation by querying the `clusters.cluster.x-k8s.io` endpoint. Later we will add querying of packageRevisions as well, but at the moment there is a problem with querying packageRevisions because sometimes Porch is not able to process the request.
+
+Output of a **Successful workflow** (a status-polling sketch follows the sample output):
+
+The output is similar to: + +```yaml +apiVersion: o2ims.provisioning.oran.org/v1alpha1 +kind: ProvisioningRequest +metadata: + annotations: + provisioningrequests.o2ims.provisioning.oran.org/kopf-managed: "yes" + provisioningrequests.o2ims.provisioning.oran.org/last-ha-a.A3qw: | + {"spec":{"description":"Provisioning request for setting up a test kind cluster.","name":"test-env-Provisioning","templateName":"nephio-workload-cluster","templateParameters":{"clusterName":"edge","labels":{"nephio.org/region":"europe-paris-west","nephio.org/site-type":"edge"},"templateVersion":"v3.0.0"}} + provisioningrequests.o2ims.provisioning.oran.org/last-handled-configuration: | + {"spec":{"description":"Provisioning request for setting up a test kind cluster.","name":"test-env-Provisioning","templateName":"nephio-workload-cluster","templateParameters":{"clusterName":"edge","labels":{"nephio.org/region":"europe-paris-west","nephio.org/site-type":"edge"},"templateVersion":"v3.0.0"}} + creationTimestamp: "2025-01-31T13:50:46Z" + generation: 1 + name: provisioning-request-sample + resourceVersion: "12122" + uid: e8377db2-5652-4bc6-9632-8ce0836c6afd +spec: + description: Provisioning request for setting up a test kind cluster. + name: test-env-Provisioning + templateName: nephio-workload-cluster + templateParameters: + clusterName: edge + labels: + nephio.org/site-type: edge + nephio.org/region: europe-paris-west + nephio.org/owner: nephio-o2ims + templateVersion: v3.0.0 +status: + provisionedResourceSet: + oCloudInfrastructureResourceIds: + - cb92ece1-7272-4e01-9d5c-11e47b2e2473 + oCloudNodeClusterId: 09470fe4-cff6-4362-a7d6-badc77dbf059 + provisioningStatus: + provisioningMessage: Cluster resource created + provisioningState: fulfilled + provisioningUpdateTime: "2025-01-31T14:52:21Z" +``` + +
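+
+To track progress from a client, it is enough to watch the `status.provisioningStatus` block of the ProvisioningRequest. A minimal polling sketch, assuming the development proxy on `127.0.0.1:8080` (resource name and poll interval are illustrative; `fulfilled` and `failed` are the terminal states set by the operator):
+
+```python
+import time
+
+import requests
+
+URL = ("http://127.0.0.1:8080/apis/o2ims.provisioning.oran.org/"
+       "v1alpha1/provisioningrequests/provisioning-request-sample")
+
+for _ in range(180):  # ~30 minutes at one poll per 10 s
+    pr = requests.get(URL).json()
+    status = pr.get("status", {}).get("provisioningStatus", {})
+    print(status.get("provisioningState"), "-", status.get("provisioningMessage"))
+    if status.get("provisioningState") in ("fulfilled", "failed"):
+        break
+    time.sleep(10)
+```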
+
+## Unit Testing
+
+Unit tests are contained in the `tests` directory and are intended to test pieces of the O2IMS Operator in the `controllers` directory. Currently the unit tests are not comprehensive, but they provide the expected coverage of the core utility components.
+
+Prior to running the tests, install the requirements:
+```bash
+pip3 install -r ./tests/unit_test_requirements.txt
+```
+
+To run all tests in `test_utils.py` with abridged output:
+```bash
+pytest ./tests/test_utils.py
+```
+
+Output:
+```bash
+==================================================================== test session starts ====================================================================
+platform linux -- Python 3.13.0, pytest-8.3.4, pluggy-1.5.0
+rootdir: /home/dkosteck/Documents/nephio/operators/o2ims-operator
+collected 61 items
+
+tests/test_utils.py ............................................................. [100%]
+
+==================================================================== 61 passed in 0.14s =====================================================================
+
+```
+
+To run with verbose output (showing individual test results):
+```bash
+pytest -v ./tests/test_utils.py
+```
+
+## Known issues
+
+### Porch Endpoints and Stuck Deployments
+
+One may notice that the edge cluster is not provisioned, the provisioning request times out, or the package variant claims to be stalled (examples below). This is believed to be a bug in Porch, and so will be fixed upstream. For now a workaround has been identified.
+
+#### O2IMS Cluster Not Present
+
+You created the provisioning request, but the cluster was not created:
+
+```bash
+kind get clusters
+mgmt
+```
+
+#### ProvisioningRequest Timeout
+
+```bash
+kubectl get provisioningrequest provisioning-request-sample -o yaml | grep provisioningStatus: -A 2
+  provisioningStatus:
+    provisioningMessage: Cluster resource creation failed reached timeout
+    provisioningState: failed
+```
+
+#### PackageVariant Stalled
+
+The package variant created by O2IMS is stalled:
+
+```bash
+$ kubectl get packagevariant provisioning-request-sample -o yaml | grep conditions: -A 5
+  conditions:
+  - lastTransitionTime: "2025-01-29T22:25:08Z"
+    message: all validation checks passed
+    reason: Valid
+    status: "False"
+    type: Stalled
+```
+
+#### Potential Solution
+
+One may attempt to delete the PackageVariant, ProvisioningRequest, and the Porch Server. After the Porch Server is re-deployed, re-deploy the ProvisioningRequest:
+
+```bash
+## Delete the sample provisioning resource
+kubectl delete packagevariant provisioning-request-sample
+kubectl delete provisioningrequest provisioning-request-sample
+kubectl delete pod porch-server-7c5485b96b-tk7sr -n porch-system # Get the pod name from kubectl
+# Once deleted and the new Porch Server is up
+kubectl create -f tests/sample_provisioning_request.yaml
+```
+
+### Deleting the O2IMS-created Cluster
+
+Deletion via a provisioning request is not supported, so you have to delete the cluster manually.
+
+First delete the provisioning request:
+
+```bash
+kubectl delete -f tests/sample_provisioning_request.yaml
+```
+
+Then delete the resources, replacing **edge** with your cluster name and **mgmt** with the repository name of your management cluster:
+
+```bash
+kubectl delete packagevariants -l nephio.org/site-type=edge
+kubectl delete packagevariants provisioning-request-sample
+pkgList=$(kpt alpha rpkg get | grep edge | grep mgmt | awk '{print $1;}')
+for pkg in $pkgList
+do
+  kpt alpha rpkg propose-delete $pkg -ndefault
+  kpt alpha rpkg delete $pkg -ndefault
+done
+```
diff --git a/operators/o2ims-operator/controllers/__init__.py b/operators/o2ims-operator/controllers/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/operators/o2ims-operator/controllers/manager.py b/operators/o2ims-operator/controllers/manager.py
new file mode 100644
index 00000000..4652e5b1
--- /dev/null
+++ b/operators/o2ims-operator/controllers/manager.py
@@ -0,0 +1,156 @@
+###########################################################################
+# Copyright 2025 The Nephio Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##########################################################################
+
+from utils import LOG_LEVEL, CLUSTER_PROVISIONER, CREATION_TIMEOUT
+from provisioning_request_controller import *
+from provisioning_request_validation_controller import *
+from datetime import datetime, timezone
+import logging
+import kopf
+import os
+
+
+@kopf.on.startup()
+def configure(settings: kopf.OperatorSettings, memo: kopf.Memo, **_):
+    # Map the configured LOG_LEVEL onto kopf's event-posting level
+    if LOG_LEVEL == "INFO":
+        settings.posting.level = logging.INFO
+    if LOG_LEVEL == "ERROR":
+        settings.posting.level = logging.ERROR
+    if LOG_LEVEL == "WARNING":
+        settings.posting.level = logging.WARNING
+    settings.persistence.finalizer = "provisioningrequests.o2ims.provisioning.oran.org"
+    settings.persistence.progress_storage = kopf.AnnotationsProgressStorage(
+        prefix="provisioningrequests.o2ims.provisioning.oran.org"
+    )
+    settings.persistence.diffbase_storage = kopf.AnnotationsDiffBaseStorage(
+        prefix="provisioningrequests.o2ims.provisioning.oran.org",
+        key="last-handled-configuration",
+    )
+    memo.cluster_provisioner = CLUSTER_PROVISIONER
+    memo.creation_timeout = CREATION_TIMEOUT
+
+
+## kopf.event is designed to show events in `kubectl get events`; for cluster-scoped
+## resources it is currently not possible to show events.
+@kopf.on.resume("o2ims.provisioning.oran.org", "provisioningrequests")
+@kopf.on.create("o2ims.provisioning.oran.org", "provisioningrequests")
+async def create_fn(spec, logger, status, patch: kopf.Patch, memo: kopf.Memo, **kwargs):
+    metadata_name = kwargs["body"]["metadata"]["name"]
+    # Template name will be treated as package name
+    template_name = spec.get("templateName")
+    # Template version will be treated as repository branch/tag/commit
+    template_version = spec.get("templateVersion")
+    template_parameters = spec.get("templateParameters")
+    kopf.event(
+        kwargs["body"],
+        type="Info",
+        reason="Logging",
+        message="Provisioning request validation ongoing",
+    )
+    # Check in case the package variant was manually created
+    _status = check_creation_request_status(request_name=metadata_name, logger=logger)
+    if (
+        not _status["status"]
+        and _status["reason"] == "notFound"
+        and _status["pv"]  # "pv" is a boolean: the package variant already exists
+    ):
+        patch.status["provisioningStatus"] = {
+            "provisioningMessage": "Provisioning request creation failed, package variant already exists",
+            "provisioningState": "failed",
+            "provisioningUpdateTime": datetime.now().strftime(TIME_FORMAT),
+        }
+        kopf.event(
+            kwargs["body"],
+            type="Error",
+            reason="Logging",
+            message="Provisioning request creation failed, package variant already exists",
+        )
+        return
+
+    # TODO: This should be done via an on.validate handler (admission webhooks)
+    request_validation = validate_cluster_creation_request(params=template_parameters)
+
+    if not request_validation["status"]:
+        patch.status["provisioningStatus"] = {
+            "provisioningMessage": "Provisioning request validation failed; reason: "
+            + request_validation["reason"],
+            "provisioningState": "failed",
+            "provisioningUpdateTime": datetime.now().strftime(TIME_FORMAT),
+        }
+        kopf.event(
+            kwargs["body"],
+            type="Error",
+            reason="Logging",
+            message=f"Provisioning request validation failed; reason: {request_validation['reason']}",
+        )
+        return
+
+    @kopf.subhandler()
+    def sub_validations(*, patch, **kwargs):
+        if request_validation["status"]:
+            patch.status["provisioningStatus"] = {
+                "provisioningMessage": "Provisioning request validation done",
+                "provisioningState": "progressing",
+                "provisioningUpdateTime": datetime.now().strftime(TIME_FORMAT),
+            }
+            kopf.event(
+                kwargs["body"],
+                type="Info",
+                reason="Logging",
+                message="Provisioning request validation done",
+            )
+
+    creation_request_output = cluster_creation_request(
+        request_name=metadata_name,
+        template_name=template_name,
+        template_version=template_version,
+        params=template_parameters.copy(),
+        logger=logger,
+    )
+
+    if creation_request_output["provisioningState"] == "failed":
+        raise kopf.PermanentError("Cluster creation permanently failed")
+
+    @kopf.subhandler()
+    def sub_creation(*, patch, **kwargs):
+        patch.status["provisioningStatus"] = {
+            "provisioningMessage": "Cluster instance rendering completed",
+            "provisioningState": "progressing",
+            "provisioningUpdateTime": datetime.now().strftime(TIME_FORMAT),
+        }
+
+    @kopf.subhandler(timeout=memo.creation_timeout)
+    def check_c_status(*, spec, patch, logger, memo: kopf.Memo, **kwargs):
+        creation_state_output = cluster_creation_status(
+            cluster_name=template_parameters["clusterName"],
+            timeout=memo.creation_timeout,
+            cluster_provisioner=memo.cluster_provisioner,
+            logger=logger,
+        )
+        patch.status["provisioningStatus"] = creation_state_output["provisioningStatus"]
+        if "provisionedResourceSet" in creation_state_output:
+            patch.status["provisionedResourceSet"] = creation_state_output[
+                "provisionedResourceSet"
+            ]
+        if creation_state_output["provisioningStatus"]["provisioningState"] == "failed":
+            raise kopf.PermanentError("Cluster creation permanently failed")
+
+
+## Health check probe
+@kopf.on.probe(id="now")
+def get_current_timestamp(**kwargs):
+    return datetime.now(timezone.utc).isoformat()
+
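+# NOTE (sketch): kopf serves the probe above only when started with a liveness
+# endpoint, e.g. `kopf run controllers/manager.py --liveness=http://0.0.0.0:8080/healthz`.
+# The endpoint replies with a JSON object keyed by probe id. A hypothetical manual
+# check (not used by the operator itself; the port depends on the --liveness URL):
+#
+#   import requests
+#   print(requests.get("http://127.0.0.1:8080/healthz").json()["now"])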
diff --git a/operators/o2ims-operator/controllers/provisioning_request_controller.py b/operators/o2ims-operator/controllers/provisioning_request_controller.py
new file mode 100644
index 00000000..6cd3d99c
--- /dev/null
+++ b/operators/o2ims-operator/controllers/provisioning_request_controller.py
@@ -0,0 +1,233 @@
+###########################################################################
+# Copyright 2025 The Nephio Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##########################################################################
+
+from utils import *
+import time
+import uuid
+
+
+def check_creation_request_status(
+    request_name: str = None,
+    namespace: str = "default",
+    logger=None,
+):
+    """
+    :param request_name: Name of the provisioning request
+    :type request_name: str
+    :param namespace: Namespace in which PV will be created
+    :type namespace: str
+    :param logger: logger
+    :type logger:
+    :return: output
+    :rtype: dict
+    """
+    output = check_o2ims_provisioning_request(
+        name=request_name,
+        namespace=namespace,
+        logger=logger,
+    )
+
+    return output
+
+
+# Creating a package variant
+def cluster_creation_request(
+    request_name: str = None,
+    template_name: str = None,
+    template_version: str = None,
+    params: dict = None,
+    namespace: str = "default",
+    logger=None,
+):
+    """
+    :param request_name: Name of the provisioning request
+    :type request_name: str
+    :param template_name: Git repository name which contains the template
+    :type template_name: str
+    :param template_version: Branch of the repository to use for the template
+    :type template_version: str
+    :param params: Parameters to provide to the template
+    :type params: dict
+    :param namespace: Namespace in which PV will be created
+    :type namespace: str
+    :param logger: logger
+    :type logger:
+    :return: output
+    :rtype: dict
+    """
+
+    # Git repository location
+    repo_location = UPSTREAM_PKG_REPO
+    # clusterName is required (validated earlier); remove it so that only
+    # mutator-relevant parameters remain in params
+    cluster_name = params.pop("clusterName")
+    # Generate mutators from template parameters (params); currently only "labels" is handled
+    mutators = []
+    for param in params:
+        if param == "labels":
+            mutators.append(
+                {
+                    "image": "gcr.io/kpt-fn/set-labels:v0.2.0",
+                    "configMap": params["labels"],
+                }
+            )
+
+    # Generate package variant body
+    package_variant_body = {
+        "name": request_name,
+        "repo_location": repo_location,
+        "template_name": template_name,
+        "template_version": template_version,
+        "cluster_name": cluster_name,
+        "mutators": mutators,
+        "namespace": namespace,
+        "create": True,
+    }
+    reason = "Started"
+    _status = "False"
+    provisioning_message = "Cluster instance rendering ongoing"
+    provisioning_state = "progressing"
+    timer = 0
+    # Short timeouts are fine to see if package variant has problems
+    timeout = 30
+    try:
+        status = create_package_variant(
+            name=package_variant_body["name"],
+            namespace=namespace,
+            pv_param=package_variant_body,
+            logger=logger,
+        )
+        while status["status"]:
+            creation_status = get_package_variant(
+                name=request_name, namespace=namespace, logger=logger
+            )
+            if creation_status["status"] and "status" in creation_status["body"].keys():
+                if (
+                    creation_status["body"]["status"]["conditions"] is not None
+                    and len(creation_status["body"]["status"]["conditions"]) > 0
+                ):
+                    # Checking the status of the latest entry of the list
+                    _status = creation_status["body"]["status"]["conditions"][-1][
+                        "status"
+                    ]
+                    reason = creation_status["body"]["status"]["conditions"][-1][
+                        "reason"
+                    ]
+                    if _status == "True":
+                        provisioning_message = "Cluster instance rendering completed"
+                        provisioning_state = "progressing"
+                        break
+                    elif _status == "False":
+                        provisioning_message = (
+                            f"Cluster instance rendering failed {reason}"
+                        )
+                        provisioning_state = "failed"
+                        break
+            elif not creation_status["status"]:
+                _status = "False"
+                provisioning_message = "Cluster instance rendering failed"
+                provisioning_state = "failed"
+                break
+            if timer >= timeout:
+                provisioning_message = (
+                    "Cluster resource creation failed reached timeout"
+                )
+                provisioning_state = "failed"
+                break
+            time.sleep(1)
+            timer += 1
+    except Exception as e:
+        logger.error(
+            f"Exception {e} in creating package variant {package_variant_body['name']} in namespace {namespace}"
+        )
+        provisioning_message = "Cluster instance rendering failed"
+        provisioning_state = "failed"
+
+    output = {
+        "provisioningMessage": provisioning_message,
+        "provisioningState": provisioning_state,
+    }
+    return output
+
+
+# Checking the status of cluster creation
+# TODO check the status of package revision
+def cluster_creation_status(
+    cluster_name: str,
+    namespace: str = "default",
+    timeout=1800,
+    cluster_provisioner="capi",
+    logger=None,
+):
+    """
+    :param cluster_name: Name of the provisioning request
+    :type cluster_name: str
+    :param namespace: Namespace in which PV will be created
+    :type namespace: str
+    :param timeout: Timeout after which cluster creation will be declared failed
+    :type timeout: int
+    :param cluster_provisioner: name of the cluster provisioner
+    :type cluster_provisioner: str
+    :param logger: logger
+    :type logger:
+    :return: output
+    :rtype: dict
+    """
+
+    provisioning_message = "Cluster resource creation ongoing"
+    provisioning_state = "progressing"
+    # Timer to check for timeout
+    timer = 0
+
+    if cluster_provisioner == "capi" and timer <= timeout:
+        while True:
+            cluster_status = get_capi_cluster(
+                name=cluster_name, namespace=namespace, logger=logger
+            )
+            logger.debug(cluster_status)
+            if cluster_status["status"]:
+                if "status" in cluster_status["body"].keys():
+                    if cluster_status["body"]["status"]["phase"] == "Provisioned":
+                        provisioning_message = "Cluster resource created"
+                        provisioning_state = "fulfilled"
+                        break
+            if timer >= timeout:
+                provisioning_message = (
+                    "Cluster resource creation failed reached timeout"
+                )
+                provisioning_state = "failed"
+                break
+            timer += 1
+            time.sleep(1)
+
+    output = {
+        "provisioningStatus": {
+            "provisioningUpdateTime": datetime.now().strftime(TIME_FORMAT),
+            "provisioningMessage": provisioning_message,
+            "provisioningState": provisioning_state,
+        }
+    }
+
+    if provisioning_state == "fulfilled":
+        output.update(
+            {
+                "provisionedResourceSet": {
+                    "oCloudNodeClusterId": str(uuid.uuid4()),
+                    "oCloudInfrastructureResourceIds": [str(uuid.uuid4())],
+                }
+            }
+        )
+    return output
diff --git a/operators/o2ims-operator/controllers/provisioning_request_validation_controller.py b/operators/o2ims-operator/controllers/provisioning_request_validation_controller.py
new file mode 100644
index 00000000..0650b1db
--- /dev/null
+++ b/operators/o2ims-operator/controllers/provisioning_request_validation_controller.py
@@ -0,0 +1,34 @@
+###########################################################################
+# Copyright 2025 The Nephio Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##########################################################################
+
+
+def validate_cluster_creation_request(params: dict = None):
+    """
+    :param params: Parameters to provide to the template
+    :type params: dict
+    :return: request_validation
+    :rtype: dict
+    """
+    # Checking that clusterName is present in the template parameters
+    if "clusterName" in params:
+        request_validation = {"status": True}
+    else:
+        request_validation = {
+            "reason": "clusterName is missing in template parameters",
+            "status": False,
+        }
+
+    return request_validation
diff --git a/operators/o2ims-operator/controllers/utils.py b/operators/o2ims-operator/controllers/utils.py
new file mode 100644
index 00000000..dd182975
--- /dev/null
+++ b/operators/o2ims-operator/controllers/utils.py
@@ -0,0 +1,274 @@
+###########################################################################
+# Copyright 2025 The Nephio Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##########################################################################
+
+import os
+from datetime import datetime
+from dateutil.tz import tzutc
+import requests
+
+requests.packages.urllib3.disable_warnings()
+
+TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+# Allowed values: vanilla/openshift (normalized to lowercase)
+KUBERNETES_TYPE = str(os.getenv("KUBERNETES_TYPE", "vanilla")).lower()
+# Labels to put inside the owned resources
+LABEL = {"owner": "o2ims.provisioning.oran.org.provisioningrequests"}
+# Log level of the controller
+LOG_LEVEL = str(os.getenv("LOG_LEVEL", "INFO"))
+# Whether to verify HTTPS certificates when communicating with the cluster.
+# bool() of any non-empty string is always True, so parse the value explicitly.
+HTTPS_VERIFY = str(os.getenv("HTTPS_VERIFY", "false")).lower() in ("true", "1", "yes")
+# Token used to communicate with the Kube cluster
+TOKEN = os.getenv("TOKEN", "/var/run/secrets/kubernetes.io/serviceaccount/token")
+# Read the token content, tolerating a missing file (e.g. when running outside a cluster)
+try:
+    with open(TOKEN) as token_file:
+        TOKEN = token_file.read()
+except OSError:
+    TOKEN = ""
+KUBERNETES_BASE_URL = str(os.getenv("KUBERNETES_BASE_URL", "http://127.0.0.1:8080"))
+UPSTREAM_PKG_REPO = os.getenv("UPSTREAM_PKG_REPO", "catalog-infra-capi")
+
+HEADERS_DICT = {
+    "Content-type": "application/json",
+    "Accept": "application/json",
+    "User-Agent": "kopf_o2ims_operator/python",
+    "Authorization": "Bearer {}".format(TOKEN),
+}
+CLUSTER_PROVISIONER = str(os.getenv("CLUSTER_PROVISIONER", "capi"))
+CREATION_TIMEOUT = int(os.getenv("CREATION_TIMEOUT", 1800))
+
+
+def create_package_variant(
+    name: str = None,
+    namespace: str = None,
+    pv_param: dict = None,
+    label: dict = LABEL,
+    logger=None,
+):
+    """
+    :param name: name of the package variant
+    :type name: str
+    :param namespace: Namespace name
+    :type namespace: str
+    :param pv_param: parameters of package variant
+    :type pv_param: dict
+    :param label: label for pv resource
+    :type label: dict
+    :param logger: logger
+    :type logger:
+    :return: response
+    :rtype: dict
+    """
+    if logger:
+        logger.debug("create_package_variant")
+    r = get_package_variant(name, namespace, logger)
+    if "reason" in r and r["reason"] == "notFound" and pv_param["create"]:
+        pv_body = {
+            "apiVersion": "config.porch.kpt.dev/v1alpha1",
+            "kind": "PackageVariant",
+            # metadata.labels must be a string-to-string map, not a stringified dict
+            "metadata": {"name": pv_param["name"], "labels": label},
+            "spec": {
+                "upstream": {
+                    "repo": pv_param["repo_location"],
+                    "package": pv_param["template_name"],
+                    "revision": pv_param["template_version"],
+                },
+                "downstream": {
+                    # TODO: should the repo be configurable instead of being hardcoded?
+                    "repo": "mgmt",
+                    "package": pv_param["cluster_name"],
+                },
+                "annotations": {"approval.nephio.org/policy": "initial"},
+                "pipeline": {"mutators": pv_param["mutators"]},
+            },
+        }
+        if logger:
+            logger.debug(
+                f"package-variant {name} does not exist in namespace {namespace}, o2ims operator is creating it now"
+            )
+        r = requests.post(
+            f"{KUBERNETES_BASE_URL}/apis/config.porch.kpt.dev/v1alpha1/namespaces/{namespace}/packagevariants",
+            headers=HEADERS_DICT,
+            json=pv_body,
+            verify=HTTPS_VERIFY,
+        )
+        if logger:
+            logger.debug(
+                "response of the request to create package variant %s is %s"
+                % (r.request.url, r.json())
+            )
+        if r.status_code in [200, 201]:
+            response = {"status": True, "name": name}
+        elif r.status_code in [401, 403]:
+            response = {"status": False, "reason": "unauthorized"}
+        elif r.status_code == 404:
+            response = {"status": False, "reason": "notFound"}
+        elif r.status_code == 400:
+            response = {"status": False, "reason": r.json()["message"]}
+        elif r.status_code == 500:
+            response = {"status": False, "reason": "k8sApi server is not reachable"}
+        else:
+            response = {"status": False, "reason": r.json()}
+    elif r["status"] and "name" in r:
+        response = {"status": r["status"], "name": r["name"]}
+    else:
+        response = {"status": r["status"], "reason": r["reason"]}
+    if logger:
+        logger.debug(response)
+    return response
+
+
+def get_package_variant(name: str = None, namespace: str = None, logger=None):
+    """
+    :param name: name of the package variant
+    :type name: str
+    :param namespace: Namespace name
+    :type namespace: str
+    :param logger: logger
+    :type logger:
+    :return: response
+    :rtype: dict
+    """
+    if logger:
+        logger.debug("get_package_variant")
+    try:
+        r = requests.get(
+            f"{KUBERNETES_BASE_URL}/apis/config.porch.kpt.dev/v1alpha1/namespaces/{namespace}/packagevariants/{name}",
+            headers=HEADERS_DICT,
+            verify=HTTPS_VERIFY,
+        )
+    except Exception as e:
+        if logger:
+            logger.debug("get_package_variant error: %s" % (e))
+        return {"status": False, "reason": f"NotAbleToCommunicateWithTheCluster {e}"}
+    if logger:
+        logger.debug(
+            "response of the request to get package variant %s is %s"
+            % (r.request.url, r.json())
+        )
+    if r.status_code in [200]:
+        response = {"status": True, "name": name, "body": r.json()}
+    elif r.status_code in [401, 403]:
+        response = {"status": False, "reason": "unauthorized"}
+    elif r.status_code == 404:
+        response = {"status": False, "reason": "notFound"}
+    elif r.status_code == 500:
+        response = {"status": False, "reason": "k8sApi server is not reachable"}
+    else:
+        response = {"status": False, "reason": r.json()}
+    if logger:
+        logger.debug("Status %s" % (response))
+    return response
+
+
+def check_o2ims_provisioning_request(
+    name: str = None, namespace: str = None, logger=None
+):
+    """
+    :param name: cluster name
+    :type name: str
+    :param namespace: Namespace name
+    :type namespace: str
+    :param logger: logger
+    :type logger:
+    :return: response
+    :rtype: dict
+    """
+    if logger:
+        logger.debug("check_o2ims_provisioning_request")
+
+    try:
+        # NOTE: ProvisioningRequest is cluster-scoped, so the whole collection is queried
+        r = requests.get(
+            f"{KUBERNETES_BASE_URL}/apis/o2ims.provisioning.oran.org/v1alpha1/provisioningrequests",
+            headers=HEADERS_DICT,
+            verify=HTTPS_VERIFY,
+        )
+    except Exception as e:
+        if logger:
+            logger.debug("check_o2ims_provisioning_request error: %s" % (e))
+        return {"status": False, "reason": f"NotAbleToCommunicateWithTheCluster {e}"}
+    if r.status_code in [200] and "status" in r.json().keys():
+        response = {
+            "status": True,
+            "provisioningStatus": r.json()["status"]["provisioningStatus"],
+        }
+ if "provisionedResourceSet" in r.json()["status"]: + response.update( + {"provisionedResourceSet": r.json()["status"]["provisionedResourceSet"]} + ) + elif r.status_code in [200] and "status" not in r.json().keys(): + response = { + "status": True, + "provisioningStatus": { + "provisioningMessage": "Cluster provisioning request received", + "provisioningState": "progressing", + }, + } + elif r.status_code in [401, 403]: + response = {"status": False, "reason": "unauthorized"} + elif r.status_code == 404: + response = {"status": False, "reason": "notFound"} + creation_status = get_package_variant( + name=name, namespace=namespace, logger=logger + ) + response.update({"pv": creation_status["status"]}) + elif r.status_code == 500: + response = {"status": False, "reason": "k8sApi server is not reachable"} + else: + response = { + "status": False, + "reason": r.json(), + } + if logger: + logger.debug(f"check_o2ims_provisioning_request response: {r.json()}") + return response + + +def get_capi_cluster(name: str = None, namespace: str = None, logger=None): + """ + :param name: cluster name + :type name: str + :param namespace: Namespace name + :type namespace: str + :param logger: logger + :type logger: + :return: response + :rtype: dict + """ + if logger: + logger.debug("get_capi_cluster") + + try: + r = requests.get( + f"{KUBERNETES_BASE_URL}/apis/cluster.x-k8s.io/v1beta1/namespaces/{namespace}/clusters/{name}", + headers=HEADERS_DICT, + verify=HTTPS_VERIFY, + ) + except Exception as e: + if logger: + logger.debug("get_capi_cluster error: %s" % (e)) + return {"status": False, "reason": f"NotAbleToCommunicateWithTheCluster {e}"} + if r.status_code in [200]: + response = {"status": True, "body": r.json()} + elif r.status_code in [401, 403]: + response = {"status": False, "reason": "unauthorized"} + elif r.status_code == 404: + response = {"status": False, "reason": "notFound"} + elif r.status_code == 500: + response = {"status": False, "reason": "k8sApi server is not reachable"} + else: + response = { + "status": False, + "reason": r.json()["status"]["conditions"][0]["message"], + } + if logger: + logger.debug(f"get_capi_cluster response: {r.json()}") + return response diff --git a/operators/o2ims-operator/requirements.txt b/operators/o2ims-operator/requirements.txt new file mode 100644 index 00000000..eb0b904a --- /dev/null +++ b/operators/o2ims-operator/requirements.txt @@ -0,0 +1,5 @@ +## requirements.txt +jinja2==3.1.2 +kopf==1.36.0 +requests==2.32.3 +python-dateutil \ No newline at end of file diff --git a/operators/o2ims-operator/tests/__init__.py b/operators/o2ims-operator/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/operators/o2ims-operator/tests/create-cluster.sh b/operators/o2ims-operator/tests/create-cluster.sh new file mode 100755 index 00000000..8ed0832a --- /dev/null +++ b/operators/o2ims-operator/tests/create-cluster.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +## A script to create a kind cluster for testing the operator +set -eo pipefail +NEPHIO_TAG=main +kpt_dir=/tmp +CATALOG_REPO=https://github.com/nephio-project/catalog.git + +echo "------Deploying Nephio components from tag $NEPHIO_TAG------" + +# Create a kpt package +create_kpt_package() { + rm -rf "${kpt_dir:?}/$2" + kpt pkg get --for-deployment "$1"/$NEPHIO_TAG $kpt_dir/"$2" + kpt fn render "${kpt_dir:?}/$2" + kpt live init "${kpt_dir:?}/$2" + kpt live apply "${kpt_dir:?}/$2" --reconcile-timeout=15m --output=table + rm -rf "${kpt_dir:?}/$2" +} + +## Always delete the cluster +kind delete cluster 
-n o2ims-mgmt || true
+kind create cluster --config="$(dirname "$0")"/mgmt-cluster.yaml --wait 5m
+kubectl cluster-info --context kind-o2ims-mgmt
+
+# Gitea
+create_kpt_package $CATALOG_REPO/distros/sandbox/gitea@origin gitea
+# Porch
+create_kpt_package $CATALOG_REPO/nephio/core/porch@origin porch
+# MetalLB
+create_kpt_package $CATALOG_REPO/distros/sandbox/metallb@origin metallb
+# MetalLB Configuration
+create_kpt_package $CATALOG_REPO/distros/sandbox/metallb-sandbox-config@origin metallb-sandbox-config
+# Gitea IP Address
+kubectl get svc -n gitea gitea
+# Cluster Provisioning Cert Manager
+create_kpt_package $CATALOG_REPO/distros/sandbox/cert-manager@origin cert-manager
+# CAPI
+create_kpt_package $CATALOG_REPO/infra/capi/cluster-capi@origin cluster-capi
+# CAPI Infra
+create_kpt_package $CATALOG_REPO/infra/capi/cluster-capi-infrastructure-docker@origin cluster-capi-infrastructure-docker
+# CAPI Cluster Templates
+create_kpt_package $CATALOG_REPO/infra/capi/cluster-capi-kind-docker-templates@origin cluster-capi-kind-docker-templates
+# ConfigSync
+create_kpt_package $CATALOG_REPO/nephio/core/configsync@origin configsync
+# Resource Backend Operator
+create_kpt_package $CATALOG_REPO/nephio/optional/resource-backend@origin resource-backend
+# Nephio Core Operator
+create_kpt_package $CATALOG_REPO/nephio/core/nephio-operator@origin nephio-operator
+
+# Create Gitea secret
+kubectl apply -f - < /tmp/porch-token
+
+# Create CRD
+kubectl create -f https://raw.githubusercontent.com/nephio-project/api/refs/heads/main/config/crd/bases/o2ims.provisioning.oran.org_provisioningrequests.yaml
+export TOKEN=/tmp/porch-token ## important for development environment
+
+# Exposing the kube proxy for development, killing previous proxy sessions if they exist
+pkill kubectl
+nohup kubectl proxy --port 8080 &>/dev/null &
+echo "Cluster is properly configured and proxy is running at 8080"
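+
+# NOTE (sketch): utils.py defaults KUBERNETES_BASE_URL to http://127.0.0.1:8080,
+# which matches the proxy started above. A quick, optional sanity check from the
+# development virtualenv (path and port are the defaults used in this repo):
+#   python3 -c 'import requests; print(requests.get("http://127.0.0.1:8080/api").status_code)'  # expect 200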
diff --git a/operators/o2ims-operator/tests/deployment/operator.yaml b/operators/o2ims-operator/tests/deployment/operator.yaml
new file mode 100644
index 00000000..9967edcd
--- /dev/null
+++ b/operators/o2ims-operator/tests/deployment/operator.yaml
@@ -0,0 +1,116 @@
+###########################################################################
+# Copyright 2025 The Nephio Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##########################################################################
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: o2ims
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: o2ims
+  name: o2ims-operator
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: o2ims:provisioning-role
+rules:
+  - apiGroups: [""]
+    resources: [events]
+    verbs: [create]
+  - apiGroups: [apiextensions.k8s.io]
+    resources: [customresourcedefinitions]
+    verbs: [get, list, watch, create, update, patch]
+  - apiGroups: ["o2ims.provisioning.oran.org"]
+    resources: [provisioningrequests,provisioningrequests/status,provisioningrequests/finalizers]
+    verbs: [get, list, watch, update, patch]
+  - apiGroups: ["cluster.x-k8s.io"]
+    resources: [clusters]
+    verbs: [get, list, watch, create, update, patch, delete]
+  # RBAC apiGroups must be bare group names; they never include an API version
+  - apiGroups: [admissionregistration.k8s.io]
+    resources: [validatingwebhookconfigurations, mutatingwebhookconfigurations]
+    verbs: [create, patch]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: o2ims:provisioning
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: o2ims:provisioning-role
+subjects:
+  - kind: ServiceAccount
+    name: o2ims-operator
+    namespace: o2ims
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: o2ims:porch-controllers-packagevariants
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: porch-controllers-packagevariants
+subjects:
+- kind: ServiceAccount
+  name: o2ims-operator
+  namespace: o2ims
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: o2ims-operator
+  namespace: o2ims
+  labels:
+    app.kubernetes.io/name: nephio-o2ims
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: nephio-o2ims
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: nephio-o2ims
+    spec:
+      securityContext:
+        runAsGroup: 0
+        runAsUser: 0
+      containers:
+        - name: nephio-o2ims
+          image: docker.io/library/o2ims:latest
+#          command: ["sleep","10000"]
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: 'UPSTREAM_PKG_REPO'
+              value: 'catalog-infra-capi'
+            - name: 'KUBERNETES_BASE_URL'
+              value: 'https://kubernetes.default.svc'
+          resources:
+            requests:
+              memory: "256Mi"
+              cpu: "100m"
+            limits:
+              memory: "256Mi"
+              cpu: "100m"
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      serviceAccountName: o2ims-operator
+      terminationGracePeriodSeconds: 5
diff --git a/operators/o2ims-operator/tests/mgmt-cluster.yaml b/operators/o2ims-operator/tests/mgmt-cluster.yaml
new file mode 100644
index 00000000..6821e13c
--- /dev/null
+++ b/operators/o2ims-operator/tests/mgmt-cluster.yaml
@@ -0,0 +1,12 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+name: o2ims-mgmt
+networking:
+  podSubnet: "10.196.0.0/16"
+  serviceSubnet: "10.97.0.0/16"
+nodes:
+- role: control-plane
+  image: kindest/node:v1.31.0
+  extraMounts:
+    - hostPath: /var/run/docker.sock
+      containerPath: /var/run/docker.sock
diff --git a/operators/o2ims-operator/tests/sa-test-pod.yaml b/operators/o2ims-operator/tests/sa-test-pod.yaml
new file mode 100644
index 00000000..3d6d4d0e
--- /dev/null
+++ b/operators/o2ims-operator/tests/sa-test-pod.yaml
@@ -0,0 +1,50 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: o2ims:events-role
+rules:
+  - apiGroups: [""]
+    resources: [events]
+    verbs: [create]
+  - apiGroups: ["o2ims.provisioning.oran.org"]
+    resources:
[provisioningrequests,provisioningrequests/status,provisioningrequests/finalizers] + verbs: [get, list, watch, update, patch] + - apiGroups: ["cluster.x-k8s.io"] + resources: [clusters] + verbs: [get, list, watch, create, update, patch, delete] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: o2ims:events +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: o2ims:events-role +subjects: + - kind: ServiceAccount + name: porch-server + namespace: porch-system +--- +apiVersion: v1 +kind: Pod +metadata: + name: porch-sa-test + namespace: porch-system + labels: + app: testo2ims +spec: + serviceAccountName: porch-server + ## In case you have reached docker limit + # imagePullSecrets: + # - name: docker-registry + containers: + - image: busybox:1.29 + imagePullPolicy: IfNotPresent + name: busybox + command: + - /bin/sh + args: + - -c + - sleep 100000 + diff --git a/operators/o2ims-operator/tests/test_utils.py b/operators/o2ims-operator/tests/test_utils.py new file mode 100644 index 00000000..c3ba131d --- /dev/null +++ b/operators/o2ims-operator/tests/test_utils.py @@ -0,0 +1,264 @@ +########################################################################### +# Copyright 2025 The Nephio Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+########################################################################## + +import responses +import os +import pytest +import random +import string + +from controllers.utils import * + +# Constants used for testing +NAME = "test_name" +NAMESPACE = "test_ns" +TEST_JSON = {"status": {"conditions": [{"message": "test"}]}, "message": "message"} +PV_PARAM = { + "name": "name", + "repo_location": "location", + "template_name": "template", + "template_version": "version", + "cluster_name": "cluster", + "mutators": "mutators", + "namespace": "namespace", + "create": False, +} +PV_REV = { + "items": [ + { + "metadata": {"name": "name"}, + "spec": {"lifecycle": "lifecycle", "packageName": NAME}, + } + ] +} +PR_PARAMS = { + "status": { + "provisioningStatus": "provisioningStatus", + "provisionedResourceSet": "provisionedResourceSet", + } +} +PACKAGE_VARIANTS_URI = f"{KUBERNETES_BASE_URL}/apis/config.porch.kpt.dev/v1alpha1/namespaces/{NAMESPACE}/packagevariants" +PACKAGE_REVISIONS_URI = f"{KUBERNETES_BASE_URL}/apis/porch.kpt.dev/v1alpha1/namespaces/{NAMESPACE}/packagerevisions" +PROVISIONING_REQUEST_URI = f"{KUBERNETES_BASE_URL}/apis/o2ims.provisioning.oran.org/v1alpha1/provisioningrequests" +CAPI_URI = f"{KUBERNETES_BASE_URL}/apis/cluster.x-k8s.io/v1beta1/namespaces/{NAMESPACE}/clusters/{NAME}" + + +@pytest.fixture(autouse=True) +def setup_and_teardown(): + # Create a test token in /tmp + test_utils_token_path = "/tmp/test_utils_token" + test_utils_token_path += "".join(random.choices(string.ascii_letters + string.digits, k=10)) + os.environ["TOKEN"] = test_utils_token_path + with open(test_utils_token_path, "w") as fp: + pass + # Wait for tests to finish + yield + # Cleanup token + if os.path.exists(test_utils_token_path): + os.remove(test_utils_token_path) + + +@responses.activate +@pytest.mark.parametrize( + "get_code, post_code, status, create, response_2, response_2_value, exception", + [ + (200, None, True, False, "name", NAME, False), + (401, None, False, False, "reason", "unauthorized", False), + (403, None, False, False, "reason", "unauthorized", False), + (404, 200, True, True, "name", NAME, False), + (404, 201, True, True, "name", NAME, False), + (404, 401, False, True, "reason", "unauthorized", False), + (404, 403, False, True, "reason", "unauthorized", False), + (404, 404, False, True, "reason", "notFound", False), + (404, 400, False, True, "reason", TEST_JSON["message"], False), + (404, 1234, False, True, "reason", TEST_JSON, False), + (404, None, False, True, "reason", "NotAbleToCommunicateWithTheCluster ", True), + (404, 200, False, False, "reason", "notFound", False), + (500, None, False, False, "reason", "k8sApi server is not reachable", False), + (1234, None, False, False, "reason", TEST_JSON, False), + (None, None, False, False, "reason", "NotAbleToCommunicateWithTheCluster ", True), + ], +) +def test_create_package_variant(get_code, post_code, status, create, response_2, response_2_value, exception): + if not exception: + responses.get( + f"{PACKAGE_VARIANTS_URI}/{NAME}", + json=TEST_JSON, + status=get_code, + ) + else: + responses.get( + f"{PACKAGE_VARIANTS_URI}/{NAME}", + body=Exception(""), + ) + + pv_params = PV_PARAM.copy() + if get_code == 404 and create: + responses.post( + PACKAGE_VARIANTS_URI, + json=TEST_JSON, + status=post_code, + ) + pv_params.update({"create": True}) + + response = create_package_variant(NAME, NAMESPACE, pv_params) + assert response["status"] == status and response[response_2] == response_2_value + + +@responses.activate 
+@pytest.mark.parametrize( + "http_code, status, response_2, response_2_value, response_3, response_3_value, exception", + [ + (200, True, "name", NAME, "body", TEST_JSON, False), + (401, False, "reason", "unauthorized", None, None, False), + (403, False, "reason", "unauthorized", None, None, False), + (404, False, "reason", "notFound", None, None, False), + (1234, False, "reason", TEST_JSON, None, None, False), + (None, False, "reason", "NotAbleToCommunicateWithTheCluster ", None, None, True), + ], +) +def test_get_package_variant( + http_code, + status, + response_2, + response_2_value, + response_3, + response_3_value, + exception, +): + if not exception: + responses.get( + f"{PACKAGE_VARIANTS_URI}/{NAME}", + json=TEST_JSON, + status=http_code, + ) + else: + responses.get( + f"{PACKAGE_VARIANTS_URI}/{NAME}", + body=Exception(""), + ) + response = get_package_variant(NAME, NAMESPACE) + assert response["status"] == status and response[response_2] == response_2_value + if response_3: + assert response[response_3] == response_3_value + + +@responses.activate +@pytest.mark.parametrize( + "pr_code, status, status_response, pv_code, response_2, response_2_value, response_3, response_3_value, response_3_exception, exception", + [ + (200, True, True, None, "provisioningStatus", PR_PARAMS["status"]["provisioningStatus"], None, None, None, False), + ( + 200, + True, + False, + None, + "provisioningStatus", + { + "provisioningMessage": "Cluster provisioning request received", + "provisioningState": "progressing", + }, + None, + None, + None, + False, + ), + (401, False, False, None, "reason", "unauthorized", None, None, None, False), + (403, False, False, None, "reason", "unauthorized", None, None, None, False), + (404, False, False, 200, "reason", "notFound", "pv", True, None, False), + (404, False, False, 401, "reason", "notFound", "pv", False, None, False), + (404, False, False, 403, "reason", "notFound", "pv", False, None, False), + (404, False, False, 404, "reason", "notFound", "pv", False, None, False), + (404, False, False, 1234, "reason", "notFound", "pv", False, None, False), + (404, False, False, None, "reason", "notFound", "pv", False, True, False), + (1234, False, False, None, "reason", PR_PARAMS, None, None, None, False), + (None, False, False, None, "reason", "NotAbleToCommunicateWithTheCluster ", None, None, None, True), + ], +) +def test_check_o2ims_provisioning_request( + pr_code, + status, + status_response, + pv_code, + response_2, + response_2_value, + response_3, + response_3_value, + response_3_exception, + exception, +): + if not exception: + pr_params = PR_PARAMS.copy() + if pr_code == 200 and not status_response: + pr_params.pop("status") + + responses.get( + PROVISIONING_REQUEST_URI, + json=pr_params, + status=pr_code, + ) + + else: + responses.get( + PROVISIONING_REQUEST_URI, + body=Exception(""), + ) + + if pv_code and not response_3_exception: + responses.get( + f"{PACKAGE_VARIANTS_URI}/{NAME}", + json=TEST_JSON, + status=pv_code, + ) + elif pv_code and response_3_exception: + responses.get( + f"{PACKAGE_VARIANTS_URI}/{NAME}", + body=Exception(""), + ) + response = check_o2ims_provisioning_request(NAME, NAMESPACE) + print(response) + assert response["status"] == status and response[response_2] == response_2_value + + if pv_code: + assert response[response_3] == response_3_value + + +@responses.activate +@pytest.mark.parametrize( + "http_code, status, response_2, response_2_value, exception", + [ + (200, True, "body", TEST_JSON, False), + (401, False, "reason", 
"unauthorized", False), + (403, False, "reason", "unauthorized", False), + (404, False, "reason", "notFound", False), + (1234, False, "reason", TEST_JSON["status"]["conditions"][0]["message"], False), + (None, False, "reason", "NotAbleToCommunicateWithTheCluster ", True), + ], +) +def test_get_capi_cluster(http_code, status, response_2, response_2_value, exception): + if not exception: + responses.get( + CAPI_URI, + json=TEST_JSON, + status=http_code, + ) + else: + responses.get( + CAPI_URI, + body=Exception(""), + ) + response = get_capi_cluster(NAME, NAMESPACE) + assert response["status"] == status and response[response_2] == response_2_value diff --git a/operators/o2ims-operator/tests/unit_test_requirements.txt b/operators/o2ims-operator/tests/unit_test_requirements.txt new file mode 100644 index 00000000..2ec506dc --- /dev/null +++ b/operators/o2ims-operator/tests/unit_test_requirements.txt @@ -0,0 +1,2 @@ +responses +pytest \ No newline at end of file diff --git a/operators/o2ims-operator/tox.ini b/operators/o2ims-operator/tox.ini new file mode 100644 index 00000000..dee53306 --- /dev/null +++ b/operators/o2ims-operator/tox.ini @@ -0,0 +1,18 @@ +[tox] +envlist = py311, lint + +[testenv:py311] +deps = + -r requirements.txt + -r tests/unit_test_requirements.txt + pytest-cov +commands = + pytest --maxfail=1 --disable-warnings -q + +# Lint environment for running code style checks +[testenv:lint] +# Dependencies for linting with flake8 +deps = + flake8 +commands = + flake8 controllers