diff --git a/ocs_ci/helpers/dr_helpers.py b/ocs_ci/helpers/dr_helpers.py
index d0008fabce5..32a6c96a1b7 100644
--- a/ocs_ci/helpers/dr_helpers.py
+++ b/ocs_ci/helpers/dr_helpers.py
@@ -35,6 +35,7 @@
     TimeoutSampler,
     CommandFailed,
     run_cmd,
+    exec_cmd,
 )
 from ocs_ci.helpers.helpers import run_cmd_verify_cli_output
 
@@ -1321,7 +1322,6 @@ def verify_drpolicy_cli(switch_ctx=None):
 
 
 @retry(UnexpectedBehaviour, tries=40, delay=5, backoff=5)
 def verify_backup_is_taken():
-
     """
     Function to verify backup is taken
@@ -1661,3 +1661,66 @@ def generate_kubeobject_capture_interval():
         return 5
     else:
         return capture_interval
+
+
+def disable_dr_rdr():
+    """
+    Disable DR for all RDR protected applications.
+
+    Annotates every DRPC with the ramen "do-not-delete-pvc" annotation so
+    the application PVCs survive DR removal, then deletes all DRPCs and
+    waits until none remain.
+
+    Raises:
+        UnexpectedBehaviour: If the DRPCs are not deleted within the timeout
+
+    """
+    config.switch_acm_ctx()
+
+    # Annotate each drpc so ramen keeps the application PVCs on deletion
+    drpc_obj = ocp.OCP(kind=constants.DRPC)
+    drpcs = drpc_obj.get(all_namespaces=True).get("items")
+    for drpc in drpcs:
+        namespace = drpc["metadata"]["namespace"]
+        name = drpc["metadata"]["name"]
+        logger.info(f"Adding annotation to drpc - {name}")
+        annotation_data = (
+            '{"metadata": {"annotations": {'
+            '"drplacementcontrol.ramendr.openshift.io/do-not-delete-pvc": "true"}}}'
+        )
+        cmd = f"oc patch {constants.DRPC} {name} -n {namespace} --type=merge -p '{annotation_data}'"
+        run_cmd(cmd)
+
+    # Delete all drpc and wait until the API reports none left
+    logger.info("Deleting the drpc...")
+    run_cmd("oc delete drpc --all -A")
+    sample = TimeoutSampler(
+        timeout=300,
+        sleep=5,
+        func=verify_drpc_deletion,
+        cmd="oc get drpc -A",
+        expected_output_lst=["No resources found"],
+    )
+    if not sample.wait_for_func_status(result=True):
+        raise UnexpectedBehaviour("All drpcs are not deleted")
+
+
+@retry(CommandFailed, tries=10, delay=30, backoff=1)
+def verify_drpc_deletion(cmd, expected_output_lst):
+    """
+    Function to validate drpc deletion
+
+    Args:
+        cmd (str): cli command
+        expected_output_lst (list): Strings that must all appear in the command output
+
+    Returns:
+        bool: True if all strings are included in the command output, False otherwise
+
+    """
+    drpc_out = exec_cmd(cmd)
+    # NOTE: "No resources found" is written to stderr by oc/kubectl, not stdout
+    for expected_output in expected_output_lst:
+        if expected_output not in drpc_out.stderr.decode():
+            return False
+    return True
diff --git a/tests/functional/disaster-recovery/regional-dr/test_disable_dr.py b/tests/functional/disaster-recovery/regional-dr/test_disable_dr.py
new file mode 100644
index 00000000000..d18e724ece3
--- /dev/null
+++ b/tests/functional/disaster-recovery/regional-dr/test_disable_dr.py
@@ -0,0 +1,96 @@
+import logging
+import pytest
+from time import sleep
+
+from ocs_ci.framework import config
+from ocs_ci.framework.testlib import tier1
+from ocs_ci.framework.pytest_customization.marks import rdr, turquoise_squad
+from ocs_ci.helpers import dr_helpers
+from ocs_ci.ocs import constants
+from ocs_ci.ocs.resources.drpc import DRPC
+
+
+logger = logging.getLogger(__name__)
+
+
+@rdr
+@tier1
+@turquoise_squad
+class TestDisableDR:
+    """
+    Test Disable Disaster Recovery
+
+    """
+
+    @pytest.mark.parametrize(
+        argnames=["pvc_interface"],
+        argvalues=[
+            pytest.param(
+                constants.CEPHBLOCKPOOL,
+                marks=pytest.mark.polarion_id("OCS-6209"),
+            ),
+            pytest.param(
+                constants.CEPHFILESYSTEM,
+                marks=pytest.mark.polarion_id("OCS-6241"),
+            ),
+        ],
+    )
+    def test_disable_dr(self, pvc_interface, dr_workload):
+        """
+        Test to verify disable DR of application
+
+        """
+
+        rdr_workload = dr_workload(
+            num_of_subscription=1, num_of_appset=1, pvc_interface=pvc_interface
+        )
+        drpc_subscription = DRPC(namespace=rdr_workload[0].workload_namespace)
+        drpc_appset = DRPC(
+            namespace=constants.GITOPS_CLUSTER_NAMESPACE,
+            resource_name=f"{rdr_workload[1].appset_placement_name}-drpc",
+        )
+        drpc_objs = [drpc_subscription, drpc_appset]
+
+        scheduling_interval = dr_helpers.get_scheduling_interval(
+            rdr_workload[0].workload_namespace,
+        )
+        wait_time = 2 * scheduling_interval  # Time in minutes
+        logger.info(f"Waiting for {wait_time} minutes to run IOs")
+        sleep(wait_time * 60)
+
+        # Check lastGroupSyncTime
+        for drpc_obj in drpc_objs:
+            dr_helpers.verify_last_group_sync_time(drpc_obj, scheduling_interval)
+
+        logger.info("Verified the lastGroupSyncTime before disabling the DR")
+
+        primary_cluster_name = dr_helpers.get_current_primary_cluster_name(
+            rdr_workload[0].workload_namespace,
+        )
+
+        # Disable DR
+        dr_helpers.disable_dr_rdr()
+
+        # Verify resources deletion from primary cluster
+        config.switch_to_cluster_by_name(primary_cluster_name)
+
+        # Verify replication resource deletion on primary cluster
+        for workload in rdr_workload:
+            logger.info(
+                f"Validating replication resource deletion in namespace {workload.workload_namespace}..."
+            )
+            dr_helpers.wait_for_replication_resources_deletion(
+                workload.workload_namespace,
+                timeout=300,
+                check_state=False,
+            )
+            # Verify pod status on primary cluster
+            logger.info(
+                f"Validate pods and pvc in {workload.workload_namespace} be in Running state"
+            )
+            dr_helpers.wait_for_all_resources_creation(
+                workload.workload_pvc_count,
+                workload.workload_pod_count,
+                workload.workload_namespace,
+                skip_replication_resources=True,
+            )