Addressed comments
keemano committed Sep 23, 2024
1 parent f8b35a5 commit d9f20a6
Showing 2 changed files with 35 additions and 34 deletions.
9 changes: 6 additions & 3 deletions ocs_ci/helpers/dr_helpers.py
@@ -1603,7 +1603,8 @@ def generate_kubeobject_capture_interval():
    else:
        return capture_interval

def disable_dr_rdr(workload_type):

def disable_dr_rdr():
    """
    Disable DR for the applications
    """
@@ -1616,8 +1617,10 @@ def disable_dr_rdr(workload_type):
        namespace = drpc["metadata"]["namespace"]
        name = drpc["metadata"]["name"]
        logger.info(f"Adding annotation to drpc - {name}")
        annotation_data = '{"metadata": {"annotations": {' \
            '"drplacementcontrol.ramendr.openshift.io/do-not-delete-pvc": "true"}}}'
        annotation_data = (
            '{"metadata": {"annotations": {'
            '"drplacementcontrol.ramendr.openshift.io/do-not-delete-pvc": "true"}}}'
        )
        cmd = f"oc patch {constants.DRPC} {name} -n {namespace} --type=merge -p '{annotation_data}'"
        run_cmd(cmd)

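For reference, a minimal standalone sketch of the merge patch the reworked disable_dr_rdr() issues for each DRPC, written against the standard library instead of run_cmd. Only the annotation key/value and the oc patch --type=merge -p form come from the hunk above; the "drpc" short resource name and the sample name/namespace in the usage comment are assumptions for illustration.

import json
import subprocess


def annotate_drpc_do_not_delete_pvc(name: str, namespace: str) -> None:
    """Add the do-not-delete-pvc annotation to a single DRPC via a merge patch."""
    patch = {
        "metadata": {
            "annotations": {
                "drplacementcontrol.ramendr.openshift.io/do-not-delete-pvc": "true"
            }
        }
    }
    # Equivalent of: oc patch drpc <name> -n <namespace> --type=merge -p '<patch JSON>'
    subprocess.run(
        ["oc", "patch", "drpc", name, "-n", namespace, "--type=merge", "-p", json.dumps(patch)],
        check=True,
    )


# Hypothetical usage:
# annotate_drpc_do_not_delete_pvc("busybox-drpc", "busybox-workloads")
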
60 changes: 29 additions & 31 deletions tests/functional/disaster-recovery/regional-dr/test_disable_dr.py
@@ -1,12 +1,12 @@
import logging
from time import sleep

import pytest

from ocs_ci.framework import config
from ocs_ci.framework.testlib import tier1
from ocs_ci.framework.pytest_customization.marks import turquoise_squad
from ocs_ci.helpers import dr_helpers
from ocs_ci.ocs.resources.pod import check_pods_in_statuses
from ocs_ci.ocs import constants

logger = logging.getLogger(__name__)
@@ -21,62 +21,60 @@ class TestDisableDR:
    """

    @pytest.mark.parametrize(
        argnames=["workload_type"],
        argnames=["pvc_interface"],
        argvalues=[
            pytest.param(
                *[constants.SUBSCRIPTION],
                constants.CEPHBLOCKPOOL,
                marks=pytest.mark.polarion_id("OCS-6209"),
            ),
            pytest.param(
                *[constants.APPLICATION_SET],
                marks=pytest.mark.polarion_id("OCS-6209"),
                constants.CEPHFILESYSTEM,
                marks=pytest.mark.polarion_id("OCS-6241"),
            ),
        ],
    )
    def test_disable_dr(self, workload_type, dr_workload):
    def test_disable_dr(self, pvc_interface, dr_workload):
        """
        Test to verify disable DR of application
        """

        if workload_type == constants.SUBSCRIPTION:
            rdr_workload = dr_workload(num_of_subscription=1)[0]

        if workload_type == constants.APPLICATION_SET:
            rdr_workload = dr_workload(
                num_of_subscription=0, num_of_appset=1
            )[0]
        rdr_workload = dr_workload(
            num_of_subscription=1, num_of_appset=1, pvc_interface=pvc_interface
        )

        primary_cluster_name = dr_helpers.get_current_primary_cluster_name(
            rdr_workload.workload_namespace, workload_type
            rdr_workload[0].workload_namespace,
        )

        scheduling_interval = dr_helpers.get_scheduling_interval(
            rdr_workload.workload_namespace, workload_type
            rdr_workload[0].workload_namespace,
        )
        wait_time = 2 * scheduling_interval # Time in minutes
        logger.info(f"Waiting for {wait_time} minutes to run IOs")
        sleep(wait_time * 60)

        # Disable DR
        dr_helpers.disable_dr_rdr(workload_type)
        dr_helpers.disable_dr_rdr()

        # Verify resources deletion from primary cluster
        config.switch_to_cluster_by_name(primary_cluster_name)

        # Verify pods and pvc on primary cluster
        logger.info(f"Validating pod,pvc on primary cluster - {primary_cluster_name}")
        dr_helpers.wait_for_all_resources_creation(
            rdr_workload.workload_pvc_count,
            rdr_workload.workload_pod_count,
            rdr_workload.workload_namespace,
            skip_replication_resources=True,
        )

        # Verify replication resource deletion on primary cluster
        logger.info("Validating replication resource deletion...")
        dr_helpers.wait_for_replication_resources_deletion(
            rdr_workload.workload_namespace,
            timeout=300,
            check_state=False,
        )
        for workload in rdr_workload:
            logger.info(
                f"Validating replication resource deletion in namespace {workload.workload_namespace}..."
            )
            dr_helpers.wait_for_replication_resources_deletion(
                workload.workload_namespace,
                timeout=300,
                check_state=False,
            )
            # Verify pod status on primary cluster
            logger.info(
                f"Wait for all the pods in {workload.workload_namespace} to be in running state"
            )
            assert check_pods_in_statuses(
                expected_statuses="Running",
                namespace=workload.workload_namespace,
            ), "Not all the pods in running state"
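
For reference, a rough manual equivalent of the per-namespace checks in the loop above, handy when triaging a failed run by hand. The VolumeReplicationGroup kind and the pod field selector are assumptions about what wait_for_replication_resources_deletion and check_pods_in_statuses inspect; only the namespace and the Running expectation come from the test.

import subprocess


def manual_post_disable_checks(namespace: str) -> None:
    """Spot-check a workload namespace after DR has been disabled."""
    # Replication resources (e.g. VolumeReplicationGroup) should be gone;
    # an empty listing matches wait_for_replication_resources_deletion passing.
    subprocess.run(["oc", "get", "volumereplicationgroup", "-n", namespace], check=False)
    # Application pods should still be Running: the do-not-delete-pvc annotation
    # keeps the PVCs, so the workload itself survives DR being disabled.
    subprocess.run(
        ["oc", "get", "pods", "-n", namespace, "--field-selector=status.phase=Running"],
        check=False,
    )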
