Commit

[ISSUE-1202]: Automate test

Signed-off-by: Malgorzata Dutka <malgorzata.dutka@dell.com>
mdutka-dell committed Jul 17, 2024
1 parent f77cde4 commit 8ff444b
Showing 6 changed files with 279 additions and 26 deletions.
12 changes: 6 additions & 6 deletions tests/e2e-test-framework/conftest.py
@@ -1,6 +1,6 @@
import logging
from datetime import datetime
from typing import Generator
from typing import Generator, Dict
import pytest
import re

@@ -111,7 +111,7 @@ def get_utils(request) -> Utils:
namespace=request.config.getoption("--namespace")
)

def get_ssh_executors(request) -> dict[str, SSHCommandExecutor]:
def get_ssh_executors(request) -> Dict[str, SSHCommandExecutor]:
utils = get_utils(request)
ips = utils.get_worker_ips() + utils.get_controlplane_ips()
executors = {ip: SSHCommandExecutor(ip_address=ip, username=utils.vm_user, password=utils.vm_cred) for ip in ips}
@@ -122,11 +122,11 @@ def utils(request) -> Utils:
return get_utils(request)

@pytest.fixture(scope="session")
def ssh_executors(request) -> dict[str, SSHCommandExecutor]:
def ssh_executors(request) -> Dict[str, SSHCommandExecutor]:
return get_ssh_executors(request)

@pytest.fixture(scope="session")
def drive_utils_executors(request) -> dict[str, DriveUtils]:
def drive_utils_executors(request) -> Dict[str, DriveUtils]:
ssh_execs = get_ssh_executors(request)
return {ip: DriveUtils(executor) for ip, executor in ssh_execs.items()}

@@ -138,15 +138,15 @@ def link_requirements_in_background(request):
pytest.threads.append(requirements_thread)

@pytest.fixture(autouse=True)
def keep_drive_count(drive_utils_executors: dict[str, DriveUtils]) -> Generator[None, None, None]:
def keep_drive_count(drive_utils_executors: Dict[str, DriveUtils]) -> Generator[None, None, None]:
hosts_per_node_before = {ip: drive_utils.get_all_hosts() for ip, drive_utils in drive_utils_executors.items()}
yield
hosts_per_node_after = {ip: drive_utils.get_all_hosts() for ip, drive_utils in drive_utils_executors.items()}
for ip, drive_utils in drive_utils_executors.items():
drive_utils.rescan_missing_hosts(before=hosts_per_node_before[ip], after=hosts_per_node_after[ip])

@pytest.fixture(autouse=True)
def wipe_drives(drive_utils_executors: dict[str, DriveUtils]) -> Generator[None, None, None]:
def wipe_drives(drive_utils_executors: Dict[str, DriveUtils]) -> Generator[None, None, None]:
yield
for _, drive_utils in drive_utils_executors.items():
drive_utils.wipe_drives()
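The switch from the built-in dict[...] generic to typing.Dict keeps these annotations working on Python 3.8, where subscripting dict is not supported. For context, a minimal sketch of a test consuming the session-scoped fixture above (the test name and assertion are assumptions, not part of this commit):

# Hypothetical test module (not part of this commit).
# drive_utils_executors is injected by pytest from the session fixture above.
def test_drive_hosts_discovered(drive_utils_executors):
    for ip, drive_utils in drive_utils_executors.items():
        hosts = drive_utils.get_all_hosts()  # same call keep_drive_count relies on
        assert hosts, f"no drive hosts discovered on node {ip}"
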
9 changes: 9 additions & 0 deletions tests/e2e-test-framework/framework/const.py
@@ -31,6 +31,7 @@
# statuses
STATUS_ONLINE = "ONLINE"
STATUS_OFFLINE = "OFFLINE"
STATUS_OPERATIVE = "OPERATIVE"

# annotation keys
DRIVE_HEALTH_ANNOTATION = "health"
@@ -48,6 +49,11 @@
# fake attach events
FAKE_ATTACH_INVOLVED = "FakeAttachInvolved"
FAKE_ATTACH_CLEARED = "FakeAttachCleared"
DRIVE_HEALTH_FAILURE = "DriveHealthFailure"
DRIVE_READY_FOR_REMOVAL = "DriveReadyForRemoval"
VOLUME_BAD_HEALTH = "VolumeBadHealth"
DRIVE_READY_FOR_PHYSICAL_REMOVAL = "DriveReadyForPhysicalRemoval"
DRIVE_SUCCESSFULLY_REMOVED = "DriveSuccessfullyRemoved"

# drive events
DRIVE_HEALTH_FAILURE_EVENT = "DriveHealthFailure"
@@ -60,3 +66,6 @@
ACR_PLURAL = "availablecapacityreservations"
LVG_PLURAL = "logicalvolumegroups"
VOLUMES_PLURAL = "volumes"

# led
LED_STATE = "1,2"
96 changes: 78 additions & 18 deletions tests/e2e-test-framework/framework/utils.py
@@ -391,6 +391,7 @@ def wait_volume(
expected_status: Optional[str] = None,
expected_health: Optional[str] = None,
expected_usage: Optional[str] = None,
expected_operational_status: Optional[str] = None,
timeout: int = 60,
) -> bool:
"""
@@ -401,6 +402,7 @@
expected_status (Optional[str], optional): The expected status of the volume. Defaults to None.
expected_health (Optional[str], optional): The expected health of the volume. Defaults to None.
expected_usage (Optional[str], optional): The expected usage of the volume. Defaults to None.
expected_operational_status (Optional[str], optional): The expected operational status of the volume. Defaults to None.
timeout (int): The maximum time to wait for the volume in seconds. Defaults to 60.
Returns:
@@ -413,6 +415,8 @@
expected["Usage"] = expected_usage
if expected_health:
expected["Health"] = expected_health
if expected_operational_status:
expected['OperationalStatus'] = expected_operational_status

def callback():
return self.list_volumes(name)[0]
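A hedged sketch of how the new parameter might be exercised from a test (the volume name and assertion message are hypothetical; utils is the Utils fixture from conftest.py and STATUS_OPERATIVE is the constant added in framework/const.py):

# Sketch only: wait for a volume CR to report the OPERATIVE operational status.
assert utils.wait_volume(
    name=volume_name,                                    # hypothetical volume name
    expected_operational_status=const.STATUS_OPERATIVE,  # "OPERATIVE"
), f"volume {volume_name} did not reach operational status {const.STATUS_OPERATIVE}"
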
@@ -427,6 +431,7 @@ def wait_drive(
expected_status: Optional[str] = None,
expected_health: Optional[str] = None,
expected_usage: Optional[str] = None,
expected_led_state: Optional[str] = None,
timeout: int = 60,
) -> bool:
"""
@@ -437,6 +442,7 @@
expected_status (Optional[str], optional): The expected status of the drive. Defaults to None.
expected_health (Optional[str], optional): The expected health of the drive. Defaults to None.
expected_usage (Optional[str], optional): The expected usage of the drive. Defaults to None.
expected_led_state (Optional[str], optional): The expected LED state of the drive. Defaults to None.
timeout (int): The maximum time to wait for the drive in seconds. Defaults to 60.
Returns:
@@ -449,6 +455,8 @@
expected["Usage"] = expected_usage
if expected_health:
expected["Health"] = expected_health
if expected_led_state:
expected["LEDState"] = expected_led_state

def callback():
return self.custom_objects_api.get_cluster_custom_object(
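Similarly, a hedged sketch for the LED check (the drive CR name is hypothetical; LED_STATE is the new "1,2" constant from framework/const.py):

# Sketch only: wait for a drive CR to report one of the accepted LED states.
assert utils.wait_drive(
    name=drive_name,                     # hypothetical drive CR name
    expected_led_state=const.LED_STATE,  # "1,2": either LED value is accepted
), f"drive {drive_name} did not reach LED state {const.LED_STATE}"
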
@@ -463,15 +471,15 @@ def _wait_cr(
self,
expected: Dict[str, str],
get_cr_fn: Callable[[None], Any],
timeout: int = 60,
timeout: int = 90,
) -> bool:
"""
Waits for the custom resource (CR) to reach the expected state.
Args:
expected (dict): The expected state of the CR's spec.
get_cr_fn (callable): The function to get the CR.
timeout (int, optional): The timeout for checking the CR, defaults to 60.
timeout (int, optional): The timeout for checking the CR, defaults to 90.
Returns:
bool: True if the CR meets the expected state within the given timeout, False otherwise.
@@ -487,7 +495,7 @@

cr = get_cr_fn()
for key, value in expected.items():
if cr["spec"][key] == value:
if cr["spec"][key] in value:
assertions[key] = True

if all(assertions.values()):
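The change from == to in turns the expected value into a membership test, which is what lets a single constant such as const.LED_STATE = "1,2" accept either LED value. A standalone illustration (plain Python, outside the framework):

# Illustration only: how a comma-separated expected value matches observed CR fields.
expected_led_state = "1,2"
print("1" in expected_led_state)  # True
print("2" in expected_led_state)  # True
print("4" in expected_led_state)  # False
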
@@ -694,8 +702,8 @@ def recreate_pod(self, name: str, namespace: str) -> V1Pod:
time.sleep(5)
pod = self.list_pods(name, namespace=namespace)[0]
assert self.is_pod_ready(
name, timeout=120
), "pod not ready after 120 seconds timeout"
name, timeout=150
), "pod not ready after 150 seconds timeout"
logging.info(f"pod {name} is ready")

return pod
@@ -729,29 +737,81 @@ def wait_for_event_with_reason(
return False

def clear_pvc_and_pod(
self, pod_name: str, pvc_name: str, volume_name: str, namespace: str
self, pod_name: str, namespace: str, pvc_name: Optional[str] = None, volume_name: Optional[str] = None
) -> None:
"""
Clears the PersistentVolumeClaim (PVC) and the Pod with the specified names in the Kubernetes cluster.
If the PVC or volume name is not specified, it clears all PVCs attached to the given Pod.
Args:
pod_name (str): The name of the Pod to be cleared.
pvc_name (str): The name of the PersistentVolumeClaim to be cleared.
volume_name (str): The name of the volume to be checked.
namespace (str): The namespace of the PersistentVolumeClaim and Pod.
pvc_name (Optional[str], optional): The name of the PersistentVolumeClaim to be cleared.
volume_name (Optional[str], optional): The name of the volume to be checked.
Returns:
None: This function does not return anything.
"""
logging.info(f"clearing pvc {pvc_name} and pod {pod_name}")
self.core_v1_api.delete_namespaced_persistent_volume_claim(
name=pvc_name,
namespace=namespace,
)
if pvc_name and volume_name:
logging.info(f"clearing pvc {pvc_name}")
self.core_v1_api.delete_namespaced_persistent_volume_claim(
name=pvc_name,
namespace=namespace,
)
assert self.wait_volume(
name=volume_name,
expected_usage=const.USAGE_RELEASED,
), f"Volume: {volume_name} failed to reach expected usage: {const.USAGE_RELEASED}"
else:
pvcs = self.list_persistent_volume_claims(
namespace=namespace, pod_name=pod_name
)
for pvc in pvcs:
logging.info(f"clearing pvc {pvc.metadata.name}")
self.core_v1_api.delete_namespaced_persistent_volume_claim(
name=pvc.metadata.name,
namespace=namespace,
)
for pvc in pvcs:
assert self.wait_volume(
name=pvc.spec.volume_name,
expected_usage=const.USAGE_RELEASED,
), f"Volume: {pvc.spec.volume_name} failed to reach expected usage: {const.USAGE_RELEASED}"
logging.info(f"volume: {pvc.spec.volume_name} reach expected usage: {const.USAGE_RELEASED}")

time.sleep(30)
self.recreate_pod(name=pod_name, namespace=namespace)
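A hedged sketch of calling the reworked helper without naming a PVC, so that every claim bound to the pod is released before the pod is recreated (the pod name and namespace are hypothetical):

# Sketch only: clear all PVCs attached to the pod, then recreate it.
utils.clear_pvc_and_pod(pod_name="test-app-0", namespace="default")
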

def check_drive_cr_not_exist(self, drive_name: str, timeout: int = 120) -> bool:
"""
Checks if a custom resource (CR) representing a drive with the given name does not exist.
Args:
drive_name (str): The name of the drive CR.
timeout (int, optional): The timeout for checking the CR, defaults to 120.
Returns:
bool: True if the drive CR was removed within the given timeout, False otherwise.
"""
end_time = time.time() + timeout
while time.time() < end_time:
try:
self.custom_objects_api.get_cluster_custom_object(
group=const.CR_GROUP,
version=const.CR_VERSION,
plural="drives",
name=drive_name,
)
logging.info(f"FOUND DRIVE: {drive_name}")
except ApiException as e:
if e.status == 404:
return True
else:
raise
time.sleep(2)
logging.warning(
f"Drive CR '{drive_name}' still exists after {timeout} seconds timeout."
)
return False
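A hedged sketch of the new helper in a removal flow (the drive CR name is hypothetical):

# Sketch only: after the drive is physically removed, its Drive CR should disappear.
assert utils.check_drive_cr_not_exist(
    drive_name=drive_name,  # hypothetical drive CR name
), f"Drive CR {drive_name} was not removed within the timeout"
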

