Commit
Update cluster list tests
carolineechen authored and Alexandra Belousov committed Nov 18, 2024
1 parent 625ecf5 commit ed562d5
Showing 3 changed files with 104 additions and 201 deletions.
2 changes: 1 addition & 1 deletion runhouse/resources/hardware/cluster.py
@@ -2262,7 +2262,7 @@ def _folder_exists(self, path: Union[str, Path]):
@classmethod
def list(
cls,
show_all: Optional[bool] = False,
show_all: bool = False,
since: Optional[str] = None,
status: Optional[ClustersListStatus] = None,
) -> Dict[str, List[Dict]]:
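The hunk above narrows show_all from Optional[bool] to a plain bool. A minimal usage sketch of the resulting signature, assuming Cluster is importable from the top-level runhouse package and using only the "den_clusters" key and the filter values exercised by the tests below:

# Hypothetical usage sketch of Cluster.list after this change; the import path
# and result keys are assumptions inferred from the tests in this commit.
from runhouse import Cluster

# Default call: per the updated tests, only running clusters are returned.
running = Cluster.list().get("den_clusters", {})

# show_all=True lifts the status filter; the tests assert at most 200 results (the den limit).
all_clusters = Cluster.list(show_all=True).get("den_clusters", {})

# Explicit filters, matching the values the tests pass.
terminated = Cluster.list(status="terminated").get("den_clusters", {})
recent = Cluster.list(since="10m").get("den_clusters", {})

for c in recent:
    print(c.get("Name"), c.get("Status"), c.get("Last Active (UTC)"))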
299 changes: 100 additions & 199 deletions tests/test_resources/test_clusters/test_cluster.py
@@ -35,7 +35,6 @@
get_random_str,
org_friend_account,
remove_config_keys,
set_cluster_status,
set_output_env_vars,
)

@@ -994,7 +993,7 @@ def test_observability_enabled_by_default_on_cluster(self, cluster):

@pytest.mark.level("local")
@pytest.mark.clustertest
def test_cluster_list_pythonic(self, cluster):
def test_cluster_list_contains_pythonic(self, cluster):
original_username = rns_client.username
new_username = (
"test-org"
@@ -1007,23 +1006,15 @@ def test_cluster_list_pythonic(self, cluster):
token=rns_client.token,
original_username=original_username,
):
clusters = Cluster.list()
all_clusters = clusters.get("den_clusters", {})
running_clusters = (
[
den_cluster
for den_cluster in all_clusters
if den_cluster.get("Status") == "running"
]
if all_clusters
else {}
)
assert len(all_clusters) > 0
assert len(running_clusters) > 0
assert len(running_clusters) == len(
all_clusters
)
all_clusters = Cluster.list(show_all=True).get("den_clusters", {})
running_clusters = Cluster.list().get(
"den_clusters", {}
) # by default we get only running clusters

assert 0 <= len(all_clusters) <= 200 # den limit
assert len(all_clusters) >= len(running_clusters)
assert len(running_clusters) > 0

all_clusters_names = [
den_cluster.get("Name") for den_cluster in all_clusters
]
@@ -1035,57 +1026,7 @@ def test_cluster_list_status_filter_pythonic(self, cluster):

@pytest.mark.level("local")
@pytest.mark.clustertest
def test_cluster_list_all_pythonic(self, cluster):
original_username = rns_client.username
new_username = (
"test-org"
if cluster.rns_address.startswith("/test-org/")
else original_username
)

with org_friend_account(
new_username=new_username,
token=rns_client.token,
original_username=original_username,
):
# make sure that we at least one terminated cluster for the tests, (does not matter if the status is mocked)
set_cluster_status(cluster=cluster, status=ResourceServerStatus.terminated)
clusters = Cluster.list(show_all=True)
all_clusters = clusters.get("den_clusters", {})
running_clusters = (
[
den_cluster
for den_cluster in all_clusters
if den_cluster.get("Status") == "running"
]
if all_clusters
else {}
)
assert 0 <= len(all_clusters) <= 200 # den limit
assert len(all_clusters) >= len(running_clusters)

all_clusters_status = set(
[den_cluster.get("Status") for den_cluster in all_clusters]
)

# testing that we don't get just running clusters
assert len(all_clusters) > 1

assert ResourceServerStatus.running.value in all_clusters_status
assert ResourceServerStatus.terminated.value in all_clusters_status

current_cluster_info = [
den_cluster
for den_cluster in all_clusters
if den_cluster.get("Name") == cluster.name
][0]
assert current_cluster_info.get("Status") == "terminated"

@pytest.mark.level("local")
@pytest.mark.clustertest
def test_cluster_list_status_filter_pythonic(self, cluster):
from runhouse.resources.hardware.utils import ResourceServerStatus

def test_cluster_list_status_pythonic(self, cluster):
original_username = rns_client.username
new_username = (
"test-org"
@@ -1098,64 +1039,19 @@ def test_cluster_list_status_filter_pythonic(self, cluster):
token=rns_client.token,
original_username=original_username,
):
clusters = Cluster.list(status="running")
all_clusters = clusters.get("den_clusters", {})
running_clusters = (
[
den_cluster
for den_cluster in all_clusters
if den_cluster.get("Status") == "running"
]
if all_clusters
else {}
)
assert len(all_clusters) > 0
assert len(running_clusters) > 0
assert len(all_clusters) == len(running_clusters)

all_clusters_status = set(
[den_cluster.get("Status") for den_cluster in all_clusters]
)

supported_statuses_without_running = [
status
for status in list(ResourceServerStatus.__members__.keys())
if status != "running"
]

for status in supported_statuses_without_running:
assert status not in all_clusters_status

assert ResourceServerStatus.running in all_clusters_status

# make sure that we at least one terminated cluster for the tests, (does not matter if the status is mocked)
set_cluster_status(cluster=cluster, status=ResourceServerStatus.terminated)

clusters = Cluster.list(status="terminated")
all_clusters = clusters.get("den_clusters", {})
running_clusters = (
[
den_cluster
for den_cluster in all_clusters
if den_cluster.get("Status") == "running"
]
if all_clusters
else {}
)
assert len(all_clusters) > 0
assert len(running_clusters) == 0

all_clusters_status = set(
[den_cluster.get("Status") for den_cluster in all_clusters]
)
assert "terminated" in all_clusters_status
for status in ["running", "terminated"]:
# check that each filtered request contains only the specific status
filtered_clusters = Cluster.list(status=status).get("den_clusters", {})
if filtered_clusters:
filtered_statuses = set(
[cluster.get("Status") for cluster in filtered_clusters]
)
assert filtered_statuses == {status}

@pytest.mark.level("local")
@pytest.mark.clustertest
def test_cluster_list_since_filter_pythonic(self, cluster):
def test_cluster_list_since_pythonic(self, cluster):
cluster.save() # tls exposed local cluster is not saved by default
# make sure that we at least one terminated cluster for the tests, (does not matter if the status is mocked)
set_cluster_status(cluster=cluster, status=ResourceServerStatus.terminated)

original_username = rns_client.username
new_username = (
@@ -1171,21 +1067,13 @@ def test_cluster_list_since_filter_pythonic(self, cluster):
):
minutes_time_filter = 10
clusters = Cluster.list(since=f"{minutes_time_filter}m")
all_clusters = clusters.get("den_clusters", {})
running_clusters = (
[
den_cluster
for den_cluster in all_clusters
if den_cluster.get("Status") == "running"
]
if all_clusters
else {}
)
assert len(running_clusters) >= 0
assert len(all_clusters) > 0
recent_clusters = clusters.get("den_clusters", {})

clusters_last_active_timestamps = set(
[den_cluster.get("Last Active (UTC)") for den_cluster in all_clusters]
[
den_cluster.get("Last Active (UTC)")
for den_cluster in recent_clusters
]
)

assert len(clusters_last_active_timestamps) >= 1
@@ -1201,44 +1089,7 @@ def test_cluster_list_since_filter_pythonic(self, cluster):

@pytest.mark.level("local")
@pytest.mark.clustertest
def test_cluster_list_cmd_output_no_filters(self, capsys):
import re
import subprocess

env = set_output_env_vars()

process = subprocess.Popen(
"runhouse cluster list",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
)
process.wait()
stdout, stderr = process.communicate()
capsys.readouterr()
cmd_stdout = stdout.decode("utf-8")

assert cmd_stdout

# The output is printed as a table.
# testing that the table name is printed correctly
regex = f".*Clusters for {rh.configs.username}.*\(Running: .*/.*, Total Displayed: .*/.*\).*"
assert re.search(regex, cmd_stdout)

# testing that the table column names is printed correctly
col_names = ["┃ Name", "┃ Cluster Type", "┃ Status", "┃ Last Active (UTC)"]
for name in col_names:
assert name in cmd_stdout
assert (
f"Showing clusters that were active in the last {int(LAST_ACTIVE_AT_TIMEFRAME / HOUR)} hours."
in cmd_stdout
)

@pytest.mark.level("local")
@pytest.mark.clustertest
def test_cluster_list_cmd_output_with_filters(self, capsys, cluster):

def test_cluster_list_cmd_output_no_filters(self, capsys, cluster):
import re
import subprocess

@@ -1254,57 +1105,107 @@ def test_cluster_list_cmd_output_with_filters(self, capsys, cluster):
token=rns_client.token,
original_username=original_username,
):
cluster.save() # tls exposed local cluster is not saved by default

# make sure that we at least one terminated cluster for the tests, (does not matter if the status is mocked)
set_cluster_status(cluster=cluster, status=ResourceServerStatus.terminated)

env = set_output_env_vars()

process = subprocess.Popen(
"runhouse cluster list --status terminated",
"runhouse cluster list",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
)
process.wait()
stdout, stderr = process.communicate()
stdout = process.communicate()[0]
capsys.readouterr()
cmd_stdout = stdout.decode("utf-8")

assert cmd_stdout

# The output is printed as a table.
# testing that the table name is printed correctly

regex = ".*Clusters for.*\(Running: .*/.*, Total Displayed: .*/.*\).*"
regex = f".*Clusters for {rh.configs.username}.*\(Running: .*/.*, Total Displayed: .*/.*\).*"
assert re.search(regex, cmd_stdout)

# testing that the table column names is printed correctly
displayed_info_names = [
"┃ Name",
"┃ Cluster Type",
"┃ Status",
"┃ Last Active (UTC)",
]
for name in displayed_info_names:
col_names = ["┃ Name", "┃ Cluster Type", "┃ Status", "┃ Last Active (UTC)"]
for name in col_names:
assert name in cmd_stdout

assert (
"Note: the above clusters have registered activity in the last 24 hours."
not in cmd_stdout
f"Showing clusters that were active in the last {int(LAST_ACTIVE_AT_TIMEFRAME / HOUR)} hours."
in cmd_stdout
)
assert cluster.name in cmd_stdout

@pytest.mark.level("local")
@pytest.mark.clustertest
def test_cluster_list_cmd_output_with_filters(self, capsys, cluster):

import re
import subprocess

original_username = rns_client.username
new_username = (
"test-org"
if cluster.rns_address.startswith("/test-org/")
else original_username
)

with org_friend_account(
new_username=new_username,
token=rns_client.token,
original_username=original_username,
):
cluster.save() # tls exposed local cluster is not saved by default

env = set_output_env_vars()

for status in ["running", "terminated"]:

process = subprocess.Popen(
f"runhouse cluster list --status {status}",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
)
process.wait()
stdout = process.communicate()[0]
capsys.readouterr()
cmd_stdout = stdout.decode("utf-8")
assert cmd_stdout

# The output is printed as a table.
# testing that the table name is printed correctly

regex = ".*Clusters for.*\(Running: .*/.*, Total Displayed: .*/.*\).*"
assert re.search(regex, cmd_stdout)

# testing that the table column names is printed correctly
col_names = [
"┃ Name",
"┃ Cluster Type",
"┃ Status",
"┃ Last Active (UTC)",
]
for name in col_names:
assert name in cmd_stdout

assert (
"Note: the above clusters have registered activity in the last 24 hours."
not in cmd_stdout
)

# Removing 'Running' which appearing in the title of the output,
# so we could test the no clusters with status 'Running' is printed
cmd_stdout = cmd_stdout.replace("Running", "")
assert "Terminated" in cmd_stdout
if status == "running":
assert cluster.name in cmd_stdout

statues = list(ResourceServerStatus.__members__.keys())
statues.remove("terminated")
# Check other statuses not found in output
cmd_stdout = cmd_stdout.replace("Running:", "")
statuses = list(ResourceServerStatus.__members__.keys())
statuses.remove(status)

for status in statues:
assert status not in cmd_stdout
for status in statuses:
assert status.capitalize() not in cmd_stdout

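For reference, a minimal sketch of the CLI path these tests drive, using only the command, flag, and table format that appear in the assertions above; it assumes an environment with Runhouse credentials already configured:

# Sketch of the CLI invocation exercised by test_cluster_list_cmd_output_with_filters.
import subprocess

result = subprocess.run(
    "runhouse cluster list --status terminated",
    shell=True,
    capture_output=True,
    text=True,
)

# The tests expect a table titled "Clusters for <username> (Running: x/y,
# Total Displayed: n/m)" with Name, Cluster Type, Status, and
# Last Active (UTC) columns.
print(result.stdout)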