
Commit 4b64508

Fix typos (#58349)
Signed-off-by: Jiajun Yao <jeromeyjj@gmail.com>
1 parent 1767948 commit 4b64508

15 files changed: +22 −22 lines changed

python/ray/_private/accelerators/amd_gpu.py

Lines changed: 1 addition & 1 deletion
@@ -53,7 +53,7 @@ def get_visible_accelerator_ids_env_var() -> str:
             env_var = CUDA_VISIBLE_DEVICES_ENV_VAR
         elif hip_val != cuda_val:
             raise ValueError(
-                f"Inconsistant values found. Please use either {HIP_VISIBLE_DEVICES_ENV_VAR} or {CUDA_VISIBLE_DEVICES_ENV_VAR}."
+                f"Inconsistent values found. Please use either {HIP_VISIBLE_DEVICES_ENV_VAR} or {CUDA_VISIBLE_DEVICES_ENV_VAR}."
             )

         return env_var
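
For context, the guard this hunk touches rejects conflicting device lists when both HIP_VISIBLE_DEVICES and CUDA_VISIBLE_DEVICES are set. A minimal standalone sketch of that check (pick_visible_devices_env_var is a hypothetical name, and the surrounding branches are assumed; the real logic lives on Ray's AMD accelerator manager):

import os

HIP_VISIBLE_DEVICES_ENV_VAR = "HIP_VISIBLE_DEVICES"
CUDA_VISIBLE_DEVICES_ENV_VAR = "CUDA_VISIBLE_DEVICES"


def pick_visible_devices_env_var() -> str:
    # Prefer whichever variable is set; reject two differing non-None values.
    hip_val = os.environ.get(HIP_VISIBLE_DEVICES_ENV_VAR)
    cuda_val = os.environ.get(CUDA_VISIBLE_DEVICES_ENV_VAR)
    if hip_val is None:
        return CUDA_VISIBLE_DEVICES_ENV_VAR
    if cuda_val is None or hip_val == cuda_val:
        return HIP_VISIBLE_DEVICES_ENV_VAR
    raise ValueError(
        f"Inconsistent values found. Please use either "
        f"{HIP_VISIBLE_DEVICES_ENV_VAR} or {CUDA_VISIBLE_DEVICES_ENV_VAR}."
    )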

python/ray/autoscaler/_private/resource_demand_scheduler.py

Lines changed: 1 addition & 1 deletion
@@ -648,7 +648,7 @@ def _add_min_workers_nodes(
     request_resources() constraints.

     Args:
-        node_resources: Resources of exisiting nodes already launched/pending.
+        node_resources: Resources of existing nodes already launched/pending.
         node_type_counts: Counts of existing nodes already launched/pending.
         node_types: Node types config.
         max_workers: global max_workers constaint.

python/ray/dashboard/modules/reporter/profile_manager.py

Lines changed: 1 addition & 1 deletion
@@ -287,7 +287,7 @@ async def attach_profiler(

         Returns:
             Tuple[bool, str]: A tuple containing a boolean indicating the success
-                of the operation and a string of a sucess message or an error message.
+                of the operation and a string of a success message or an error message.
         """
         memray = shutil.which(self.profiler_name)
         if memray is None:
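
The (success, message) contract in this docstring pairs with the shutil.which check at the end of the hunk: shutil.which returns the executable's full path when the binary is on PATH, and None otherwise. A hypothetical standalone sketch of that pattern (check_profiler_available is not the module's actual helper):

import shutil
from typing import Tuple


def check_profiler_available(profiler_name: str = "memray") -> Tuple[bool, str]:
    # shutil.which returns the binary's path if found on PATH, else None.
    path = shutil.which(profiler_name)
    if path is None:
        return False, f"{profiler_name} is not installed or not on PATH"
    return True, f"found {profiler_name} at {path}"


print(check_profiler_available())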

python/ray/data/_internal/cluster_autoscaler/default_cluster_autoscaler.py

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ def __init__(
         self._last_request_time = 0

     def try_trigger_scaling(self):
-        """Try to scale up the cluster to accomodate the provided in-progress workload.
+        """Try to scale up the cluster to accommodate the provided in-progress workload.

         This makes a resource request to Ray's autoscaler consisting of the current,
         aggregate usage of all operators in the DAG + the incremental usage of all
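
try_trigger_scaling's docstring describes a resource request built from aggregate plus incremental operator usage. Such requests go through Ray's public autoscaler SDK; a minimal sketch with made-up bundle shapes (the real bundles come from Ray Data's operator usage accounting, not these literals):

from ray.autoscaler.sdk import request_resources

# Ask the autoscaler to size the cluster to fit these bundles, e.g. the
# current aggregate usage plus one incremental bundle per operator.
# The shapes below are illustrative only.
request_resources(bundles=[{"CPU": 1}] * 8 + [{"CPU": 1, "GPU": 1}])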

python/ray/experimental/tqdm_ray.py

Lines changed: 1 addition & 1 deletion
@@ -251,7 +251,7 @@ def close_bar(self, state: ProgressBarState) -> None:
         instance().unhide_bars()

     def slots_required(self):
-        """Return the number of pos slots we need to accomodate bars in this group."""
+        """Return the number of pos slots we need to accommodate bars in this group."""
         if not self.bars_by_uuid:
             return 0
         return 1 + max(bar.state["pos"] for bar in self.bars_by_uuid.values())
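
The 1 + max(...) in this hunk follows from positions being 0-indexed rows: a group whose highest occupied row is pos needs pos + 1 slots, even when lower rows are empty. A toy illustration (plain dicts stand in for the real ProgressBarState objects):

bars_by_uuid = {
    "a": {"pos": 0},
    "b": {"pos": 2},  # row 1 is empty, yet the group still spans 3 rows
}
slots = 1 + max(bar["pos"] for bar in bars_by_uuid.values())
assert slots == 3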

python/ray/tune/search/variant_generator.py

Lines changed: 1 addition & 1 deletion
@@ -344,7 +344,7 @@ def assign_value(spec: Dict, path: Tuple, value: Any):
     """Assigns a value to a nested dictionary.

     Handles the special case of tuples, in which case the tuples
-    will be re-constructed to accomodate the updated value.
+    will be re-constructed to accommodate the updated value.
     """
     parent_spec = None
     parent_key = None
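
The tuple special case mentioned in this docstring exists because tuples are immutable: the value cannot be assigned in place, so the containing tuple is rebuilt around it. A hypothetical re-implementation of that behavior (assign_nested is not Tune's actual helper, and it assumes the tuple sits below the top level of the spec):

from typing import Any, Dict, Tuple


def assign_nested(spec: Dict, path: Tuple, value: Any) -> None:
    parent, key, node = None, None, spec
    for k in path[:-1]:
        parent, key, node = node, k, node[k]
    last = path[-1]
    if isinstance(node, tuple):
        # Tuples cannot be mutated, so rebuild one with the new value.
        parent[key] = node[:last] + (value,) + node[last + 1 :]
    else:
        node[last] = value


spec = {"a": {"b": (1, 2, 3)}}
assign_nested(spec, ("a", "b", 1), 99)
assert spec == {"a": {"b": (1, 99, 3)}}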

python/ray/tune/utils/file_transfer.py

Lines changed: 2 additions & 2 deletions
@@ -383,7 +383,7 @@ def _unpack_dir(stream: io.BytesIO, target_dir: str, *, _retry: bool = True) ->
     target_dir = os.path.normpath(target_dir)
     try:
         # Timeout 0 means there will be only one attempt to acquire
-        # the file lock. If it cannot be aquired, a TimeoutError
+        # the file lock. If it cannot be acquired, a TimeoutError
         # will be thrown.
         with TempFileLock(f"{target_dir}.lock", timeout=0):
             with tarfile.open(fileobj=stream) as tar:
@@ -426,7 +426,7 @@ def _copy_dir(
     target_dir = os.path.normpath(target_dir)
     try:
         # Timeout 0 means there will be only one attempt to acquire
-        # the file lock. If it cannot be aquired, a TimeoutError
+        # the file lock. If it cannot be acquired, a TimeoutError
         # will be thrown.
         with TempFileLock(f"{target_dir}.lock", timeout=0):
             _delete_path_unsafe(target_dir)
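
The comment corrected in both hunks describes single-attempt lock acquisition. TempFileLock is Ray's wrapper; the same semantics can be seen with the underlying filelock package, where timeout=0 means one try and a filelock.Timeout exception (a TimeoutError subclass) on failure. A standalone illustration with a made-up lock path:

from filelock import FileLock, Timeout

lock_path = "/tmp/example_target_dir.lock"  # illustrative path

try:
    # timeout=0: a single acquisition attempt, no waiting.
    with FileLock(lock_path, timeout=0):
        print("lock acquired; safe to modify the target directory")
except Timeout:
    print("another process holds the lock; giving up immediately")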

release/tune_tests/fault_tolerance_tests/workloads/terminate_node_aws.py

Lines changed: 4 additions & 4 deletions
@@ -102,15 +102,15 @@ def kill(self):
         failures = 0
         max_failures = 3
         node = None
-        terminated_succesfully = False
-        while not terminated_succesfully and failures < max_failures:
+        terminated_successfully = False
+        while not terminated_successfully and failures < max_failures:
             try:
                 node = get_random_node()
                 if not node:
                     logger.info("No alive worker nodes")
                     continue
                 terminate_node(node["NodeID"])
-                terminated_succesfully = True
+                terminated_successfully = True
                 logger.info(
                     f"Killed node {node['NodeID']} with IP {node['NodeManagerAddress']}"
                 )
@@ -125,7 +125,7 @@ def kill(self):
             {
                 "timestamp": time.time(),
                 "node": node,
-                "terminated_succesfully": terminated_succesfully,
+                "terminated_successfully": terminated_successfully,
             }
         )
         # safe_write_to_results_json(self.history)
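
The loop renamed here is a bounded-retry pattern: keep attempting the operation until it succeeds or max_failures attempts have failed. A self-contained sketch of the same shape, with a random stand-in for terminate_node:

import logging
import random
import time

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def flaky_operation() -> None:
    # Stand-in for terminate_node(); fails randomly for illustration.
    if random.random() < 0.5:
        raise RuntimeError("transient failure")


failures = 0
max_failures = 3
terminated_successfully = False
while not terminated_successfully and failures < max_failures:
    try:
        flaky_operation()
        terminated_successfully = True
    except Exception:
        failures += 1
        logger.exception("Attempt %d failed", failures)
        time.sleep(1)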

release/tune_tests/fault_tolerance_tests/workloads/test_tune_worker_fault_tolerance.py

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@

 If a trial is restored, it should restart from the last checkpointed iteration.

-The test is succesfull if all trials finish with the expected number of iterations,
+The test is successful if all trials finish with the expected number of iterations,
 and that a checkpoint is always available when restoring.

 This test only works on AWS as it uses AWS CLI to terminate nodes.

rllib/core/rl_module/tests/test_rl_module_specs.py

Lines changed: 3 additions & 3 deletions
@@ -88,7 +88,7 @@ def test_customized_multi_agent_module(self):
             spec.build()

     def test_get_spec_from_module_multi_agent(self):
-        """Tests wether MultiRLModuleSpec.from_module() works."""
+        """Tests whether MultiRLModuleSpec.from_module() works."""
         env = gym.make("CartPole-v1")
         num_agents = 2
         module_specs = {}
@@ -107,7 +107,7 @@ def test_get_spec_from_module_multi_agent(self):
         self.assertEqual(spec, spec_from_module)

     def test_get_spec_from_module_single_agent(self):
-        """Tests wether RLModuleSpec.from_module() works."""
+        """Tests whether RLModuleSpec.from_module() works."""
         env = gym.make("CartPole-v1")
         spec = RLModuleSpec(
             module_class=VPGTorchRLModule,
@@ -121,7 +121,7 @@ def test_get_spec_from_module_single_agent(self):
         self.assertEqual(spec, spec_from_module)

     def test_update_specs(self):
-        """Tests wether RLModuleSpec.update() works."""
+        """Tests whether RLModuleSpec.update() works."""
         env = gym.make("CartPole-v0")

         # Test if RLModuleSpec.update() works.
