2 changes: 1 addition & 1 deletion python/ray/_private/accelerators/amd_gpu.py
@@ -53,7 +53,7 @@ def get_visible_accelerator_ids_env_var() -> str:
env_var = CUDA_VISIBLE_DEVICES_ENV_VAR
elif hip_val != cuda_val:
raise ValueError(
f"Inconsistant values found. Please use either {HIP_VISIBLE_DEVICES_ENV_VAR} or {CUDA_VISIBLE_DEVICES_ENV_VAR}."
f"Inconsistent values found. Please use either {HIP_VISIBLE_DEVICES_ENV_VAR} or {CUDA_VISIBLE_DEVICES_ENV_VAR}."
)

return env_var
@@ -648,7 +648,7 @@ def _add_min_workers_nodes(
request_resources() constraints.

Args:
-node_resources: Resources of exisiting nodes already launched/pending.
+node_resources: Resources of existing nodes already launched/pending.
node_type_counts: Counts of existing nodes already launched/pending.
node_types: Node types config.
max_workers: global max_workers constaint.
2 changes: 1 addition & 1 deletion python/ray/dashboard/modules/reporter/profile_manager.py
@@ -287,7 +287,7 @@ async def attach_profiler(

Returns:
Tuple[bool, str]: A tuple containing a boolean indicating the success
-of the operation and a string of a sucess message or an error message.
+of the operation and a string of a success message or an error message.
Comment on lines 289 to +290
Severity: medium

The type hint in the docstring for attach_profiler is Tuple[bool, str], but the implementation returns a 3-element tuple: (bool, Optional[str], str). This is misleading and can cause issues for developers using this function.

The docstring and the function's type hint should be updated to reflect the actual return type. Please also update the function signature on line 274 from -> (bool, str) to -> Tuple[bool, Optional[str], str] and add the necessary imports from typing (Tuple, Optional).

Suggested change
-Tuple[bool, str]: A tuple containing a boolean indicating the success
-    of the operation and a string of a success message or an error message.
+Tuple[bool, Optional[str], str]: A tuple containing a boolean indicating the
+    success of the operation, the profiler filename if successful, and a string with a success or an error message.
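For illustration, a minimal sketch of what the corrected signature could look like; the pid parameter and output path here are hypothetical, not the module's actual API:

import shutil
from typing import Optional, Tuple

async def attach_profiler(pid: int) -> Tuple[bool, Optional[str], str]:
    """Hypothetical sketch; the real method takes different arguments.

    Returns:
        Tuple[bool, Optional[str], str]: success flag, the profiler output
            filename if successful (else None), and a success or error message.
    """
    if shutil.which("memray") is None:
        return False, None, "memray is not installed."
    filename = f"/tmp/memray_{pid}.bin"  # illustrative output path
    return True, filename, f"Attached memray to process {pid}."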

"""
memray = shutil.which(self.profiler_name)
if memray is None:
@@ -36,7 +36,7 @@ def __init__(
self._last_request_time = 0

def try_trigger_scaling(self):
"""Try to scale up the cluster to accomodate the provided in-progress workload.
"""Try to scale up the cluster to accommodate the provided in-progress workload.

This makes a resource request to Ray's autoscaler consisting of the current,
aggregate usage of all operators in the DAG + the incremental usage of all
2 changes: 1 addition & 1 deletion python/ray/experimental/tqdm_ray.py
@@ -251,7 +251,7 @@ def close_bar(self, state: ProgressBarState) -> None:
instance().unhide_bars()

def slots_required(self):
"""Return the number of pos slots we need to accomodate bars in this group."""
"""Return the number of pos slots we need to accommodate bars in this group."""
if not self.bars_by_uuid:
return 0
return 1 + max(bar.state["pos"] for bar in self.bars_by_uuid.values())
2 changes: 1 addition & 1 deletion python/ray/tune/search/variant_generator.py
@@ -344,7 +344,7 @@ def assign_value(spec: Dict, path: Tuple, value: Any):
"""Assigns a value to a nested dictionary.

Handles the special case of tuples, in which case the tuples
-will be re-constructed to accomodate the updated value.
+will be re-constructed to accommodate the updated value.
"""
parent_spec = None
parent_key = None
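Since tuples are immutable in Python, the special case amounts to rebuilding the tuple and re-assigning it in its parent container, which is why the implementation tracks a parent reference. A simplified sketch of the idea (not Tune's actual code; assumes the tuple sits below the top level of spec):

def assign_nested(spec, path, value):
    # Walk to the container holding the final key, remembering the parent
    # so an immutable tuple can be replaced wholesale.
    parent, parent_key = None, None
    node = spec
    for key in path[:-1]:
        parent, parent_key = node, key
        node = node[key]
    last = path[-1]
    if isinstance(node, tuple):
        # Rebuild the tuple with the updated element, then swap the new
        # tuple into the parent container.
        parent[parent_key] = node[:last] + (value,) + node[last + 1:]
    else:
        node[last] = value

# Example: assign_nested({"a": {"b": (1, 2, 3)}}, ("a", "b", 1), 9)
# replaces spec["a"]["b"] with (1, 9, 3).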
4 changes: 2 additions & 2 deletions python/ray/tune/utils/file_transfer.py
@@ -383,7 +383,7 @@ def _unpack_dir(stream: io.BytesIO, target_dir: str, *, _retry: bool = True) ->
target_dir = os.path.normpath(target_dir)
try:
# Timeout 0 means there will be only one attempt to acquire
-# the file lock. If it cannot be aquired, a TimeoutError
+# the file lock. If it cannot be acquired, a TimeoutError
# will be thrown.
with TempFileLock(f"{target_dir}.lock", timeout=0):
with tarfile.open(fileobj=stream) as tar:
@@ -426,7 +426,7 @@ def _copy_dir(
target_dir = os.path.normpath(target_dir)
try:
# Timeout 0 means there will be only one attempt to acquire
-# the file lock. If it cannot be aquired, a TimeoutError
+# the file lock. If it cannot be acquired, a TimeoutError
# will be thrown.
with TempFileLock(f"{target_dir}.lock", timeout=0):
_delete_path_unsafe(target_dir)
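The single-attempt locking pattern these comments describe can be sketched with the filelock package directly (Ray's TempFileLock is a wrapper around it; the lock path here is illustrative):

from filelock import FileLock, Timeout

try:
    # timeout=0 means exactly one non-blocking acquisition attempt.
    with FileLock("/tmp/target_dir.lock", timeout=0):
        pass  # unpack or copy the directory while holding the lock
except Timeout:
    # Another process already holds the lock, so another worker is
    # handling this directory; give up instead of blocking.
    pass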
@@ -102,15 +102,15 @@ def kill(self):
failures = 0
max_failures = 3
node = None
-terminated_succesfully = False
-while not terminated_succesfully and failures < max_failures:
+terminated_successfully = False
+while not terminated_successfully and failures < max_failures:
try:
node = get_random_node()
if not node:
logger.info("No alive worker nodes")
continue
terminate_node(node["NodeID"])
-terminated_succesfully = True
+terminated_successfully = True
logger.info(
f"Killed node {node['NodeID']} with IP {node['NodeManagerAddress']}"
)
@@ -125,7 +125,7 @@ def kill(self):
{
"timestamp": time.time(),
"node": node,
"terminated_succesfully": terminated_succesfully,
"terminated_successfully": terminated_successfully,
}
)
# safe_write_to_results_json(self.history)
@@ -10,7 +10,7 @@

If a trial is restored, it should restart from the last checkpointed iteration.

-The test is succesfull if all trials finish with the expected number of iterations,
+The test is successful if all trials finish with the expected number of iterations,
and that a checkpoint is always available when restoring.

This test only works on AWS as it uses AWS CLI to terminate nodes.
6 changes: 3 additions & 3 deletions rllib/core/rl_module/tests/test_rl_module_specs.py
@@ -88,7 +88,7 @@ def test_customized_multi_agent_module(self):
spec.build()

def test_get_spec_from_module_multi_agent(self):
"""Tests wether MultiRLModuleSpec.from_module() works."""
"""Tests whether MultiRLModuleSpec.from_module() works."""
env = gym.make("CartPole-v1")
num_agents = 2
module_specs = {}
@@ -107,7 +107,7 @@ def test_get_spec_from_module_multi_agent(self):
self.assertEqual(spec, spec_from_module)

def test_get_spec_from_module_single_agent(self):
"""Tests wether RLModuleSpec.from_module() works."""
"""Tests whether RLModuleSpec.from_module() works."""
env = gym.make("CartPole-v1")
spec = RLModuleSpec(
module_class=VPGTorchRLModule,
@@ -121,7 +121,7 @@ def test_get_spec_from_module_single_agent(self):
self.assertEqual(spec, spec_from_module)

def test_update_specs(self):
"""Tests wether RLModuleSpec.update() works."""
"""Tests whether RLModuleSpec.update() works."""
env = gym.make("CartPole-v0")

# Test if RLModuleSpec.update() works.
2 changes: 1 addition & 1 deletion rllib/offline/estimators/tests/test_ope_math.py
@@ -51,7 +51,7 @@ def action_distribution_fn(self, model, obs_batch=None, **kwargs):

# add 0.5 to the action that gave a good reward (2) and subtract 0.5 from the
# action that gave a bad reward (1)
-# to acheive this I can just subtract 1.5 from old_reward
+# to achieve this I can just subtract 1.5 from old_reward
delta = old_rewards - 1.5
if not self.improved:
# reverse the logic for a worse policy
2 changes: 1 addition & 1 deletion src/ray/common/cgroup2/sysfs_cgroup_driver.h
@@ -168,7 +168,7 @@ class SysFsCgroupDriver : public CgroupDriverInterface {

/**
Reads the cgroup.procs of "from" and writes them out to the given file.
-The cgroup.procs file is newline seperated. The current user must have
+The cgroup.procs file is newline separated. The current user must have
read-write permissions to both cgroup.procs file as well as the common ancestor
of the source and destination cgroups.
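As a rough illustration of the sysfs protocol described here (a Python sketch with hypothetical cgroup paths): each line of cgroup.procs holds one PID, and writing a single PID into the destination's cgroup.procs migrates that process:

src = "/sys/fs/cgroup/from/cgroup.procs"  # hypothetical source cgroup
dst = "/sys/fs/cgroup/to/cgroup.procs"    # hypothetical destination cgroup
with open(src) as f:
    pids = [line.strip() for line in f if line.strip()]
for pid in pids:
    # The kernel moves one process per write to cgroup.procs.
    with open(dst, "w") as out:
        out.write(pid)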

2 changes: 1 addition & 1 deletion src/ray/gcs/gcs_placement_group_scheduler.h
@@ -467,7 +467,7 @@ class GcsPlacementGroupScheduler : public GcsPlacementGroupSchedulerInterface {
///
/// \param bundle The node to which the bundle is scheduled and the bundle's
/// specification.
-/// \return True if the bundle is succesfully released. False otherwise.
+/// \return True if the bundle is successfully released. False otherwise.
bool TryReleasingBundleResources(
const std::pair<NodeID, std::shared_ptr<const BundleSpecification>> &bundle);

4 changes: 2 additions & 2 deletions src/ray/gcs/gcs_worker_manager.h
@@ -81,10 +81,10 @@ class GcsWorkerManager : public rpc::WorkerInfoGcsServiceHandler {
std::vector<std::function<void(std::shared_ptr<rpc::WorkerTableData>)>>
worker_dead_listeners_;

-/// Tracks the number of occurences of worker crash due to system error
+/// Tracks the number of occurrences of worker crash due to system error
int32_t worker_crash_system_error_count_ = 0;

-/// Tracks the number of occurences of worker crash due to OOM
+/// Tracks the number of occurrences of worker crash due to OOM
int32_t worker_crash_oom_count_ = 0;

/// Ray metrics
2 changes: 1 addition & 1 deletion src/ray/raylet/local_object_manager.cc
@@ -355,7 +355,7 @@ void LocalObjectManager::SpillObjectsInternal(
io_worker_pool_.PushSpillWorker(io_worker);
size_t num_objects_spilled = status.ok() ? r.spilled_objects_url_size() : 0;
// Object spilling is always done in the order of the request.
-// For example, if an object succeeded, it'll guarentee that all objects
+// For example, if an object succeeded, it'll guarantee that all objects
// before this will succeed.
RAY_CHECK(num_objects_spilled <= requested_objects_to_spill.size());
for (size_t i = num_objects_spilled; i != requested_objects_to_spill.size();