4 changes: 2 additions & 2 deletions deepspeed/autotuning/autotuner.py
@@ -71,7 +71,7 @@ def __init__(self, args, active_resources):
logger.info(f"Created autotuning experiments directory: {self.exps_dir}")
except:
logger.error(
f"Failed to create {self.exps_dir}, please check `exps_dir` in the autotuning config file is accessible by all the nodes in the job."
f"Failed to create {self.exps_dir}, please check exps_dir in the autotuning config file is accessible by all the nodes in the job."
)
exit(-1)

@@ -84,7 +84,7 @@ def __init__(self, args, active_resources):
logger.info(f"Created autotuning results directory: {self.exps_dir}")
except:
logger.error(
f"Failed to create {self.results_dir}, please check `results_dir` in the autotuning config file is accessible by all the nodes in the job."
f"Failed to create {self.results_dir}, please check results_dir in the autotuning config file is accessible by all the nodes in the job."
)
exit(-1)

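The two messages above point at the `exps_dir` and `results_dir` entries of the autotuning config. A minimal sketch of where those keys live, assuming a standard `autotuning` section in the DeepSpeed config; the paths are placeholders and must sit on storage every node can reach.

```python
# Illustrative DeepSpeed config fragment (not taken from this PR): the
# exps_dir/results_dir keys referenced by the error messages above live in the
# "autotuning" section and should point at storage shared by all nodes.
ds_config = {
    "train_batch_size": 16,
    "autotuning": {
        "enabled": True,
        "exps_dir": "/shared/autotuning/exps",        # reachable from every node
        "results_dir": "/shared/autotuning/results",  # same requirement
    },
}
```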
2 changes: 1 addition & 1 deletion deepspeed/inference/engine.py
@@ -600,7 +600,7 @@ def _generate(self, *inputs, **kwargs):
tensor_length = input_tensor.shape[-1]
if tensor_length > self._config.max_out_tokens:
raise RuntimeError(
f"Input with size {tensor_length} exceeds maximum length of {self._config.max_out_tokens}. Please increase `max_tokens` in the DeepSpeed Inference Config."
f"Input with size {tensor_length} exceeds maximum length of {self._config.max_out_tokens}. Please increase max_tokens in the DeepSpeed Inference Config."
)

return self.module.generate(*inputs, **kwargs)
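The check above rejects prompts longer than `max_out_tokens`, as the updated message says. A hedged sketch of raising that limit when building the engine; it assumes `max_out_tokens` can be passed through `deepspeed.init_inference` as an inference-config field, and the gpt2 model is purely illustrative (a CUDA GPU and the transformers package are required).

```python
# Hedged sketch: raise the token budget enforced by the length check above.
# Assumes max_out_tokens is accepted as an inference-config field by
# deepspeed.init_inference; model choice and sizes are illustrative.
import deepspeed
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")

engine = deepspeed.init_inference(
    model,
    dtype=torch.float16,
    max_out_tokens=2048,  # inputs longer than this trigger the RuntimeError above
)

inputs = tokenizer("hello world", return_tensors="pt").to("cuda")
outputs = engine.generate(**inputs, max_new_tokens=32)
```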
2 changes: 1 addition & 1 deletion deepspeed/launcher/runner.py
@@ -491,7 +491,7 @@ def main(args=None):
args.master_addr = result.decode('utf-8').split()[0]
if not args.master_addr:
raise RuntimeError(
f"Unable to detect suitable master address via `hostname -I`, please manually specify one via --master_addr"
"Unable to detect suitable master address via `hostname -I`, please manually specify one via --master_addr"
)
logger.info(f"Using IP address of {args.master_addr} for node {first_host}")

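The change above only drops a redundant `f` prefix; the surrounding logic takes the first address reported by `hostname -I` on the first host. A standalone sketch of that detection with the same fallback to an explicit `--master_addr` (Linux-only; the helper name is illustrative, not a DeepSpeed API):

```python
# Standalone sketch of the detection runner.py performs: use the first address
# printed by `hostname -I`, and insist on an explicit --master_addr otherwise.
import subprocess

def detect_master_addr(explicit_addr=None):
    if explicit_addr:  # --master_addr was given on the command line
        return explicit_addr
    result = subprocess.check_output(["hostname", "-I"])  # Linux-specific
    addrs = result.decode("utf-8").split()
    if not addrs:
        raise RuntimeError(
            "Unable to detect suitable master address via `hostname -I`, "
            "please manually specify one via --master_addr")
    return addrs[0]  # first reported IP, as in runner.py

print(detect_master_addr())
```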
4 changes: 2 additions & 2 deletions deepspeed/runtime/compression/cupy.py
@@ -14,10 +14,10 @@ def __init__(self):
pass

def torch2cupy(self, tensor):
- return cupy.fromDlpack(to_dlpack(tensor))
+ return cupy.from_dlpack(to_dlpack(tensor))

def cupy2torch(self, cupy_tensor):
- return from_dlpack(cupy_tensor.toDlpack())
+ return from_dlpack(cupy_tensor)

def compress_by_chunk(self, cupy_bool_tensor, num_chunks):
packed_sign = cupy.packbits(cupy_bool_tensor)
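Unlike the message-only edits elsewhere in this PR, this hunk changes API calls: `cupy.fromDlpack` is the deprecated spelling of `cupy.from_dlpack`, and a modern `torch.utils.dlpack.from_dlpack` can consume a CuPy array directly because CuPy implements the `__dlpack__` protocol. A round-trip sketch of the updated conversions (requires a CUDA GPU, a recent CuPy, and PyTorch >= 1.10):

```python
# Round-trip sketch of the torch <-> CuPy conversion the updated helper performs.
import cupy
import torch
from torch.utils.dlpack import to_dlpack, from_dlpack

t = torch.ones(4, device="cuda")

# torch -> cupy: cupy.fromDlpack is deprecated, cupy.from_dlpack replaces it.
c = cupy.from_dlpack(to_dlpack(t))

# cupy -> torch: from_dlpack now takes the CuPy array itself, so the explicit
# toDlpack() call is no longer needed.
t2 = from_dlpack(c)

assert torch.equal(t, t2)  # the conversions share the same GPU memory
```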
2 changes: 1 addition & 1 deletion deepspeed/runtime/engine.py
@@ -3790,7 +3790,7 @@ def save_16bit_model(self, save_dir, save_filename="pytorch_model.bin", exclude_
else:
# the model will be bogus if not consolidated so don't confuse the user by saving it
logger.info(
f"Did not save the model {path} because `stage3_gather_16bit_weights_on_model_save` is False")
f"Did not save the model {path} because stage3_gather_16bit_weights_on_model_save is False")
return False
else:
state_dict = self.module_state_dict(exclude_frozen_parameters=exclude_frozen_parameters)
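The message above is logged under ZeRO stage 3, where each rank holds only a shard of the 16-bit weights, so nothing useful can be written unless the shards are gathered first. A hedged config sketch enabling that gathering; engine construction is elided and illustrative.

```python
# Hedged sketch: with this flag set, save_16bit_model() consolidates the ZeRO-3
# weight shards on rank 0 and writes a usable checkpoint instead of logging the
# message above. Config keys are standard DeepSpeed ZeRO options.
ds_config = {
    "train_batch_size": 8,
    "fp16": {"enabled": True},
    "zero_optimization": {
        "stage": 3,
        "stage3_gather_16bit_weights_on_model_save": True,
    },
}

# engine, *_ = deepspeed.initialize(model=model, config=ds_config, ...)
# engine.save_16bit_model("checkpoints/", "pytorch_model.bin")
```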
3 changes: 1 addition & 2 deletions deepspeed/runtime/zero/partition_parameters.py
@@ -944,8 +944,7 @@ def __init__(self,
"""
if config is not None:
config_dict_or_path = config
- logger.warning(
-     f'zero.Init: the `config` argument is deprecated. Please use `config_dict_or_path` instead.')
+ logger.warning('zero.Init: the `config` argument is deprecated. Please use `config_dict_or_path` instead.')
_ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,
mpu) if config_dict_or_path is not None else None
if _ds_config is not None:
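The warning above asks callers to switch from the deprecated `config` argument to `config_dict_or_path`. A short sketch of the non-deprecated call, assuming the usual distributed setup is already in place; the model and config values are illustrative.

```python
# Hedged sketch of the non-deprecated spelling the warning above asks for:
# pass the DeepSpeed config as config_dict_or_path (a dict or a path to a
# JSON file) when entering zero.Init.
import deepspeed
import torch

ds_config = {
    "train_batch_size": 8,
    "zero_optimization": {"stage": 3},
}

# Parameters created inside the context are partitioned across ranks (ZeRO-3)
# at construction time instead of being materialised fully on every rank.
with deepspeed.zero.Init(config_dict_or_path=ds_config):
    model = torch.nn.Linear(4096, 4096)
```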
2 changes: 1 addition & 1 deletion deepspeed/utils/logging.py
@@ -158,6 +158,6 @@ def should_log_le(max_log_level_str):

max_log_level_str = max_log_level_str.lower()
if max_log_level_str not in log_levels:
raise ValueError(f"{max_log_level_str} is not one of the `logging` levels")
raise ValueError(f"{max_log_level_str} is not one of the logging levels")

return get_current_level() <= log_levels[max_log_level_str]
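A small usage sketch for the helper touched above; it assumes `should_log_le` is imported from the module that defines it, `deepspeed/utils/logging.py`.

```python
# Usage sketch: gate expensive logging on the current DeepSpeed log level.
from deepspeed.utils import logger
from deepspeed.utils.logging import should_log_le

if should_log_le("debug"):  # True only when the current level is DEBUG (maximally verbose)
    logger.debug("expensive debug-only diagnostics")

# Passing a name that is not a logging level (e.g. "verbose") raises the
# ValueError whose message is edited above.
```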
10 changes: 5 additions & 5 deletions op_builder/builder.py
@@ -412,8 +412,8 @@ def cpu_arch(self):
try:
cpu_info = get_cpu_info()
except Exception as e:
self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
"falling back to `lscpu` to get this information.")
self.warning(f"{self.name} attempted to use py-cpuinfo but failed (exception type: {type(e)}, {e}), "
"falling back to lscpu to get this information.")
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return "-march=native"
@@ -471,8 +471,8 @@ def simd_width(self):
try:
cpu_info = get_cpu_info()
except Exception as e:
self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
"falling back to `lscpu` to get this information.")
self.warning(f"{self.name} attempted to use py-cpuinfo but failed (exception type: {type(e)}, {e}), "
"falling back to lscpu to get this information.")
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return '-D__SCALAR__'
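Both hunks above touch the same fallback pattern: prefer the py-cpuinfo package and drop back to parsing `lscpu` output when it fails. A standalone sketch of that pattern; the function name is illustrative rather than a DeepSpeed API, and `lscpu` is Linux-only.

```python
# Standalone sketch of the py-cpuinfo -> lscpu fallback used by cpu_arch() and
# simd_width(); returns the CPU flag list either way.
import subprocess

def get_cpu_flags():
    try:
        from cpuinfo import get_cpu_info  # provided by the py-cpuinfo package
        return get_cpu_info().get("flags", [])
    except Exception as e:
        print(f"py-cpuinfo failed ({type(e).__name__}: {e}), falling back to lscpu")
        out = subprocess.check_output(["lscpu"], text=True)  # Linux-only
        for line in out.splitlines():
            if line.startswith("Flags:"):
                return line.split(":", 1)[1].split()
        return []

print("avx512f" in get_cpu_flags())
```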
@@ -642,7 +642,7 @@ def compute_capability_args(self, cross_compile_archs=None):
if cross_compile_archs_env is not None:
if cross_compile_archs is not None:
print(
f"{WARNING} env var `TORCH_CUDA_ARCH_LIST={cross_compile_archs_env}` overrides `cross_compile_archs={cross_compile_archs}`"
f"{WARNING} env var TORCH_CUDA_ARCH_LIST={cross_compile_archs_env} overrides cross_compile_archs={cross_compile_archs}"
)
cross_compile_archs = cross_compile_archs_env.replace(' ', ';')
else:
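The last hunk only unquotes the warning text, but it documents a useful precedence rule: a `TORCH_CUDA_ARCH_LIST` environment variable overrides the `cross_compile_archs` default, with spaces normalised to the `;` separator nvcc expects. A small sketch of that behaviour in isolation (values are illustrative):

```python
# Sketch of the override logic in compute_capability_args: the environment
# variable wins over the builder default, and "7.0 8.0" becomes "7.0;8.0".
import os

os.environ["TORCH_CUDA_ARCH_LIST"] = "7.0 8.0"  # e.g. exported before building ops

cross_compile_archs = "6.0;7.0"  # builder default (illustrative)
env_archs = os.environ.get("TORCH_CUDA_ARCH_LIST")
if env_archs is not None:
    cross_compile_archs = env_archs.replace(" ", ";")

print(cross_compile_archs)  # -> "7.0;8.0"
```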