
Commit b4177e4

Use new dlpack api; Formatting fixes (#7101)
Fix CI issues by using the new dlpack [api](https://pytorch.org/docs/stable/_modules/torch/utils/dlpack.html#from_dlpack); minor pre-commit fixes.

Signed-off-by: Olatunji Ruwase <olruwase@microsoft.com>
1 parent 02bbf50 commit b4177e4


8 files changed: +14 −15 lines changed


deepspeed/autotuning/autotuner.py

Lines changed: 2 additions & 2 deletions
@@ -71,7 +71,7 @@ def __init__(self, args, active_resources):
         logger.info(f"Created autotuning experiments directory: {self.exps_dir}")
     except:
         logger.error(
-            f"Failed to create {self.exps_dir}, please check `exps_dir` in the autotuning config file is accessible by all the nodes in the job."
+            f"Failed to create {self.exps_dir}, please check exps_dir in the autotuning config file is accessible by all the nodes in the job."
         )
         exit(-1)

@@ -84,7 +84,7 @@ def __init__(self, args, active_resources):
         logger.info(f"Created autotuning results directory: {self.exps_dir}")
     except:
         logger.error(
-            f"Failed to create {self.results_dir}, please check `results_dir` in the autotuning config file is accessible by all the nodes in the job."
+            f"Failed to create {self.results_dir}, please check results_dir in the autotuning config file is accessible by all the nodes in the job."
        )
         exit(-1)
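
Both messages above point at paths set in the autotuning section of the DeepSpeed config. A minimal sketch of that section (not part of the commit; key names are taken from the messages, and the path values are placeholders that must be reachable from every node in the job):

# Hypothetical config snippet showing where exps_dir and results_dir live.
ds_config = {
    "autotuning": {
        "enabled": True,
        "exps_dir": "/shared/autotune/exps",        # directory checked by the first message
        "results_dir": "/shared/autotune/results",  # directory checked by the second message
    },
}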

deepspeed/inference/engine.py

Lines changed: 1 addition & 1 deletion
@@ -600,7 +600,7 @@ def _generate(self, *inputs, **kwargs):
     tensor_length = input_tensor.shape[-1]
     if tensor_length > self._config.max_out_tokens:
         raise RuntimeError(
-            f"Input with size {tensor_length} exceeds maximum length of {self._config.max_out_tokens}. Please increase `max_tokens` in the DeepSpeed Inference Config."
+            f"Input with size {tensor_length} exceeds maximum length of {self._config.max_out_tokens}. Please increase max_tokens in the DeepSpeed Inference Config."
         )

     return self.module.generate(*inputs, **kwargs)

deepspeed/launcher/runner.py

Lines changed: 1 addition & 1 deletion
@@ -491,7 +491,7 @@ def main(args=None):
     args.master_addr = result.decode('utf-8').split()[0]
     if not args.master_addr:
         raise RuntimeError(
-            f"Unable to detect suitable master address via `hostname -I`, please manually specify one via --master_addr"
+            "Unable to detect suitable master address via `hostname -I`, please manually specify one via --master_addr"
         )
     logger.info(f"Using IP address of {args.master_addr} for node {first_host}")

deepspeed/runtime/compression/cupy.py

Lines changed: 2 additions & 2 deletions
@@ -14,10 +14,10 @@ def __init__(self):
         pass

     def torch2cupy(self, tensor):
-        return cupy.fromDlpack(to_dlpack(tensor))
+        return cupy.from_dlpack(to_dlpack(tensor))

     def cupy2torch(self, cupy_tensor):
-        return from_dlpack(cupy_tensor.toDlpack())
+        return from_dlpack(cupy_tensor)

     def compress_by_chunk(self, cupy_bool_tensor, num_chunks):
         packed_sign = cupy.packbits(cupy_bool_tensor)
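
This is the substantive dlpack change in the commit: cupy.fromDlpack and toDlpack() are the legacy capsule-based calls, while cupy.from_dlpack and torch.utils.dlpack.from_dlpack follow the newer DLPack protocol. A minimal sketch of the round trip (not part of the commit; assumes CUDA builds of both torch and cupy are installed):

import cupy
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack

t = torch.ones(4, device="cuda")

# torch -> cupy: cupy.from_dlpack consumes the capsule produced by to_dlpack
c = cupy.from_dlpack(to_dlpack(t))

# cupy -> torch: from_dlpack takes the CuPy array directly, since CuPy arrays
# implement __dlpack__; no toDlpack() capsule is needed anymore
t2 = from_dlpack(c)

assert torch.equal(t, t2)  # zero-copy exchange on the same device, values match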

deepspeed/runtime/engine.py

Lines changed: 1 addition & 1 deletion
@@ -3790,7 +3790,7 @@ def save_16bit_model(self, save_dir, save_filename="pytorch_model.bin", exclude_
     else:
         # the model will be bogus if not consolidated so don't confuse the user by saving it
         logger.info(
-            f"Did not save the model {path} because `stage3_gather_16bit_weights_on_model_save` is False")
+            f"Did not save the model {path} because stage3_gather_16bit_weights_on_model_save is False")
         return False
 else:
     state_dict = self.module_state_dict(exclude_frozen_parameters=exclude_frozen_parameters)
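
The flag named in the message is a ZeRO stage-3 option: while it is False, save_16bit_model() skips the save because the 16-bit weights are still partitioned and the saved model would be bogus. A minimal sketch of enabling it (not part of the commit; all other config entries omitted):

# Hypothetical config snippet: gather the fp16/bf16 weights at save time.
ds_config = {
    "zero_optimization": {
        "stage": 3,
        "stage3_gather_16bit_weights_on_model_save": True,
    },
}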

deepspeed/runtime/zero/partition_parameters.py

Lines changed: 1 addition & 2 deletions
@@ -944,8 +944,7 @@ def __init__(self,
     """
     if config is not None:
         config_dict_or_path = config
-        logger.warning(
-            f'zero.Init: the `config` argument is deprecated. Please use `config_dict_or_path` instead.')
+        logger.warning('zero.Init: the `config` argument is deprecated. Please use `config_dict_or_path` instead.')
     _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,
                                                           mpu) if config_dict_or_path is not None else None
     if _ds_config is not None:
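
The warning kept here documents that zero.Init's config argument is deprecated in favor of config_dict_or_path. A minimal sketch of the preferred call (not part of the commit; MyModel and the one-entry config are placeholders):

import deepspeed

ds_config = {"zero_optimization": {"stage": 3}}

# Preferred: pass the config via config_dict_or_path; passing config= still
# works but emits the deprecation warning shown above.
with deepspeed.zero.Init(config_dict_or_path=ds_config):
    model = MyModel()  # hypothetical module constructed under ZeRO-3 partitioning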

deepspeed/utils/logging.py

Lines changed: 1 addition & 1 deletion
@@ -158,6 +158,6 @@ def should_log_le(max_log_level_str):

     max_log_level_str = max_log_level_str.lower()
     if max_log_level_str not in log_levels:
-        raise ValueError(f"{max_log_level_str} is not one of the `logging` levels")
+        raise ValueError(f"{max_log_level_str} is not one of the logging levels")

     return get_current_level() <= log_levels[max_log_level_str]

op_builder/builder.py

Lines changed: 5 additions & 5 deletions
@@ -412,8 +412,8 @@ def cpu_arch(self):
     try:
         cpu_info = get_cpu_info()
     except Exception as e:
-        self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
-                     "falling back to `lscpu` to get this information.")
+        self.warning(f"{self.name} attempted to use py-cpuinfo but failed (exception type: {type(e)}, {e}), "
+                     "falling back to lscpu to get this information.")
         cpu_info = self._backup_cpuinfo()
         if cpu_info is None:
             return "-march=native"

@@ -471,8 +471,8 @@ def simd_width(self):
     try:
         cpu_info = get_cpu_info()
     except Exception as e:
-        self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
-                     "falling back to `lscpu` to get this information.")
+        self.warning(f"{self.name} attempted to use py-cpuinfo but failed (exception type: {type(e)}, {e}), "
+                     "falling back to lscpu to get this information.")
         cpu_info = self._backup_cpuinfo()
         if cpu_info is None:
             return '-D__SCALAR__'

@@ -642,7 +642,7 @@ def compute_capability_args(self, cross_compile_archs=None):
     if cross_compile_archs_env is not None:
         if cross_compile_archs is not None:
             print(
-                f"{WARNING} env var `TORCH_CUDA_ARCH_LIST={cross_compile_archs_env}` overrides `cross_compile_archs={cross_compile_archs}`"
+                f"{WARNING} env var TORCH_CUDA_ARCH_LIST={cross_compile_archs_env} overrides cross_compile_archs={cross_compile_archs}"
             )
         cross_compile_archs = cross_compile_archs_env.replace(' ', ';')
     else:
