Fix a couple more format calls
akx committed Feb 7, 2024
1 parent 031216b commit d81dff0
Showing 4 changed files with 6 additions and 9 deletions.
2 changes: 1 addition & 1 deletion docs/source/usage_guides/deepspeed.md
@@ -656,7 +656,7 @@ ZeRO Stage-3 has 2 options:
 Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
 ```python
 success = model.save_checkpoint(PATH, ckpt_id, checkpoint_state_dict)
-status_msg = "checkpointing: PATH={}, ckpt_id={}".format(PATH, ckpt_id)
+status_msg = f"checkpointing: PATH={PATH}, ckpt_id={ckpt_id}"
 if success:
     logging.info(f"Success {status_msg}")
 else:
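For reference, a quick standalone check (placeholder values, not taken from the example script) showing that the new f-string builds the same message as the old `str.format` call:

```python
# Standalone sanity check; PATH and ckpt_id are placeholder values
# chosen for illustration only.
PATH = "/tmp/checkpoints"
ckpt_id = "step_1000"

old_msg = "checkpointing: PATH={}, ckpt_id={}".format(PATH, ckpt_id)
new_msg = f"checkpointing: PATH={PATH}, ckpt_id={ckpt_id}"

# Both yield 'checkpointing: PATH=/tmp/checkpoints, ckpt_id=step_1000'
assert old_msg == new_msg
```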
5 changes: 2 additions & 3 deletions src/accelerate/accelerator.py
@@ -429,15 +429,14 @@ def __init__(
         # Mixed precision attributes
         self.scaler = None
         self.native_amp = False
-        err = "{mode} mixed precision requires {requirement}"
         if (
             self.state.mixed_precision == "fp16"
             and self.device.type != "cpu"
             and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)
         ):
             self.native_amp = True
             if self.device.type not in ("xpu", "cuda", "mps", "npu"):
-                raise ValueError(err.format(mode="fp16", requirement="a GPU"))
+                raise ValueError(f"fp16 mixed precision requires a GPU (not {self.device.type!r}).")
             kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}
             if self.distributed_type == DistributedType.FSDP:
                 from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
@@ -457,7 +456,7 @@ def __init__(
             else:
                 self.native_amp = is_bf16_available(True)
             if mixed_precision == "bf16" and not self.native_amp and not is_tpu_available():
-                raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device."))
+                raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")

         # Start of internal step tracking
         self.step = 0
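The new fp16 message interpolates the device type with the `!r` conversion, which formats the value with `repr()` so it appears quoted in the error. A minimal sketch with a placeholder value (not the actual `Accelerator` state):

```python
# Minimal illustration of the !r conversion used in the new message;
# device_type is a placeholder, not read from an Accelerator instance.
device_type = "cpu"

message = f"fp16 mixed precision requires a GPU (not {device_type!r})."
print(message)  # fp16 mixed precision requires a GPU (not 'cpu').
```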
4 changes: 1 addition & 3 deletions src/accelerate/commands/launch.py
@@ -924,14 +924,12 @@ def _validate_launch_command(args):
             args.mixed_precision = defaults.mixed_precision
             mp_from_config_flag = True
     else:
-        native_amp = False
-        err = "{mode} mixed precision requires {requirement}"
         if args.use_cpu or (args.use_xpu and torch.xpu.is_available()):
             native_amp = is_torch_version(">=", "1.10")
         else:
             native_amp = is_bf16_available(True)
         if args.mixed_precision == "bf16" and not native_amp and not (args.tpu and is_tpu_available()):
-            raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device."))
+            raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")

     # Silently set the default here
     if args.dynamo_backend is None:
4 changes: 2 additions & 2 deletions src/accelerate/utils/megatron_lm.py
@@ -844,8 +844,8 @@ def initialize(accelerator, extra_args_provider=None, args_defaults={}):
         if getattr(args, key, None) is not None:
             if args.rank == 0:
                 print(
-                    f"WARNING: overriding default arguments for {key}:{getattr(args, key)} \
-with {key}:{value}",
+                    f"WARNING: overriding default arguments for "
+                    f"{key}:{getattr(args, key)} with {key}:{value}",
                     flush=True,
                 )
         setattr(args, key, value)
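The old message used a backslash line continuation inside the f-string, so any leading whitespace on the second source line became part of the printed text; adjacent f-string literals are concatenated at compile time without that stray whitespace. A small sketch with placeholder values (not the Megatron-LM argument handling):

```python
# Placeholder values for illustration only.
key, value = "seq_length", 2048
current = 1024

# Backslash continuation: the second line's leading spaces end up inside the string.
old = f"WARNING: overriding default arguments for {key}:{current} \
    with {key}:{value}"

# Adjacent literals concatenate cleanly, with no embedded indentation.
new = (
    f"WARNING: overriding default arguments for "
    f"{key}:{current} with {key}:{value}"
)

print(repr(old))  # note the run of spaces before 'with'
print(repr(new))
```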
