Commit 04ea4b0: Fix typos

kartikayk committed Jan 29, 2024
1 parent a43fb0c commit 04ea4b0
Showing 3 changed files with 4 additions and 4 deletions.
2 changes: 1 addition & 1 deletion recipes/configs/alpaca_llama2_finetune.yaml
@@ -18,7 +18,7 @@ loss: CrossEntropyLoss
 output_dir: /tmp/alpaca-llama2-finetune
 device: cuda
 dtype: fp32
-activation_checkpointing: False
+activation_checkpointing: True

 cpu_offload: False

 # Metrics arguments
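For context, activation checkpointing trades compute for memory: activations are recomputed during the backward pass instead of being stored. The sketch below is not the torchtune recipe's implementation; it is a minimal illustration of the idea using PyTorch's torch.utils.checkpoint, with a hypothetical two-block model.

# Minimal illustration of activation checkpointing (assumption: not the
# recipe's actual code). Each checkpointed block recomputes its forward
# pass during backward instead of keeping activations in memory.
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.block1 = nn.Sequential(nn.Linear(128, 128), nn.ReLU())
        self.block2 = nn.Sequential(nn.Linear(128, 128), nn.ReLU())

    def forward(self, x):
        # use_reentrant=False is the recommended mode in recent PyTorch
        x = checkpoint(self.block1, x, use_reentrant=False)
        x = checkpoint(self.block2, x, use_reentrant=False)
        return x

model = TinyModel()
out = model(torch.randn(4, 128, requires_grad=True))
out.sum().backward()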
2 changes: 1 addition & 1 deletion recipes/configs/alpaca_llama2_full_finetune.yaml
@@ -1,7 +1,7 @@
 # Dataset and Dataloader
 dataset: alpaca
 seed: null
-shuffle: False
+shuffle: True

 # Model Arguments
 model: llama2_7b
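Enabling shuffle means the dataloader visits training samples in a new random order each epoch. The sketch below is a generic PyTorch illustration of that pattern, not the recipe's actual dataloader setup; the dataset and seed value are placeholders.

# Generic illustration of shuffling with a seeded generator (assumption:
# not the recipe's actual dataloader code).
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(100).float())  # placeholder dataset
generator = torch.Generator().manual_seed(0)        # seeded for reproducibility

dataloader = DataLoader(
    dataset,
    batch_size=8,
    shuffle=True,       # mirrors shuffle: True in the config
    generator=generator,
)

for batch in dataloader:
    pass  # training step would go here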
4 changes: 2 additions & 2 deletions recipes/full_finetune.py
@@ -226,7 +226,7 @@ def save_checkpoint(self, epoch: int) -> None:

         # if training is in-progress, checkpoint the optimizer state as well
         if epoch + 1 < self.total_epochs:
-            ckpt_dict.update({"optimzer": self.optimizer})
+            ckpt_dict.update({"optimizer": self.optimizer})
         utils.save_checkpoint(ckpt_dict, output_loc)

         if self.is_rank_zero:
@@ -288,7 +288,7 @@ def recipe_main() -> None:
     # Env variables set by torch run; only need to initialize process group
     init_process_group(backend="nccl")

-    recipe = FullFinetune(params=recipe_params)
+    recipe = FullFinetuneRecipe(params=recipe_params)
     recipe.load_checkpoint(model_checkpoint=recipe_params.model_checkpoint)
     recipe.train()

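The first hunk fixes the key under which the optimizer is stored in the checkpoint dict. The sketch below is a hypothetical illustration of why the spelling matters when resuming: a resume path that looks up "optimizer" would fail or lose state saved under the misspelled key. It uses plain torch.save/torch.load rather than torchtune's utils.save_checkpoint.

# Hypothetical illustration (not torchtune's actual checkpoint utilities):
# the optimizer state must be saved and loaded under the same key.
import torch
import torch.nn as nn

model = nn.Linear(4, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# --- save (mirrors the fixed code path) ---
ckpt_dict = {"model": model.state_dict()}
ckpt_dict.update({"optimizer": optimizer.state_dict()})  # correct key
torch.save(ckpt_dict, "/tmp/ckpt.pt")

# --- resume ---
ckpt = torch.load("/tmp/ckpt.pt")
model.load_state_dict(ckpt["model"])
# With the old misspelled key "optimzer", this lookup would raise KeyError
# and the optimizer state would be lost on resume.
optimizer.load_state_dict(ckpt["optimizer"])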
