
Commit

Merge pull request #105 from viktoriussuwandi/viktoriussuwandi-patch
Viktoriussuwandi patch
winglian authored May 30, 2023
2 parents 1cd2bbb + 6674780 commit ad50900
Showing 16 changed files with 16 additions and 16 deletions.
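All 16 changes are the same one-line edit: wandb_log_model drops its "checkpoint" value and is left empty, so these configs no longer ask Weights & Biases to upload model checkpoints as artifacts by default. For orientation, a minimal sketch of the W&B block the configs share (the project name below is hypothetical, and the comments reflect my reading of the W&B WANDB_LOG_MODEL convention, which I'm assuming these fields map onto; the diff itself doesn't say):

    wandb_project: my-project    # hypothetical name; empty leaves W&B logging off
    wandb_watch:                 # empty: don't watch gradients/parameters
    wandb_run_id:                # empty: let W&B generate a run id
    wandb_log_model:             # empty: don't upload model artifacts
                                 # ("checkpoint" uploaded every saved checkpoint)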
configs/cerebras_1_3B_alpaca.yml: 2 changes (1 addition, 1 deletion)
@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
 wandb_project: pythia-1.4b-lora
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-alpaca
 batch_size: 32
 micro_batch_size: 4
configs/galactica_1_3B.yml: 2 changes (1 addition, 1 deletion)
@@ -21,7 +21,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-llama-alpaca
 batch_size: 32
 micro_batch_size: 16
configs/gpt_neox_20b.yml: 2 changes (1 addition, 1 deletion)
@@ -23,7 +23,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
 wandb_project: gpt4all-neox-20b
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./gpt4all-neox-20b
 batch_size: 48
 micro_batch_size: 4
configs/llama_13B_alpaca.yml: 2 changes (1 addition, 1 deletion)
@@ -21,7 +21,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./llama-13b-sharegpt
 batch_size: 64
 micro_batch_size: 2
configs/llama_65B_alpaca.yml: 2 changes (1 addition, 1 deletion)
@@ -27,7 +27,7 @@ lora_fan_in_fan_out: false
 wandb_project: llama-65b-lora
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-llama-alpaca
 batch_size: 128
 micro_batch_size: 16
configs/llama_7B_4bit.yml: 2 changes (1 addition, 1 deletion)
@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-test
 batch_size: 8
 micro_batch_size: 2
configs/llama_7B_alpaca.yml: 2 changes (1 addition, 1 deletion)
@@ -26,7 +26,7 @@ lora_fan_in_fan_out: false
 wandb_project: llama-7b-lora
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-llama-alpaca
 batch_size: 128
 micro_batch_size: 16
configs/llama_7B_jeopardy.yml: 2 changes (1 addition, 1 deletion)
@@ -22,7 +22,7 @@ lora_fan_in_fan_out: false
 wandb_project: jeopardy-bot-7b
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./jeopardy-bot-7b
 batch_size: 4
 micro_batch_size: 1
configs/pythia_1_2B_alpaca.yml: 2 changes (1 addition, 1 deletion)
@@ -26,7 +26,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
 wandb_project: pythia-1.4b-lora
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-alpaca
 batch_size: 48
 micro_batch_size: 4
configs/quickstart.yml: 2 changes (1 addition, 1 deletion)
@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-test
 batch_size: 4
 micro_batch_size: 1
configs/sample.yml: 2 changes (1 addition, 1 deletion)
@@ -49,7 +49,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 # where to save the finished model to
 output_dir: ./completed-model
 # training hyperparameters
configs/stability_3b.yml: 2 changes (1 addition, 1 deletion)
@@ -20,7 +20,7 @@ lora_fan_in_fan_out: false
 wandb_project: stable-alpaca-3b
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./stable-alpaca-3b
 batch_size: 2
 micro_batch_size: 1
configs/vicuna_13B_4bit_reflect.yml: 2 changes (1 addition, 1 deletion)
@@ -28,7 +28,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-reflect
 batch_size: 8
 micro_batch_size: 2
examples/gptq-lora-7b/config.yml: 2 changes (1 addition, 1 deletion)
@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
 wandb_project: llama-7b-lora-int4
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./llama-7b-lora-int4
 batch_size: 1
 micro_batch_size: 1
examples/mpt-7b/config.yml: 2 changes (1 addition, 1 deletion)
@@ -22,7 +22,7 @@ lora_fan_in_fan_out: false
 wandb_project: mpt-alpaca-7b
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./mpt-alpaca-7b
 batch_size: 1
 micro_batch_size: 1
examples/redpajama/config-3b.yml: 2 changes (1 addition, 1 deletion)
@@ -23,7 +23,7 @@ lora_fan_in_fan_out: false
 wandb_project: redpajama-alpaca-3b
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./redpajama-alpaca-3b
 batch_size: 4
 micro_batch_size: 1
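Restoring uploads for a single config is the reverse one-line edit. A hedged sketch (the "end" value follows the Hugging Face trainer's WANDB_LOG_MODEL convention; that these configs accept and forward it is an assumption on my part, not stated in this diff):

    wandb_log_model: checkpoint  # upload every saved checkpoint as a W&B artifact
    # wandb_log_model: end       # assumption: upload only the final model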
