#!/bin/bash
# Continue past individual failures (e.g., an out-of-memory run) so the rest of the sweep still executes
set +e
# Benchmark configurations: sweeps over context length (512 to 4096), train type
# (lora/qlora), gradient checkpointing, and CPU offload
commands=(
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 512 --num_epochs 1 --train_type qlora --use_gradient_checkpointing True --use_cpu_offload False --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 1024 --num_epochs 1 --train_type qlora --use_gradient_checkpointing True --use_cpu_offload False --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 2048 --num_epochs 1 --train_type qlora --use_gradient_checkpointing True --use_cpu_offload False --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 4096 --num_epochs 1 --train_type qlora --use_gradient_checkpointing True --use_cpu_offload False --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 512 --num_epochs 1 --train_type lora --use_gradient_checkpointing True --use_cpu_offload False --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 1024 --num_epochs 1 --train_type lora --use_gradient_checkpointing True --use_cpu_offload False --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 2048 --num_epochs 1 --train_type lora --use_gradient_checkpointing True --use_cpu_offload False --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 4096 --num_epochs 1 --train_type lora --use_gradient_checkpointing True --use_cpu_offload False --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 1024 --num_epochs 1 --train_type lora --use_gradient_checkpointing False --use_cpu_offload False --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 4096 --num_epochs 1 --train_type lora --use_gradient_checkpointing False --use_cpu_offload False --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 1024 --num_epochs 1 --train_type qlora --use_gradient_checkpointing False --use_cpu_offload False --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 4096 --num_epochs 1 --train_type qlora --use_gradient_checkpointing False --use_cpu_offload False --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 1024 --num_epochs 1 --train_type lora --use_gradient_checkpointing False --use_cpu_offload True --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 1024 --num_epochs 1 --train_type lora --use_gradient_checkpointing True --use_cpu_offload True --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 1024 --num_epochs 1 --train_type qlora --use_gradient_checkpointing False --use_cpu_offload True --log_to wandb --dataset dummy"
"python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 --context_length 1024 --num_epochs 1 --train_type qlora --use_gradient_checkpointing True --use_cpu_offload True --log_to wandb --dataset dummy"
)
# Execute each command. Word splitting of the unquoted $cmd is safe here
# because no argument contains spaces or quotes.
for cmd in "${commands[@]}"; do
  echo "Executing: $cmd"
  $cmd
done
# Optional: re-enable exit-on-error for any commands appended after the sweep
set -e
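
# ---------------------------------------------------------------------------
# Alternative sketch (disabled by the `if false` guard): the same 16-run
# sweep generated with loops and a small helper instead of hard-coded
# strings. This is a minimal illustration assuming train.py accepts exactly
# the flags used above; `run_cfg` is a hypothetical helper, not part of the
# original script or of train.py.
if false; then
  run_cfg () {
    # $1=context_length  $2=train_type  $3=gradient_checkpointing  $4=cpu_offload
    python train.py --model_name meta-llama/Llama-2-7b-hf --batch_size 1 \
      --context_length "$1" --num_epochs 1 --train_type "$2" \
      --use_gradient_checkpointing "$3" --use_cpu_offload "$4" \
      --log_to wandb --dataset dummy
  }

  # Gradient checkpointing on, no offload: full context-length sweep.
  for ttype in qlora lora; do
    for ctx in 512 1024 2048 4096; do
      run_cfg "$ctx" "$ttype" True False
    done
  done

  # Gradient checkpointing off, no offload: short and long contexts only.
  for ttype in lora qlora; do
    for ctx in 1024 4096; do
      run_cfg "$ctx" "$ttype" False False
    done
  done

  # CPU offload on at context 1024, with checkpointing off and on.
  for ttype in lora qlora; do
    for ckpt in False True; do
      run_cfg 1024 "$ttype" "$ckpt" True
    done
  done
fi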