run_sft.sh
#!/bin/bash
# Supervised fine-tuning (SFT) of the Qilin-Med pretrained Baichuan checkpoint,
# using LoRA (PEFT) adapters, DeepSpeed ZeRO-2, and bf16 precision.
# Usage: bash run_sft.sh (assumes a single node with 8 GPUs; adjust
# --nproc_per_node to match your hardware).
torchrun --nproc_per_node 8 scripts/supervised_finetuning.py \
    --model_type baichuan \
    --template_name baichuan-chat \
    --model_name_or_path ./checkpoints/Qilin-Med-Pretrain \
    --train_file_dir ./data/sft \
    --validation_file_dir ./data/sft \
    --deepspeed config/deepspeed_config_zero2.json \
    --per_device_train_batch_size 32 \
    --per_device_eval_batch_size 4 \
    --do_train \
    --do_eval \
    --use_peft True \
    --bf16 \
    --max_train_samples 10000000 \
    --max_eval_samples 1000 \
    --num_train_epochs 1 \
    --learning_rate 2e-5 \
    --warmup_ratio 0.05 \
    --weight_decay 0.05 \
    --logging_strategy steps \
    --logging_steps 10 \
    --eval_steps 50 \
    --evaluation_strategy steps \
    --save_steps 5000 \
    --save_strategy steps \
    --save_total_limit 3 \
    --gradient_accumulation_steps 1 \
    --preprocessing_num_workers 1 \
    --max_source_length 256 \
    --max_target_length 256 \
    --output_dir ./checkpoints/Qilin-Med-SFT \
    --overwrite_output_dir \
    --ddp_timeout 30000 \
    --logging_first_step True \
    --target_modules all \
    --lora_rank 8 \
    --lora_alpha 32 \
    --lora_dropout 0.05 \
    --torch_dtype bfloat16 \
    --device_map auto \
    --ddp_find_unused_parameters False \
    --gradient_checkpointing True
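
# The --deepspeed flag above points at config/deepspeed_config_zero2.json,
# which is not shown on this page. As a minimal sketch (hypothetical, not the
# repo's actual file), a ZeRO stage-2 config for the standard HuggingFace
# Trainer/DeepSpeed integration might look like the JSON below, where "auto"
# values are filled in from the Trainer arguments passed above:
#
# {
#   "bf16": { "enabled": "auto" },
#   "zero_optimization": {
#     "stage": 2,
#     "overlap_comm": true,
#     "contiguous_gradients": true,
#     "reduce_bucket_size": 5e8,
#     "allgather_bucket_size": 5e8
#   },
#   "gradient_accumulation_steps": "auto",
#   "gradient_clipping": "auto",
#   "train_micro_batch_size_per_gpu": "auto",
#   "train_batch_size": "auto"
# }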