diff --git a/configs/train_configs/dpo/valpy_dpo_mix.yaml b/configs/train_configs/dpo/valpy_dpo_mix.yaml
new file mode 100644
index 000000000..9f2233030
--- /dev/null
+++ b/configs/train_configs/dpo/valpy_dpo_mix.yaml
@@ -0,0 +1,38 @@
+model_name_or_path: /model
+model_revision: main
+use_flash_attn: true
+gradient_checkpointing: true
+dataset_mixer:
+  allenai/ultrafeedback_binarized_cleaned_train: 61814
+  # ai2-adapt-dev/DaringAnteater-prefs-RM-filter: 1618
+  # ai2-adapt-dev/hh-rlhf-helpful: 20000
+  # ai2-adapt-dev/webgpt-binarized: 14346
+  # ai2-adapt-dev/WildChat-prefs-280824: 11487
+  # ai2-adapt-dev/UltraInteract_pair_randomlen_Logic: 5726
+  # ai2-adapt-dev/UltraInteract_pair_randomlen_Math_PoT: 22800
+  # ai2-adapt-dev/UltraInteract_pair_randomlen_Math_CoT: 22483
+  # ai2-adapt-dev/UltraInteract_pair_randomlen_Coding: 13600
+  # ai2-adapt-dev/UltraInteract_pair_maxlen_Logic: 5726
+  # ai2-adapt-dev/UltraInteract_pair_maxlen_Math_PoT: 22800
+  # ai2-adapt-dev/UltraInteract_pair_maxlen_Math_CoT: 22483
+  # ai2-adapt-dev/UltraInteract_pair_maxlen_Coding: 13600
+tokenizer_name: /model
+use_slow_tokenizer: true
+max_seq_length: 2048
+preprocessing_num_workers: 16
+per_device_train_batch_size: 1
+gradient_accumulation_steps: 16  # designed for 8 GPUs, so batch size 128
+learning_rate: 5.0e-7
+lr_scheduler_type: linear
+warmup_ratio: 0.1
+weight_decay: 0.0
+num_train_epochs: 1
+output_dir: /output
+with_tracking: true
+report_to:
+  - wandb
+logging_steps: 1
+use_lora: false
+dpo_loss_type: dpo_norm
+dpo_beta: 5
+checkpointing_steps: 1000