From eb2ebe03e07e3c07817644e21e32da535461308b Mon Sep 17 00:00:00 2001
From: Rahul Jha
Date: Wed, 23 Aug 2023 01:52:25 -0700
Subject: [PATCH] propagating eval_batch_size to TrainingArguments (#675)

Co-authored-by: Rahul Jha
---
 examples/research_projects/stack_llama_2/scripts/dpo_llama2.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py b/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py
index a477628d0e4..079ee87f4bd 100644
--- a/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py
+++ b/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py
@@ -164,6 +164,7 @@ def return_prompt_and_responses(samples) -> Dict[str, str]:
     # 4. initialize training arguments:
     training_args = TrainingArguments(
         per_device_train_batch_size=script_args.per_device_train_batch_size,
+        per_device_eval_batch_size=script_args.per_device_eval_batch_size,
         max_steps=script_args.max_steps,
         logging_steps=script_args.logging_steps,
         save_steps=script_args.save_steps,
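
For context, the `script_args.per_device_eval_batch_size` value the patch forwards would come from the script's argument dataclass. Below is a minimal sketch of that plumbing, assuming an `HfArgumentParser`-based `ScriptArguments` dataclass like the one used elsewhere in this script; the field name matches the patch, but the defaults and help strings here are hypothetical:

```python
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser, TrainingArguments


@dataclass
class ScriptArguments:
    # Fields assumed to mirror what the patch reads; defaults are hypothetical.
    per_device_train_batch_size: Optional[int] = field(
        default=4, metadata={"help": "train batch size per device"}
    )
    per_device_eval_batch_size: Optional[int] = field(
        default=1, metadata={"help": "eval batch size per device"}
    )


parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]

# Before the patch, TrainingArguments silently fell back to its own default
# for per_device_eval_batch_size (8) even when the CLI flag was set; the
# patch forwards the parsed value so evaluation actually respects it.
training_args = TrainingArguments(
    output_dir="./results",
    per_device_train_batch_size=script_args.per_device_train_batch_size,
    per_device_eval_batch_size=script_args.per_device_eval_batch_size,
)
```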