diff --git a/templates/fine-tune-llm/training_configs/full_param/llama-2-70b-4k-2xp4de_24xlarge.yaml b/templates/fine-tune-llm/training_configs/full_param/llama-2-70b-4k-2xp4de_24xlarge.yaml
index 68d6488df..444a0e935 100644
--- a/templates/fine-tune-llm/training_configs/full_param/llama-2-70b-4k-2xp4de_24xlarge.yaml
+++ b/templates/fine-tune-llm/training_configs/full_param/llama-2-70b-4k-2xp4de_24xlarge.yaml
@@ -14,4 +14,4 @@ deepspeed:
   config_path: deepspeed_configs/zero_3_llama_2_70b.json
 flash_attention_2: True
 worker_resources:
-  p4de.24xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up
\ No newline at end of file
+  p4de.24xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up
diff --git a/templates/fine-tune-llm/training_configs/full_param/llama-2-70b-chat-4k-2xp4de_24xlarge.yaml b/templates/fine-tune-llm/training_configs/full_param/llama-2-70b-chat-4k-2xp4de_24xlarge.yaml
index e45a2d023..792e942e8 100644
--- a/templates/fine-tune-llm/training_configs/full_param/llama-2-70b-chat-4k-2xp4de_24xlarge.yaml
+++ b/templates/fine-tune-llm/training_configs/full_param/llama-2-70b-chat-4k-2xp4de_24xlarge.yaml
@@ -14,4 +14,4 @@ deepspeed:
   config_path: deepspeed_configs/zero_3_llama_2_70b.json
 flash_attention_2: True
 worker_resources:
-  p4de.24xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up
\ No newline at end of file
+  p4de.24xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up
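Note: the comment on the changed line describes a pairing between the worker_resources key in these training configs and a custom resource declared on the GPU worker group in the job compute config. Below is a minimal sketch of what that counterpart entry could look like, assuming an Anyscale-style compute config with a worker_node_types list; the node-type name and the min/max worker counts are illustrative and not taken from this diff.

worker_node_types:
  - name: gpu_worker               # illustrative name, not from this diff
    instance_type: p4de.24xlarge
    min_workers: 0
    max_workers: 2                 # assumption: the "2x" in the filenames suggests two nodes
    resources:
      custom_resources:
        p4de.24xlarge: 1           # keyed identically to worker_resources in the
                                   # training config, so requests for this custom
                                   # resource schedule onto (and scale up) this group

With matching keys, each training worker that requests one unit of the p4de.24xlarge custom resource can only land on nodes of this group, which is what lets the appropriate instances scale up.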