From d4218146c10f2c5c26b1c1f10ffe2b7940fcd6b1 Mon Sep 17 00:00:00 2001
From: Chen Cui
Date: Mon, 5 Feb 2024 21:02:01 -0500
Subject: [PATCH] add check for distributed optimizer which is unsupported for
 PEFT (#8323)

Signed-off-by: Chen Cui
---
 nemo/collections/nlp/parts/mixins/nlp_adapter_mixins.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/nemo/collections/nlp/parts/mixins/nlp_adapter_mixins.py b/nemo/collections/nlp/parts/mixins/nlp_adapter_mixins.py
index 7741e54c7fdb6..5bfccd8624c7a 100644
--- a/nemo/collections/nlp/parts/mixins/nlp_adapter_mixins.py
+++ b/nemo/collections/nlp/parts/mixins/nlp_adapter_mixins.py
@@ -175,6 +175,8 @@ def add_adapter(self, peft_cfgs: Union[PEFTConfig, List[PEFTConfig]]):
 
         if self.cfg.get('virtual_pipeline_model_parallel_size', None):
             raise ValueError('Virtual pipeline model parallel is not supported when using PEFT')
+        if self.cfg.optim.name == "distributed_fused_adam":
+            raise ValueError('distributed_fused_adam is not supported for PEFT. Please use fused_adam')
 
         if not isinstance(peft_cfgs, List):
             peft_cfgs = [peft_cfgs]
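
For illustration, a minimal standalone sketch of the behavior this guard introduces, not part of the patch itself: the names check_peft_optimizer, ModelCfg, and OptimCfg are hypothetical stand-ins for the OmegaConf model config that add_adapter() reads through self.cfg.optim.name; only the condition and error message are taken verbatim from the diff. Placing the check in add_adapter() fails fast at adapter setup, before the distributed optimizer (which shards optimizer state across ranks) is ever instantiated for a PEFT run.

from dataclasses import dataclass, field


@dataclass
class OptimCfg:
    # "distributed_fused_adam" selects the distributed optimizer in NeMo configs
    name: str = "distributed_fused_adam"


@dataclass
class ModelCfg:
    optim: OptimCfg = field(default_factory=OptimCfg)


def check_peft_optimizer(cfg: ModelCfg) -> None:
    # Same condition the patch adds to add_adapter(): reject the
    # distributed optimizer up front with an actionable message.
    if cfg.optim.name == "distributed_fused_adam":
        raise ValueError('distributed_fused_adam is not supported for PEFT. Please use fused_adam')


cfg = ModelCfg()
try:
    check_peft_optimizer(cfg)
except ValueError as err:
    print(err)  # distributed_fused_adam is not supported for PEFT. Please use fused_adam

cfg.optim.name = "fused_adam"  # the switch the error message asks for
check_peft_optimizer(cfg)      # now passes silently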