Fix MCoreGPTModel import in llm.gpt.model.base (NVIDIA#11109)
Signed-off-by: Hemil Desai <hemild@nvidia.com>
hemildesai authored and XuesongYang committed Jan 18, 2025
1 parent 56ab393 commit 493f3f4
Showing 1 changed file with 1 addition and 3 deletions.
4 changes: 1 addition & 3 deletions nemo/collections/llm/gpt/model/base.py
@@ -20,6 +20,7 @@
 import torch.distributed
 from megatron.core.inference.model_inference_wrappers.gpt.gpt_inference_wrapper import GPTInferenceWrapper
 from megatron.core.inference.model_inference_wrappers.inference_wrapper_config import InferenceWrapperConfig
+from megatron.core.models.gpt.gpt_model import GPTModel as MCoreGPTModel
 from megatron.core.optimizer import OptimizerConfig
 from megatron.core.transformer.spec_utils import ModuleSpec
 from megatron.core.transformer.transformer_config import TransformerConfig
@@ -44,8 +45,6 @@
 _grad_accum_fusion_available = False
 
 if TYPE_CHECKING:
-    from megatron.core.models.gpt.gpt_model import GPTModel as MCoreGPTModel
-
     from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
 
 
@@ -189,7 +188,6 @@ def configure_model(self, tokenizer) -> "MCoreGPTModel":
         ) % vp_size == 0, "Make sure the number of model chunks is the same across all pipeline stages."
 
         from megatron.core import parallel_state
-        from megatron.core.models.gpt.gpt_model import GPTModel as MCoreGPTModel
 
         transformer_layer_spec = self.transformer_layer_spec
         if not isinstance(transformer_layer_spec, ModuleSpec):
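For context, the commit promotes the MCoreGPTModel alias from a TYPE_CHECKING-only import (plus a method-local import inside configure_model) to a single top-level import, presumably so that runtime references to the alias in llm.gpt.model.base resolve without a local re-import. A minimal sketch of the resulting pattern follows; it assumes megatron-core is installed, uses the import path shown in the diff above, and the helper function name is hypothetical, not part of the NeMo code.

# Sketch only: a single module-level import serves both type annotations
# and runtime use (requires megatron-core to be installed).
from megatron.core.models.gpt.gpt_model import GPTModel as MCoreGPTModel


def returns_gpt_model_type() -> type[MCoreGPTModel]:
    # Hypothetical helper: with the top-level import, the alias can be
    # referenced anywhere in the module without re-importing it locally.
    return MCoreGPTModel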
