Commit

Addressing code review comments
Signed-off-by: Boris Fomitchev <bfomitchev@nvidia.com>
borisfom committed Jul 6, 2024
1 parent 7f8105f commit 1d1437d
Showing 3 changed files with 13 additions and 30 deletions.
23 changes: 6 additions & 17 deletions examples/nlp/language_modeling/megatron_export.py
@@ -30,7 +30,6 @@

from omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from torch.export import Dim

from nemo.collections.nlp.models.language_modeling.megatron_bart_model import MegatronBARTModel
from nemo.collections.nlp.models.language_modeling.megatron_bert_model import MegatronBertModel
@@ -75,7 +74,7 @@ def nemo_export(cfg):
assert nemo_in is not None, "NeMo model not provided. Please provide the path to the .nemo or .ckpt file"

onnx_out = cfg.onnx_model_file
print(f"onnx_out: {onnx_out}")

trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
assert (
cfg.trainer.devices * cfg.trainer.num_nodes
@@ -150,28 +149,18 @@ def nemo_export(cfg):
try:
model.to(device=cfg.export_options.device).freeze()
model.eval()

sequence = "sequence"
batch = "batch"

use_dynamo = False
if use_dynamo:
sequence = Dim("sequence")
batch = Dim("batch")

model.export(
onnx_out,
onnx_opset_version=cfg.export_options.onnx_opset,
do_constant_folding=cfg.export_options.do_constant_folding,
dynamic_axes={
'input_ids': {0: "sequence", 1: "batch"},
'position_ids': {0: "sequence", 1: "batch"},
'logits': {0: "sequence", 1: "batch"},
},
check_trace=check_trace,
check_tolerance=cfg.export_options.check_tolerance,
verbose=cfg.export_options.verbose,
dynamic_axes={
'input_ids': {0: sequence, 1: batch},
'position_ids': {0: sequence, 1: batch},
'logits': {0: sequence, 1: batch},
},
use_dynamo=use_dynamo,
)
except Exception as e:
logging.error(
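Note: the updated export call above names its dynamic axes with plain strings and drops the torch.export.Dim / use_dynamo toggle that the old code set up. For comparison, here is a minimal, self-contained sketch of string-named dynamic axes using the stock torch.onnx.export API; the toy model, output file name, and tensor shapes are assumptions for illustration, not NeMo's model.export wrapper.

# Illustrative only: string-named dynamic axes with torch.onnx.export,
# mirroring the style of the updated megatron_export.py call above.
import torch
import torch.nn as nn

class ToyLM(nn.Module):
    def __init__(self, vocab=128, hidden=32):
        super().__init__()
        self.emb = nn.Embedding(vocab, hidden)
        self.head = nn.Linear(hidden, vocab)

    def forward(self, input_ids, position_ids):
        return self.head(self.emb(input_ids) + self.emb(position_ids))

model = ToyLM().eval()
input_ids = torch.randint(0, 128, (2, 16))
position_ids = torch.arange(16).unsqueeze(0).expand(2, -1)

torch.onnx.export(
    model,
    (input_ids, position_ids),
    "toy_lm.onnx",
    input_names=["input_ids", "position_ids"],
    output_names=["logits"],
    # Plain strings name each dynamic dimension; no torch.export.Dim objects needed.
    dynamic_axes={
        "input_ids": {0: "batch", 1: "sequence"},
        "position_ids": {0: "batch", 1: "sequence"},
        "logits": {0: "batch", 1: "sequence"},
    },
    opset_version=17,
    do_constant_folding=True,
)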
@@ -196,7 +196,7 @@ def __init__(self, model):

self.dtype = utils_funcs.torch_dtype_from_precision(model.cfg.precision)

def forward(self, input_ids, position_ids, attention_mask):
def forward(self, tokens, position_ids, attention_mask):
if self.fp8_enabled and HAVE_TE:
with (
transformer_engine.pytorch.onnx_export(self.fp8_enabled),
@@ -207,12 +207,10 @@ def forward(self, input_ids, position_ids, attention_mask):
warnings.catch_warnings(),
):
warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning, module=r'.*')
assert input_ids.shape == position_ids.shape
assert (
attention_mask.shape[2] == attention_mask.shape[3] == input_ids.shape[1] == position_ids.shape[1]
)
assert tokens.shape == position_ids.shape
assert attention_mask.shape[2] == attention_mask.shape[3] == tokens.shape[1] == position_ids.shape[1]
output_tensor = self.model.forward(
tokens=input_ids.cuda(),
tokens=tokens.cuda(),
text_position_ids=position_ids.cuda(),
attention_mask=attention_mask.cuda(),
labels=None,
@@ -225,12 +223,10 @@
warnings.catch_warnings(),
):
warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning, module=r'.*')
assert input_ids.shape == position_ids.shape
assert (
attention_mask.shape[2] == attention_mask.shape[3] == input_ids.shape[1] == position_ids.shape[1]
)
assert tokens.shape == position_ids.shape
assert attention_mask.shape[2] == attention_mask.shape[3] == tokens.shape[1] == position_ids.shape[1]
output_tensor = self.model.forward(
tokens=input_ids.cuda(),
tokens=tokens.cuda(),
text_position_ids=position_ids.cuda(),
attention_mask=attention_mask.cuda(),
labels=None,
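Note: the hunks above rename the wrapper's first forward argument from input_ids to tokens, so the shape asserts and the delegated self.model.forward(tokens=...) call now use a single consistent name. A bare-bones sketch of such an export wrapper follows; DummyGPT and its forward signature are stand-ins invented for this example, not the actual Megatron model class.

# Illustrative sketch of an ONNX-export wrapper in the spirit of the diff above.
import torch
import torch.nn as nn

class DummyGPT(nn.Module):
    def forward(self, tokens, text_position_ids, attention_mask, labels=None):
        # Stand-in: a real Megatron model returns logits of shape [batch, seq, vocab].
        return torch.zeros(tokens.shape[0], tokens.shape[1], 8)

class ExportWrapper(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, tokens, position_ids, attention_mask):
        # Same sanity checks as the diff: token and position ids share a shape,
        # and the attention mask is square over the sequence dimension.
        assert tokens.shape == position_ids.shape
        assert attention_mask.shape[2] == attention_mask.shape[3] == tokens.shape[1] == position_ids.shape[1]
        return self.model(
            tokens=tokens,
            text_position_ids=position_ids,
            attention_mask=attention_mask,
            labels=None,
        )

b, s = 2, 16
wrapper = ExportWrapper(DummyGPT()).eval()
logits = wrapper(
    torch.randint(0, 8, (b, s)),
    torch.arange(s).unsqueeze(0).expand(b, -1),
    torch.ones(b, 1, s, s, dtype=torch.bool),
)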
2 changes: 0 additions & 2 deletions nemo/utils/export_utils.py
@@ -242,8 +242,6 @@ def run_ort_and_compare(sess, ort_input, output_example, check_tolerance=0.01):
from apex.normalization import MixedFusedRMSNorm
from apex.normalization.fused_layer_norm import FusedLayerNorm, MixedFusedLayerNorm
from megatron.core.fusions.fused_layer_norm import FusedLayerNorm as MCoreFusedLayerNorm

# from apex.transformer.functional.fused_softmax import FusedScaleMaskSoftmax
from megatron.core.fusions.fused_softmax import FusedScaleMaskSoftmax
from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear

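Note: the export_utils.py hunk above only drops a stale commented-out apex import; the remaining imports bring in the fused layer types that get swapped for plain, ONNX-exportable PyTorch modules before export. A generic, hypothetical sketch of that replacement pattern is shown below; replace_modules and layernorm_from_fused are illustrative names, not the real nemo.utils.export_utils API.

# Hypothetical illustration of swapping fused layers for export-friendly ones.
import torch.nn as nn

def replace_modules(model: nn.Module, mapping):
    # Walk the module tree; replace any child whose type appears in the mapping.
    for name, child in model.named_children():
        for src_type, convert in mapping.items():
            if isinstance(child, src_type):
                setattr(model, name, convert(child))
                break
        else:
            replace_modules(child, mapping)
    return model

def layernorm_from_fused(fused):
    # Copy the learned parameters into a standard, exportable LayerNorm.
    ln = nn.LayerNorm(fused.normalized_shape, eps=fused.eps)
    ln.load_state_dict(fused.state_dict())
    return ln

# Example use (assumed mapping): replace_modules(model, {FusedLayerNorm: layernorm_from_fused})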
