Enable onnxruntime training #14

Closed · wants to merge 3 commits
1 change: 1 addition & 0 deletions examples/pytorch/language-modeling/run_mlm.py
@@ -329,6 +329,7 @@ def main():
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
"ort": True if training_args.ort else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
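Note: the example scripts in this PR read `training_args.ort`, but the `TrainingArguments` change that defines the flag is not shown in this diff. A minimal sketch of how such a flag could be declared (the field name `ort` matches the usage above; the subclass name, help text, and default are assumptions for illustration only):

from dataclasses import dataclass, field

from transformers import TrainingArguments


@dataclass
class OrtTrainingArguments(TrainingArguments):
    # Hypothetical stand-in for the flag the scripts read as `training_args.ort`.
    ort: bool = field(
        default=False,
        metadata={"help": "Wrap the model in torch_ort.ORTModule and run training through ONNX Runtime."},
    )

Declared this way, `HfArgumentParser` should expose it as a `--ort` command-line flag that defaults to `False`.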
1 change: 1 addition & 0 deletions examples/pytorch/question-answering/run_qa.py
@@ -299,6 +299,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
ort=training_args.ort,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
1 change: 1 addition & 0 deletions examples/pytorch/summarization/run_summarization.py
@@ -376,6 +376,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
ort=True if training_args.ort else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
1 change: 1 addition & 0 deletions examples/pytorch/text-classification/run_glue.py
@@ -337,6 +337,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
ort=True if training_args.ort else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
1 change: 1 addition & 0 deletions examples/pytorch/translation/run_translation.py
@@ -341,6 +341,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
ort=True if training_args.ort else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
5 changes: 5 additions & 0 deletions src/transformers/configuration_utils.py
@@ -236,6 +236,10 @@ class PretrainedConfig(PushToHubMixin):

use_bfloat16 (`bool`, *optional*, defaults to `False`):
Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).

ONNX Runtime specific parameters

ort (`bool`, *optional*, defaults to `False`):
Whether or not the model should use ONNX Runtime (ORT) friendly code paths (used when training with `torch_ort.ORTModule`).
"""
model_type: str = ""
is_composition: bool = False
@@ -260,6 +264,7 @@ def __init__(self, **kwargs):
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.torch_dtype = kwargs.pop("torch_dtype", None) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.ort = kwargs.pop("ort", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
self.tie_word_embeddings = kwargs.pop(
"tie_word_embeddings", True
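Since `PretrainedConfig.__init__` pops `ort` from `**kwargs`, the flag can be passed through `from_pretrained` or toggled on an existing config before the model is built. A small usage sketch (the checkpoint name is only an example):

from transformers import AutoConfig

# `ort=True` is forwarded through **kwargs and stored on the config.
config = AutoConfig.from_pretrained("microsoft/deberta-v2-xlarge", ort=True)
assert config.ort is True

# It can also be flipped after loading, before the model is instantiated.
config.ort = False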
48 changes: 38 additions & 10 deletions src/transformers/models/deberta_v2/modeling_deberta_v2.py
@@ -35,6 +35,7 @@
from ...pytorch_utils import softmax_backward_data
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_deberta_v2 import DebertaV2Config
from .jit_tracing import traceable


logger = logging.get_logger(__name__)
@@ -56,7 +57,10 @@ class ContextPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
self.dropout = StableDropout(config.pooler_dropout)
if config.ort:
self.dropout = TorchNNDropout(config.pooler_dropout)
else:
self.dropout = StableDropout(config.pooler_dropout)
self.config = config

def forward(self, hidden_states):
@@ -74,6 +78,7 @@ def output_dim(self):
return self.config.hidden_size


@traceable
# Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2
class XSoftmax(torch.autograd.Function):
"""
@@ -164,7 +169,7 @@ def get_mask(input, local_context):

return mask, dropout


@traceable
# Copied from transformers.models.deberta.modeling_deberta.XDropout
class XDropout(torch.autograd.Function):
"""Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""
@@ -187,6 +192,9 @@ def backward(ctx, grad_output):
else:
return grad_output, None


class TorchNNDropout(torch.nn.Dropout):
"""Plain `torch.nn.Dropout`, used in place of `StableDropout` when `config.ort` is enabled."""
def __init__(self, drop_prob):
super().__init__(drop_prob)

# Copied from transformers.models.deberta.modeling_deberta.StableDropout
class StableDropout(nn.Module):
@@ -244,7 +252,10 @@ def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
if config.ort:
self.dropout = TorchNNDropout(config.hidden_dropout_prob)
else:
self.dropout = StableDropout(config.hidden_dropout_prob)

def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
@@ -312,7 +323,10 @@ def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
if config.ort:
self.dropout = TorchNNDropout(config.hidden_dropout_prob)
else:
self.dropout = StableDropout(config.hidden_dropout_prob)
self.config = config

def forward(self, hidden_states, input_tensor):
@@ -367,7 +381,10 @@ def __init__(self, config):
config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups
)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
if config.ort:
self.dropout = TorchNNDropout(config.hidden_dropout_prob)
else:
self.dropout = StableDropout(config.hidden_dropout_prob)
self.config = config

def forward(self, hidden_states, residual_states, input_mask):
@@ -624,16 +641,21 @@ def __init__(self, config):
self.pos_ebd_size = self.max_relative_positions
if self.position_buckets > 0:
self.pos_ebd_size = self.position_buckets

self.pos_dropout = StableDropout(config.hidden_dropout_prob)
if config.ort:
self.pos_dropout = TorchNNDropout(config.hidden_dropout_prob)
else:
self.pos_dropout = StableDropout(config.hidden_dropout_prob)

if not self.share_att_key:
if "c2p" in self.pos_att_type:
self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
if "p2c" in self.pos_att_type:
self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size)

self.dropout = StableDropout(config.attention_probs_dropout_prob)
if config.ort:
self.dropout = TorchNNDropout(config.attention_probs_dropout_prob)
else:
self.dropout = StableDropout(config.attention_probs_dropout_prob)

def transpose_for_scores(self, x, attention_heads):
new_x_shape = x.size()[:-1] + (attention_heads, -1)
@@ -824,7 +846,10 @@ def __init__(self, config):
if self.embedding_size != config.hidden_size:
self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
if config.ort:
self.dropout = TorchNNDropout(config.hidden_dropout_prob)
else:
self.dropout = StableDropout(config.hidden_dropout_prob)
self.config = config

# position_ids (1, len position emb) is contiguous in memory and exported when serialized
@@ -1237,7 +1262,10 @@ def __init__(self, config):
self.classifier = nn.Linear(output_dim, num_labels)
drop_out = getattr(config, "cls_dropout", None)
drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
self.dropout = StableDropout(drop_out)
if config.ort:
self.dropout = TorchNNDropout(drop_out)
else:
self.dropout = StableDropout(drop_out)

# Initialize weights and apply final processing
self.post_init()
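The `if config.ort: ... else: ...` dropout selection above is repeated in every DeBERTa-v2 module that previously used `StableDropout`. A possible follow-up (not part of this PR) is a small helper inside `modeling_deberta_v2.py`, where both classes are defined, so each call site stays one line:

def build_dropout(config, drop_prob):
    """Return the ORT-friendly dropout when `config.ort` is set, otherwise DeBERTa's StableDropout."""
    if getattr(config, "ort", False):
        return TorchNNDropout(drop_prob)
    return StableDropout(drop_prob)


# e.g. in ContextPooler.__init__:
#     self.dropout = build_dropout(config, config.pooler_dropout)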
3 changes: 2 additions & 1 deletion src/transformers/models/roberta/modeling_roberta.py
@@ -1467,6 +1467,7 @@ class RobertaForQuestionAnswering(RobertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.ort = config.ort

self.roberta = RobertaModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
@@ -1536,7 +1537,7 @@ def forward(
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
# ORT: use a fixed constant instead of the data-dependent `start_logits.size(1)` so the exported graph stays static
ignored_index = start_logits.size(1) if not self.ort else 344
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

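For context on the `ignored_index` change: clamping out-of-range labels is only harmless because the QA loss is typically constructed with `ignore_index=ignored_index`, so any label clamped to that value drops out of the loss. A standalone illustration of the mechanism (toy shapes; the 344 in the diff is simply the fixed constant used in place of the data-dependent `size(1)`):

import torch
from torch.nn import CrossEntropyLoss

seq_len = 16
start_logits = torch.randn(2, seq_len)
start_positions = torch.tensor([3, 500])  # second label lies outside the model inputs

# Dynamic variant: ignored_index depends on the logits' shape.
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
loss = loss_fct(start_logits, start_positions)  # the clamped label (16) is ignored
print(loss)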
77 changes: 62 additions & 15 deletions src/transformers/models/t5/modeling_t5.py
@@ -276,12 +276,37 @@ def forward(self, hidden_states):
pass


class T5ClampedDropout(nn.Module):
def __init__(self, config):
super().__init__()
self.ort = config.ort
self.dropout = nn.Dropout(config.dropout_rate)
self.dropout_rate = config.dropout_rate

def forward(self, hidden_states):
# clamp inf values to enable fp16 training
if self.ort:
# Remove data-based control flow for static graph
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max)
# dropout rescales kept values by 1/(1 - p), so shrink the bound by (1 - p) to stay in range after dropout
clamp_value = (1.0 - self.dropout_rate) * clamp_value
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
else:
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

hidden_states = self.dropout(hidden_states)
return hidden_states


class T5DenseReluDense(nn.Module):
def __init__(self, config: T5Config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.dropout = T5ClampedDropout(config)
self.relu_act = ACT2FN["relu"]

def forward(self, hidden_states):
@@ -298,7 +323,7 @@ def __init__(self, config: T5Config):
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.dropout = T5ClampedDropout(config)
self.gelu_act = ACT2FN["gelu_new"]

def forward(self, hidden_states):
@@ -323,7 +348,7 @@ def __init__(self, config: T5Config):
)

self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.dropout = T5ClampedDropout(config)

def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
@@ -560,7 +585,7 @@ def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.dropout = T5ClampedDropout(config)

def forward(
self,
@@ -592,7 +617,7 @@ def __init__(self, config):
super().__init__()
self.EncDecAttention = T5Attention(config, has_relative_attention_bias=False)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.dropout = T5ClampedDropout(config)

def forward(
self,
@@ -627,6 +652,7 @@ class T5Block(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.ort = config.ort
self.layer = nn.ModuleList()
self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
if self.is_decoder:
@@ -680,9 +706,16 @@ def forward(
attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights

# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
if self.ort:
# Remove data-based control flow for static graph
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
else:
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
@@ -707,9 +740,16 @@ def forward(
hidden_states = cross_attention_outputs[0]

# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
if self.ort:
# Remove data-based control flow for static graph
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
else:
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

# Combine self attn and cross attn key value states
if present_key_value_state is not None:
@@ -722,9 +762,16 @@ def forward(
hidden_states = self.layer[-1](hidden_states)

# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
if self.ort:
# Remove data-based control flow for static graph
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
else:
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

outputs = (hidden_states,)

@@ -843,7 +890,7 @@ def __init__(self, config, embed_tokens=None):
[T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
)
self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.dropout = T5ClampedDropout(config)

# Initialize weights and apply final processing
self.post_init()
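The repeated `torch.where` clamp above exists to remove the Python-level branch on tensor data (`torch.isinf(...).any()`), which would otherwise become a data-dependent control-flow decision in the exported/static graph. A standalone sketch of the two variants (illustrative only; the bounds are built as tensors here for portability, whereas the diff passes Python scalars to `torch.where`):

import torch


def clamp_dynamic(hidden_states):
    # Original pattern: a Python `if` that inspects tensor data.
    if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
        clamp_value = torch.finfo(hidden_states.dtype).max - 1000
        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
    return hidden_states


def clamp_static(hidden_states):
    # ORT pattern: the data-dependent decision is folded into torch.where,
    # so the traced graph contains no branch on tensor values.
    if hidden_states.dtype == torch.float16:
        max_val = torch.tensor(torch.finfo(hidden_states.dtype).max, dtype=hidden_states.dtype)
        clamp_value = torch.where(torch.isinf(hidden_states).any(), max_val - 1000, max_val)
        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
    return hidden_states


x = torch.tensor([1.0, float("inf"), -2.0], dtype=torch.float16)
print(clamp_dynamic(x))  # inf clamped to just below the fp16 max
print(clamp_static(x))   # same values, without a Python branch on the data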
14 changes: 13 additions & 1 deletion src/transformers/trainer.py
@@ -1068,7 +1068,12 @@ def _wrap_model(self, model, training=True):

# train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
if unwrap_model(model) is not model:
return model
if self.args.ort:
from torch_ort import ORTModule

# With ORT the model reaches this point already wrapped in ORTModule by `train()`;
# fall through so apex/DDP wrapping is still applied, and only return early otherwise.
if type(model) is not ORTModule:
return model
else:
return model

# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
@@ -1255,7 +1260,14 @@ def train(
delay_optimizer_creation = (
self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE or is_sagemaker_mp_enabled()
)
if args.ort:
from torch_ort import ORTModule
logger.info("Converting to ORTModule ....")
model = ORTModule(self.model)
self.model_wrapped = model
if args.deepspeed:
if args.ort:
self.model = model
deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
)
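For reference, the conversion that `train()` performs when `args.ort` is set is the standard `torch-ort` wrapping; a minimal standalone sketch (assumes the `torch-ort` package and an ONNX Runtime training build are installed; the toy model and hyperparameters are only illustrative):

import torch
from torch_ort import ORTModule  # pip install torch-ort && python -m torch_ort.configure

model = torch.nn.Sequential(
    torch.nn.Linear(128, 256),
    torch.nn.ReLU(),
    torch.nn.Linear(256, 2),
)
model = ORTModule(model)  # forward and backward now execute through ONNX Runtime

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
inputs, labels = torch.randn(8, 128), torch.randint(0, 2, (8,))
loss = torch.nn.functional.cross_entropy(model(inputs), labels)
loss.backward()
optimizer.step()

In the Trainer this wrapping happens once at the top of the training loop, and `_wrap_model` then lets the ORT-wrapped model pass through the usual mixed-precision/DDP wrapping.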