Torchscript benchmark measure #6907

Closed
Changes from all commits
64 changes: 64 additions & 0 deletions benchmark_pytorch_scripting.py
@@ -0,0 +1,64 @@
#!/usr/bin/env python3
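# Benchmarks BERT inference with TorchScript: a traced BertModel vs. a scripted
# BertScriptableModel, over three different input layouts ("single", "batched", "multiple").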
from transformers.modeling_bert import BertScriptableModel
from transformers import BertConfig, BertModel, PyTorchBenchmark, PyTorchBenchmarkArguments
import torch


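# Builds the model to benchmark: torch.jit.trace(BertModel) when torchscript=False,
# torch.jit.script(BertScriptableModel) when torchscript=True. The dummy input_ids
# tensor is only needed as an example input for tracing.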
def get_model(torchscript=False, device="cpu", config=None, max_seq_length=None):
    input_ids = torch.ones((1, max_seq_length), device=device, dtype=torch.long)
    if not torchscript:
        model = BertModel(config).to(device).eval()
        traced_model = torch.jit.trace(model, input_ids)
        return traced_model
    model = BertScriptableModel(config).to(device).eval()
    return torch.jit.script(model)


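# Builds the benchmark inputs: "single" is one rectangular batch, "batched" splits the
# work into small batches with random sequence lengths, and "multiple" feeds one
# sequence per forward pass.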
def get_input_ids(input_tensor_type="single", config=None, batch_size=None, sequence_length=None, device="cpu"):
    if input_tensor_type == "single":
        return [torch.randint(config.vocab_size, (batch_size, sequence_length), dtype=torch.long, device=device)]
    elif input_tensor_type == "batched":
        # fixed per-batch size of 8 so that num_batches * 8 == batch_size
        num_batches = batch_size // 8
        sequence_lengths = [torch.randint(1, sequence_length, (1,)).item() for _ in range(num_batches)]
        return [torch.randint(config.vocab_size, (8, seq_len), dtype=torch.long, device=device) for seq_len in sequence_lengths]
    elif input_tensor_type == "multiple":
        sequence_lengths = [torch.randint(1, sequence_length, (1,)).item() for _ in range(batch_size)]
        return [torch.randint(config.vocab_size, (1, seq_len), dtype=torch.long, device=device) for seq_len in sequence_lengths]
    else:
        raise ValueError(f"{input_tensor_type} does not exist.")


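# Returns a zero-argument closure that runs the prepared model over every input
# tensor; this closure is what PyTorchBenchmark times.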
def get_inference_func(device, config, sequence_length, batch_size, input_tensor_type, torchscript):
    model = get_model(torchscript, device, config, sequence_length)
    input_ids = get_input_ids(input_tensor_type=input_tensor_type, config=config, batch_size=batch_size, sequence_length=sequence_length, device=device)

    @torch.no_grad()
    def func():
        for i in input_ids:
            result = model(i)
        return result

    return func


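# Hooks the functions above into the transformers PyTorchBenchmark utility by
# overriding its _prepare_inference_func.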
def run_benchmark(batch_sizes, sequence_lengths, input_tensor_type="multiple", torchscript=True):
    config = BertConfig.from_pretrained("bert-base-uncased")
    args = PyTorchBenchmarkArguments(models=[f"Type: {input_tensor_type} - Script: {torchscript}"], no_memory=True, sequence_lengths=sequence_lengths, batch_sizes=batch_sizes, no_multi_process=True, repeat=1, torchscript=True, no_env_print=True)
    device = args.device
    benchmark = PyTorchBenchmark(args, configs=[config])

    def _prepare_inference_func(model_name, batch_size, sequence_length):
        return get_inference_func(device=device, config=config, sequence_length=sequence_length, batch_size=batch_size, input_tensor_type=input_tensor_type, torchscript=torchscript)

    benchmark._prepare_inference_func = _prepare_inference_func
    benchmark.run()


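# "multiple" inputs: one sequence per forward pass, scripted vs. traced.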
torch.manual_seed(0)
run_benchmark([500, 2500], [128, 512])
torch.manual_seed(0)
run_benchmark([500, 2500], [128, 512], torchscript=False)

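# "batched" inputs: mini-batches with random sequence lengths, scripted vs. traced.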
torch.manual_seed(0)
run_benchmark([512, 4096], [128, 512], input_tensor_type="batched")
torch.manual_seed(0)
run_benchmark([512, 4096], [128, 512], torchscript=False, input_tensor_type="batched")
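For context on why both conversion modes are compared: torch.jit.trace records the operations executed for one example input, so Python control flow that depends on the data (such as a loop over a variable number of sequences) gets baked in, while torch.jit.script compiles the forward method itself and keeps that control flow. A minimal sketch of the difference, using a hypothetical DynamicLoop module that is not part of this PR:

import torch


class DynamicLoop(torch.nn.Module):
    # Toy module whose amount of work depends on the runtime batch size.
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = torch.zeros_like(x[0])
        for i in range(x.size(0)):
            out = out + self.linear(x[i])
        return out


model = DynamicLoop().eval()
scripted = torch.jit.script(model)                  # keeps the data-dependent loop
traced = torch.jit.trace(model, torch.randn(3, 4))  # unrolls the loop for exactly 3 rows

x = torch.randn(5, 4)
print(torch.allclose(scripted(x), model(x)))  # True: the loop still runs 5 times
print(torch.allclose(traced(x), model(x)))    # False: the trace replays only 3 iterations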
1 change: 1 addition & 0 deletions src/transformers/__init__.py
@@ -211,6 +211,7 @@

    from .modeling_bert import (
        BertPreTrainedModel,
+       BertScriptableModel,
        BertModel,
        BertForPreTraining,
        BertForMaskedLM,
16 changes: 3 additions & 13 deletions src/transformers/benchmark/benchmark.py
@@ -86,19 +86,9 @@ def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_len
        config = self.config_dict[model_name]

        if self.args.torchscript:
+           from transformers.modeling_bert import BertScriptableModel
-           config.torchscript = True

-       has_model_class_in_config = hasattr(config, "architecture") and len(config.architectures) > 1
-       if not self.args.only_pretrain_model and has_model_class_in_config:
-           try:
-               model_class = config.architectures[0]
-               transformers_module = __import__("transformers", fromlist=[model_class])
-               model_cls = getattr(transformers_module, model_class)
-               model = model_cls(config)
-           except ImportError:
-               raise ImportError(
-                   f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`."
-               )
+           model = BertScriptableModel(config)
        else:
            model = MODEL_MAPPING[config.__class__](config)

@@ -118,7 +108,7 @@ def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_len

        if self.args.torchscript:
            with torch.no_grad():
-               inference_model = torch.jit.trace(model, input_ids)
+               inference_model = torch.jit.script(model)
        else:
            inference_model = model
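The diff above only works if the model can be compiled by torch.jit.script, which is presumably why the PR introduces a separate BertScriptableModel instead of scripting BertModel directly; that class's implementation is not shown in this diff. As a rough, hypothetical illustration of what script compatibility typically requires (explicit type annotations and Optional handling instead of **kwargs-style signatures), not the actual BertScriptableModel:

from typing import Optional

import torch


class ScriptableLayer(torch.nn.Module):
    # Hypothetical example layer, not the real BertScriptableModel.
    def __init__(self, hidden_size: int):
        super().__init__()
        self.dense = torch.nn.Linear(hidden_size, hidden_size)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        # TorchScript requires refining the Optional before it is used as a Tensor.
        if attention_mask is not None:
            hidden_states = hidden_states + attention_mask
        return self.dense(hidden_states)


scripted_layer = torch.jit.script(ScriptableLayer(16))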