Revert "[Tests] fix attention masks in Tests (huggingface#6621)"
This reverts commit d608dd6.
fabiocapsouza authored Nov 15, 2020
1 parent 5e04391 commit 4e36949
Showing 15 changed files with 31 additions and 35 deletions.
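For context, the two mask helpers this revert swaps differ in one detail, visible in the tests/test_modeling_common.py hunk further down: ids_tensor(shape, vocab_size=2) draws every position independently from {0, 1}, so a row can come out all zeros, while random_attention_mask forces the last position to 1 so each example attends to at least one token. A minimal PyTorch sketch of that difference (simplified stand-ins; the real ids_tensor also takes rng and name arguments):

import torch

def ids_tensor(shape, vocab_size):
    # Simplified stand-in for the test helper: uniform random integers in [0, vocab_size).
    # With vocab_size=2 this doubles as a random 0/1 attention mask.
    return torch.randint(0, vocab_size, shape, dtype=torch.long)

def random_attention_mask(shape):
    # The helper removed by this revert: same random 0/1 mask, but the last
    # position is forced to 1 so at least one token per row is attended to.
    attn_mask = ids_tensor(shape, vocab_size=2)
    attn_mask[:, -1] = 1
    return attn_mask

plain_mask = ids_tensor([4, 7], vocab_size=2)    # may contain an all-zero row
guarded_mask = random_attention_mask([4, 7])     # every row has at least one 1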
4 changes: 2 additions & 2 deletions tests/test_modeling_albert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor


 if is_torch_available():
@@ -71,7 +71,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/test_modeling_bert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor


 if is_torch_available():
@@ -93,7 +93,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

         token_type_ids = None
         if self.use_token_type_ids:
10 changes: 3 additions & 7 deletions tests/test_modeling_common.py
@@ -704,6 +704,9 @@ def recursive_check(tuple_object, dict_object):
                     recursive_check(tuple_iterable_value, dict_iterable_value)
             elif tuple_object is None:
                 return
+            elif torch.isinf(tuple_object).any() and torch.isinf(dict_object).any():
+                # TODO: (Lysandre) - maybe take a look if that's ok here
+                return
             else:
                 self.assertTrue(
                     torch.allclose(tuple_object, dict_object, atol=1e-5),
@@ -934,13 +937,6 @@ def ids_tensor(shape, vocab_size, rng=None, name=None):
     return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()


-def random_attention_mask(shape, rng=None, name=None):
-    attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None)
-    # make sure that at least one token is attended to for each batch
-    attn_mask[:, -1] = 1
-    return attn_mask
-
-
 def floats_tensor(shape, scale=1.0, rng=None, name=None):
     """Creates a random float32 tensor"""
     if rng is None:
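The first hunk above also restores a tolerance in the tuple-vs-dict output equivalence check: when both tensors contain infinite values, the comparison is skipped rather than asserted with torch.allclose. A condensed, non-recursive sketch of that restored logic (tensor inputs only, flattened from the recursive_check shown above):

import torch

def tensors_equivalent(tuple_object, dict_object, atol=1e-5):
    # Mirror of the restored branch: skip the check when both outputs contain inf,
    # otherwise require elementwise closeness within atol.
    if torch.isinf(tuple_object).any() and torch.isinf(dict_object).any():
        return True
    return bool(torch.allclose(tuple_object, dict_object, atol=atol))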
4 changes: 2 additions & 2 deletions tests/test_modeling_ctrl.py
@@ -19,7 +19,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor


 if is_torch_available():
@@ -60,7 +60,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/test_modeling_distilbert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor


 if is_torch_available():
@@ -89,7 +89,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

         sequence_labels = None
         token_labels = None
4 changes: 2 additions & 2 deletions tests/test_modeling_dpr.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor


 if is_torch_available():
@@ -88,7 +88,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/test_modeling_electra.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor


 if is_torch_available():
@@ -69,7 +69,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/test_modeling_flaubert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor


 if is_torch_available():
@@ -72,7 +72,7 @@ def __init__(

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
-        input_mask = random_attention_mask([self.batch_size, self.seq_length])
+        input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()

         input_lengths = None
         if self.use_input_lengths:
4 changes: 2 additions & 2 deletions tests/test_modeling_gpt2.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor


 if is_torch_available():
@@ -92,7 +92,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/test_modeling_longformer.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor


 if is_torch_available():
@@ -82,7 +82,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/test_modeling_mobilebert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor


 if is_torch_available():
@@ -94,7 +94,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/test_modeling_reformer.py
@@ -19,7 +19,7 @@
 from transformers.testing_utils import require_multigpu, require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor


 if is_torch_available():
@@ -133,7 +133,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

         choice_labels = None
         if self.use_labels:
4 changes: 2 additions & 2 deletions tests/test_modeling_roberta.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor


 if is_torch_available():
@@ -71,7 +71,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/test_modeling_xlm.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor


 if is_torch_available():
@@ -73,7 +73,7 @@ def __init__(

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
-        input_mask = random_attention_mask([self.batch_size, self.seq_length])
+        input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()

         input_lengths = None
         if self.use_input_lengths:
4 changes: 2 additions & 2 deletions tests/test_modeling_xlnet.py
@@ -21,7 +21,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device

 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor


 if is_torch_available():
@@ -100,7 +100,7 @@ def prepare_config_and_inputs(self):
         input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
         input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
         segment_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
-        input_mask = random_attention_mask([self.batch_size, self.seq_length])
+        input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()

         input_ids_q = ids_tensor([self.batch_size, self.seq_length + 1], self.vocab_size)
         perm_mask = torch.zeros(
