From 4e3694959b9e64f35f93ee794ddc31e2143c8d49 Mon Sep 17 00:00:00 2001
From: fabiocapsouza
Date: Sun, 15 Nov 2020 12:30:46 -0300
Subject: [PATCH] Revert "[Tests] fix attention masks in Tests (#6621)"

This reverts commit d608dd6149ea9f829814d9a84a96d66acc6e00d8.
---
 tests/test_modeling_albert.py     |  4 ++--
 tests/test_modeling_bert.py       |  4 ++--
 tests/test_modeling_common.py     | 10 +++-------
 tests/test_modeling_ctrl.py       |  4 ++--
 tests/test_modeling_distilbert.py |  4 ++--
 tests/test_modeling_dpr.py        |  4 ++--
 tests/test_modeling_electra.py    |  4 ++--
 tests/test_modeling_flaubert.py   |  4 ++--
 tests/test_modeling_gpt2.py       |  4 ++--
 tests/test_modeling_longformer.py |  4 ++--
 tests/test_modeling_mobilebert.py |  4 ++--
 tests/test_modeling_reformer.py   |  4 ++--
 tests/test_modeling_roberta.py    |  4 ++--
 tests/test_modeling_xlm.py        |  4 ++--
 tests/test_modeling_xlnet.py      |  4 ++--
 15 files changed, 31 insertions(+), 35 deletions(-)

diff --git a/tests/test_modeling_albert.py b/tests/test_modeling_albert.py
index 15459703bde655..7abda856003a95 100644
--- a/tests/test_modeling_albert.py
+++ b/tests/test_modeling_albert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor
 
 
 if is_torch_available():
@@ -71,7 +71,7 @@ def prepare_config_and_inputs(self):
 
         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
 
         token_type_ids = None
         if self.use_token_type_ids:
diff --git a/tests/test_modeling_bert.py b/tests/test_modeling_bert.py
index 0e640f5f35983b..fe336df743c99f 100755
--- a/tests/test_modeling_bert.py
+++ b/tests/test_modeling_bert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
 
 
 if is_torch_available():
@@ -93,7 +93,7 @@ def prepare_config_and_inputs(self):
 
         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
 
         token_type_ids = None
         if self.use_token_type_ids:
diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py
index 9186ce548d56b7..1cab7b19704e48 100755
--- a/tests/test_modeling_common.py
+++ b/tests/test_modeling_common.py
@@ -704,6 +704,9 @@ def recursive_check(tuple_object, dict_object):
                     recursive_check(tuple_iterable_value, dict_iterable_value)
             elif tuple_object is None:
                 return
+            elif torch.isinf(tuple_object).any() and torch.isinf(dict_object).any():
+                # TODO: (Lysandre) - maybe take a look if that's ok here
+                return
             else:
                 self.assertTrue(
                     torch.allclose(tuple_object, dict_object, atol=1e-5),
@@ -934,13 +937,6 @@ def ids_tensor(shape, vocab_size, rng=None, name=None):
     return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()
 
 
-def random_attention_mask(shape, rng=None, name=None):
-    attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None)
-    # make sure that at least one token is attended to for each batch
-    attn_mask[:, -1] = 1
-    return attn_mask
-
-
 def floats_tensor(shape, scale=1.0, rng=None, name=None):
     """Creates a random float32 tensor"""
     if rng is None:
diff --git a/tests/test_modeling_ctrl.py b/tests/test_modeling_ctrl.py
index 0cf997a1ab44c6..9920cde0310cca 100644
--- a/tests/test_modeling_ctrl.py
+++ b/tests/test_modeling_ctrl.py
@@ -19,7 +19,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor
 
 
 if is_torch_available():
@@ -60,7 +60,7 @@ def prepare_config_and_inputs(self):
 
         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
 
         token_type_ids = None
         if self.use_token_type_ids:
diff --git a/tests/test_modeling_distilbert.py b/tests/test_modeling_distilbert.py
index 1a1dd4d7e9ba66..8e76e23dd2af2a 100644
--- a/tests/test_modeling_distilbert.py
+++ b/tests/test_modeling_distilbert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor
 
 
 if is_torch_available():
@@ -89,7 +89,7 @@ def prepare_config_and_inputs(self):
 
         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
 
         sequence_labels = None
         token_labels = None
diff --git a/tests/test_modeling_dpr.py b/tests/test_modeling_dpr.py
index 666c7898bf7c37..d6206f17172eec 100644
--- a/tests/test_modeling_dpr.py
+++ b/tests/test_modeling_dpr.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor
 
 
 if is_torch_available():
@@ -88,7 +88,7 @@ def prepare_config_and_inputs(self):
 
         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
 
         token_type_ids = None
         if self.use_token_type_ids:
diff --git a/tests/test_modeling_electra.py b/tests/test_modeling_electra.py
index ae13e682aa7491..88c0eafa578837 100644
--- a/tests/test_modeling_electra.py
+++ b/tests/test_modeling_electra.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor
 
 
 if is_torch_available():
@@ -69,7 +69,7 @@ def prepare_config_and_inputs(self):
 
         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
 
         token_type_ids = None
         if self.use_token_type_ids:
diff --git a/tests/test_modeling_flaubert.py b/tests/test_modeling_flaubert.py
index b748945618e3be..aaecafc435efe9 100644
--- a/tests/test_modeling_flaubert.py
+++ b/tests/test_modeling_flaubert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor
 
 
 if is_torch_available():
@@ -72,7 +72,7 @@ def __init__(
 
     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
-        input_mask = random_attention_mask([self.batch_size, self.seq_length])
+        input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()
 
         input_lengths = None
         if self.use_input_lengths:
diff --git a/tests/test_modeling_gpt2.py b/tests/test_modeling_gpt2.py
index 19193a31df2c49..dd4ca1d304d839 100644
--- a/tests/test_modeling_gpt2.py
+++ b/tests/test_modeling_gpt2.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
 
 
 if is_torch_available():
@@ -92,7 +92,7 @@ def prepare_config_and_inputs(self):
 
         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
 
         token_type_ids = None
         if self.use_token_type_ids:
diff --git a/tests/test_modeling_longformer.py b/tests/test_modeling_longformer.py
index 30f38f7776192f..0730dce654f417 100644
--- a/tests/test_modeling_longformer.py
+++ b/tests/test_modeling_longformer.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor
 
 
 if is_torch_available():
@@ -82,7 +82,7 @@ def prepare_config_and_inputs(self):
 
         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
 
         token_type_ids = None
         if self.use_token_type_ids:
diff --git a/tests/test_modeling_mobilebert.py b/tests/test_modeling_mobilebert.py
index a10fb376a146d3..cedc075b9fdbc1 100644
--- a/tests/test_modeling_mobilebert.py
+++ b/tests/test_modeling_mobilebert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
 
 
 if is_torch_available():
@@ -94,7 +94,7 @@ def prepare_config_and_inputs(self):
 
         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
 
         token_type_ids = None
         if self.use_token_type_ids:
diff --git a/tests/test_modeling_reformer.py b/tests/test_modeling_reformer.py
index ca4bd1b5f9ec64..b6e3df069f2712 100644
--- a/tests/test_modeling_reformer.py
+++ b/tests/test_modeling_reformer.py
@@ -19,7 +19,7 @@
 from transformers.testing_utils import require_multigpu, require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
 
 
 if is_torch_available():
@@ -133,7 +133,7 @@ def prepare_config_and_inputs(self):
 
         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
 
         choice_labels = None
         if self.use_labels:
diff --git a/tests/test_modeling_roberta.py b/tests/test_modeling_roberta.py
index 7c432a9b878f9c..ddf4695127fe03 100644
--- a/tests/test_modeling_roberta.py
+++ b/tests/test_modeling_roberta.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
 
 
 if is_torch_available():
@@ -71,7 +71,7 @@ def prepare_config_and_inputs(self):
 
         input_mask = None
         if self.use_input_mask:
-            input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
 
         token_type_ids = None
         if self.use_token_type_ids:
diff --git a/tests/test_modeling_xlm.py b/tests/test_modeling_xlm.py
index dc5d89cec8fc86..8114cd6ad8db96 100644
--- a/tests/test_modeling_xlm.py
+++ b/tests/test_modeling_xlm.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor
 
 
 if is_torch_available():
@@ -73,7 +73,7 @@ def __init__(
 
    def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
-        input_mask = random_attention_mask([self.batch_size, self.seq_length])
+        input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()
 
         input_lengths = None
         if self.use_input_lengths:
diff --git a/tests/test_modeling_xlnet.py b/tests/test_modeling_xlnet.py
index 31738a5bfd2743..0408b25f63d100 100644
--- a/tests/test_modeling_xlnet.py
+++ b/tests/test_modeling_xlnet.py
@@ -21,7 +21,7 @@
 from transformers.testing_utils import require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, ids_tensor
 
 
 if is_torch_available():
@@ -100,7 +100,7 @@ def prepare_config_and_inputs(self):
         input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
         input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
         segment_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
-        input_mask = random_attention_mask([self.batch_size, self.seq_length])
+        input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()
 
         input_ids_q = ids_tensor([self.batch_size, self.seq_length + 1], self.vocab_size)
         perm_mask = torch.zeros(