Generation tests: don't rely on main input name (#34228)
* don't rely on main input name

* update
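
The motivation, in short: `model_class.main_input_name` points to `input_features` or `pixel_values` for speech and vision models, so its last dimension is not the text prompt length; the length checks therefore read the prompt length from `inputs_dict["input_ids"]` instead. Below is a minimal, self-contained sketch of the check these tests perform, using stand-in tensors and names rather than the actual test harness:

import torch

max_new_tokens = 4
inputs_dict = {
    "input_ids": torch.ones(2, 7, dtype=torch.long),  # text prompt: (batch, prompt_len)
    "input_features": torch.zeros(2, 7, 80),          # e.g. audio features: last dim is NOT the prompt length
}

# stand-in for `model.generate(...)` on a decoder-only model: prompt plus newly generated tokens
output_generate = torch.ones(2, inputs_dict["input_ids"].shape[-1] + max_new_tokens, dtype=torch.long)

# decoder-only: total length == prompt length + max_new_tokens, with the prompt length taken from input_ids
assert output_generate.shape[-1] == max_new_tokens + inputs_dict["input_ids"].shape[-1]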
zucchini-nlp authored Oct 21, 2024
1 parent 816f442 commit ca541bd
Showing 3 changed files with 47 additions and 36 deletions.
69 changes: 43 additions & 26 deletions tests/generation/test_utils.py
@@ -410,15 +410,14 @@ def _contrastive_generate(
def test_greedy_generate(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
main_input = inputs_dict[model_class.main_input_name]

model = model_class(config).to(torch_device).eval()
output_generate = self._greedy_generate(model=model, inputs_dict=inputs_dict)

if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1])

@pytest.mark.generate
def test_greedy_generate_dict_outputs(self):
@@ -444,7 +443,9 @@ def test_greedy_generate_dict_outputs(self):
# Retrocompatibility check
self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput)
else:
self.assertTrue(output_generate.sequences.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(
output_generate.sequences.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1]
)
self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput)
# Retrocompatibility check
self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput)
@@ -478,23 +479,24 @@ def test_greedy_generate_dict_outputs_use_cache(self):
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.sequences.shape[-1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.sequences.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(
output_generate.sequences.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1]
)

self._check_outputs(output_generate, main_input, model.config, use_cache=True)

@pytest.mark.generate
def test_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
main_input = inputs_dict[model_class.main_input_name]

model = model_class(config).to(torch_device).eval()
output_generate = self._sample_generate(model=model, inputs_dict=inputs_dict, num_return_sequences=1)

if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1])

@pytest.mark.generate
def test_sample_generate_dict_output(self):
@@ -521,7 +523,9 @@ def test_sample_generate_dict_output(self):
# Retrocompatibility check
self.assertIsInstance(output_generate, SampleEncoderDecoderOutput)
else:
self.assertTrue(output_generate.sequences.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(
output_generate.sequences.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1]
)
self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput)
# Retrocompatibility check
self.assertIsInstance(output_generate, SampleDecoderOnlyOutput)
@@ -532,7 +536,6 @@ def test_sample_generate_dict_output(self):
def test_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
main_input = inputs_dict[model_class.main_input_name]

model = model_class(config).to(torch_device).eval()

@@ -542,7 +545,7 @@ def test_beam_search_generate(self):
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1])

@pytest.mark.generate
def test_beam_search_generate_dict_output(self):
@@ -569,7 +572,9 @@ def test_beam_search_generate_dict_output(self):
# Retrocompatibility check
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertTrue(output_generate.sequences.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(
output_generate.sequences.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1]
)
self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput)
# Retrocompatibility check
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
@@ -609,7 +614,9 @@ def test_beam_search_generate_dict_outputs_use_cache(self):
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.sequences.shape[-1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.sequences.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(
output_generate.sequences.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1]
)

self._check_outputs(
output_generate,
@@ -647,7 +654,6 @@ def test_model_parallel_beam_search(self):
def test_beam_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
main_input = inputs_dict[model_class.main_input_name]

model = model_class(config).to(torch_device).eval()
beam_kwargs = self._get_beam_kwargs()
@@ -660,7 +666,7 @@ def test_beam_sample_generate(self):
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1])

# for VLMs, inputs_embeds won't match input_ids unless images are encoded and merged with the ids properly
# no quick fix is available, since the image-embedding step is very model-specific
@@ -712,7 +718,9 @@ def test_beam_sample_generate_dict_output(self):
# Retrocompatibility check
self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput)
else:
self.assertTrue(output_generate.sequences.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(
output_generate.sequences.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1]
)
self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput)
# Retrocompatibility check
self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput)
@@ -746,7 +754,6 @@ def test_generate_without_input_ids(self):
def test_group_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
main_input = inputs_dict[model_class.main_input_name]

model = model_class(config).to(torch_device).eval()
# check `generate()` and `group_beam_search()` are equal
@@ -759,7 +766,7 @@ def test_group_beam_search_generate(self):
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1])

# check `group_beam_search` for higher than 1 `num_return_sequences`
num_return_sequences = 2
@@ -772,7 +779,7 @@ def test_group_beam_search_generate(self):
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1])

@pytest.mark.generate
def test_group_beam_search_generate_dict_output(self):
@@ -799,7 +806,9 @@ def test_group_beam_search_generate_dict_output(self):
# Retrocompatibility check
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertTrue(output_generate.sequences.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(
output_generate.sequences.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1]
)
self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput)
# Retrocompatibility check
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
@@ -814,7 +823,6 @@ def test_group_beam_search_generate_dict_output(self):
def test_constrained_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
main_input = inputs_dict[model_class.main_input_name]

model = model_class(config).to(torch_device).eval()

@@ -838,7 +846,7 @@ def test_constrained_beam_search_generate(self):
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1])

for generation_output in output_generate:
self._check_sequence_inside_sequence(force_tokens, generation_output)
@@ -862,7 +870,7 @@ def test_constrained_beam_search_generate(self):
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1])

for generation_output in output_generate:
self._check_sequence_inside_sequence(force_tokens, generation_output)
@@ -903,7 +911,9 @@ def test_constrained_beam_search_generate_dict_output(self):
# Retrocompatibility check
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertTrue(output_generate.sequences.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(
output_generate.sequences.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1]
)
self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput)
# Retrocompatibility check
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
@@ -923,7 +933,6 @@ def test_contrastive_generate(self):
self.skipTest(reason="Won't fix: old model with different cache format")

config, inputs_dict = self.prepare_config_and_inputs_for_generate()
main_input = inputs_dict[model_class.main_input_name]

# NOTE: contrastive search only works with cache on at the moment.
if not hasattr(config, "use_cache"):
@@ -940,7 +949,7 @@ def test_contrastive_generate(self):
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(output_generate.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1])

@pytest.mark.generate
def test_contrastive_generate_dict_outputs_use_cache(self):
@@ -975,7 +984,9 @@ def test_contrastive_generate_dict_outputs_use_cache(self):
if model.config.is_encoder_decoder:
self.assertTrue(output_generate.sequences.shape[-1] == self.max_new_tokens + 1)
else:
self.assertTrue(output_generate.sequences.shape[-1] == self.max_new_tokens + main_input.shape[-1])
self.assertTrue(
output_generate.sequences.shape[-1] == self.max_new_tokens + inputs_dict["input_ids"].shape[-1]
)

self._check_outputs(output_generate, main_input, model.config, use_cache=True)

@@ -2035,8 +2046,14 @@ def test_inherits_generation_mixin(self):
self.assertTrue("GenerationMixin" in str(model_class.__bases__))

def _check_outputs(self, output, main_input, config, use_cache=False, num_return_sequences=1):
# the batch size can be read from the main input, but the sequence length depends on the model type and on whether the input is text/audio/image,
# so we infer the actual text sequence length from model_tester, the same way as it is done in the `test_modeling_common.py` tests
batch_size = main_input.shape[0]
seq_length = main_input.shape[-1]

seq_length = getattr(self.model_tester, "seq_length", None)
seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length)
seq_length = getattr(self.model_tester, "text_seq_length", seq_length)

config = config.text_config if hasattr(config, "text_config") else config
num_sequences_in_output = batch_size * num_return_sequences

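For reference, a small sketch of the `getattr` fallback chain used in `_check_outputs` above, with a hypothetical tester object (illustration only): a later lookup only overrides the earlier value when that attribute actually exists.

class DummyTester:  # hypothetical stand-in for a model tester
    seq_length = 32
    text_seq_length = 16  # e.g. a multimodal tester exposing the text length separately

tester = DummyTester()
seq_length = getattr(tester, "seq_length", None)                # 32
seq_length = getattr(tester, "encoder_seq_length", seq_length)  # stays 32 (attribute not defined)
seq_length = getattr(tester, "text_seq_length", seq_length)     # 16 wins for the text-length checks
assert seq_length == 16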
6 changes: 4 additions & 2 deletions tests/models/reformer/test_modeling_reformer.py
@@ -53,6 +53,7 @@ def __init__(
parent,
batch_size=13,
seq_length=32,
text_seq_length=None,
is_training=True,
is_decoder=True,
use_input_mask=True,
@@ -128,6 +129,7 @@ def __init__(
self.attn_layers = attn_layers
self.pad_token_id = pad_token_id
self.hash_seed = hash_seed
self.text_seq_length = text_seq_length or seq_length

attn_chunk_length = local_attn_chunk_length if local_attn_chunk_length is not None else lsh_attn_chunk_length
num_chunks_after = local_num_chunks_after if local_num_chunks_after is not None else lsh_num_chunks_after
@@ -608,7 +610,7 @@ class ReformerLocalAttnModelTest(ReformerTesterMixin, GenerationTesterMixin, Mod
test_sequence_classification_problem_types = True

def setUp(self):
self.model_tester = ReformerModelTester(self)
self.model_tester = ReformerModelTester(self, text_seq_length=16)
self.config_tester = ConfigTester(self, config_class=ReformerConfig, hidden_size=37)

@slow
@@ -689,7 +691,7 @@ def prepare_config_and_inputs_for_generate(self, *args, **kwargs):
# decreasing the seq_length in the tester causes errors for "training_tests", which need exactly the max seq length
# NOTE: seq_length has to be a multiple of 4, otherwise it fails for other tests
original_sequence_length = self.model_tester.seq_length
self.model_tester.seq_length = 16
self.model_tester.seq_length = self.model_tester.text_seq_length
test_inputs = super().prepare_config_and_inputs_for_generate(*args, **kwargs)
self.model_tester.seq_length = original_sequence_length
return test_inputs
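The override above temporarily swaps the tester's `seq_length` for the shorter `text_seq_length` while generation inputs are prepared, then restores it so the remaining tests keep the full-length sequences they require. A rough sketch of that save-and-restore pattern with a hypothetical tester (not the actual Reformer code):

class Tester:  # hypothetical stand-in
    seq_length = 32       # full length required by e.g. training tests (multiple of 4)
    text_seq_length = 16  # shorter length used only for generation tests

def prepare_inputs_for_generate(tester):
    original_sequence_length = tester.seq_length
    tester.seq_length = tester.text_seq_length    # shrink only while generation inputs are built
    inputs = {"seq_length": tester.seq_length}    # stand-in for the real input preparation
    tester.seq_length = original_sequence_length  # restore, so other tests are unaffected
    return inputs

tester = Tester()
assert prepare_inputs_for_generate(tester)["seq_length"] == 16
assert tester.seq_length == 32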
8 changes: 0 additions & 8 deletions tests/models/speech_to_text/test_modeling_speech_to_text.py
@@ -618,14 +618,6 @@ def test_resize_embeddings_untied(self):
def test_generate_without_input_ids(self):
pass

def _check_outputs(self, output, main_input, config, use_cache=False, num_return_sequences=1):
# In this model, the index of `batch_size` and `sequence_length` in `main_input` is different: they are the
# first two dimensions of the tensor.
main_input = main_input[:, :, 0]
super()._check_outputs(
output, main_input, config, use_cache=use_cache, num_return_sequences=num_return_sequences
)

def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
self.skipTest(reason="test_torchscript is set to False")
