cleanup tf unittests: part 2 #6260

Merged
merged 12 commits on Aug 13, 2020
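
The recurring change across these files swaps unittest's assertListEqual on a converted shape list for a direct assertEqual on the shape tuple. A minimal, self-contained sketch of the two styles, using NumPy arrays as a stand-in for the TF/PyTorch tensors in the real tests (the names below are illustrative, not taken from the diff):

import unittest

import numpy as np


class ShapeAssertionStyles(unittest.TestCase):
    def test_old_and_new_style(self):
        batch_size, seq_length, hidden_size = 2, 7, 32
        last_hidden_state = np.zeros((batch_size, seq_length, hidden_size))

        # Old style: convert the shape to a list and compare list against list.
        self.assertListEqual(
            list(last_hidden_state.shape), [batch_size, seq_length, hidden_size]
        )

        # New style: compare the shape tuple directly, no conversion needed.
        self.assertEqual(last_hidden_state.shape, (batch_size, seq_length, hidden_size))


if __name__ == "__main__":
    unittest.main()

Both assertions pass; the new form just drops the list conversion and fits on one line in most of the tests below.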
22 changes: 9 additions & 13 deletions templates/adding_a_new_model/tests/test_modeling_tf_xxx.py
@@ -148,20 +148,18 @@ def create_and_check_xxx_model(

result = model(input_ids)

self.parent.assertListEqual(
list(result["last_hidden_state"].shape), [self.batch_size, self.seq_length, self.hidden_size]
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
)
self.parent.assertListEqual(list(result["pooler_output"].shape), [self.batch_size, self.hidden_size])
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

def create_and_check_xxx_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFXxxForMaskedLM(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(
list(result["logits"].shape), [self.batch_size, self.seq_length, self.vocab_size]
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

def create_and_check_xxx_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -170,7 +168,7 @@ def create_and_check_xxx_for_sequence_classification(
model = TFXxxForSequenceClassification(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.num_labels])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

def create_and_check_bert_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -186,7 +184,7 @@ def create_and_check_bert_for_multiple_choice(
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.num_choices])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

def create_and_check_xxx_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -195,18 +193,16 @@ def create_and_check_xxx_for_token_classification(
model = TFXxxForTokenClassification(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(
list(result["logits"].shape), [self.batch_size, self.seq_length, self.num_labels]
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

def create_and_check_xxx_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFXxxForQuestionAnswering(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(list(result["start_logits"].shape), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].shape), [self.batch_size, self.seq_length])
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
2 changes: 1 addition & 1 deletion tests/test_modeling_ctrl.py
@@ -117,7 +117,7 @@ def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask,
model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result["past_key_values"]), config.n_layer)
self.parent.assertEqual(len(result.past_key_values), config.n_layer)

def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLLMHeadModel(config)
2 changes: 1 addition & 1 deletion tests/test_modeling_gpt2.py
@@ -152,7 +152,7 @@ def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask,
result = model(input_ids)

self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result["past_key_values"]), config.n_layer)
self.parent.assertEqual(len(result.past_key_values), config.n_layer)

def create_and_check_gpt2_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = GPT2Model(config=config)
2 changes: 1 addition & 1 deletion tests/test_modeling_mbart.py
@@ -121,7 +121,7 @@ def test_mbart_fast_forward(self):
summary = torch.Tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]]).long().to(torch_device)
result = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(result["logits"].shape, expected_shape)
self.assertEqual(result.logits.shape, expected_shape)


@require_torch
6 changes: 3 additions & 3 deletions tests/test_modeling_t5.py
@@ -141,9 +141,9 @@ def create_and_check_t5_model(
decoder_attention_mask=decoder_attention_mask,
)
result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
decoder_output = result["last_hidden_state"]
decoder_past = result["decoder_past_key_values"]
encoder_output = result["encoder_last_hidden_state"]
decoder_output = result.last_hidden_state
decoder_past = result.decoder_past_key_values
encoder_output = result.encoder_last_hidden_state

self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
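
The PyTorch test changes above (CTRL, GPT-2, MBart, T5) also move from dict-style indexing into the model output to attribute access. A rough stand-in illustrating both access styles — the real return values are the library's output dataclasses; this FakeOutput class is hypothetical and only for illustration:

from dataclasses import dataclass

import numpy as np


@dataclass
class FakeOutput:
    # Stand-in for a model output object; the real outputs in the tests also
    # support dict-style lookup, which is what the old assertions relied on.
    last_hidden_state: np.ndarray
    encoder_last_hidden_state: np.ndarray

    def __getitem__(self, key):
        return getattr(self, key)


result = FakeOutput(
    last_hidden_state=np.zeros((2, 7, 32)),
    encoder_last_hidden_state=np.zeros((2, 9, 32)),
)

# Old style used in the tests: dict-style indexing.
decoder_output = result["last_hidden_state"]

# New style: attribute access, matching how the output objects are defined.
decoder_output = result.last_hidden_state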
20 changes: 8 additions & 12 deletions tests/test_modeling_tf_albert.py
@@ -137,10 +137,8 @@ def create_and_check_albert_model(

result = model(input_ids)

self.parent.assertListEqual(
list(result["last_hidden_state"].shape), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertListEqual(list(result["pooler_output"].shape), [self.batch_size, self.hidden_size])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

def create_and_check_albert_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -149,18 +147,16 @@ def create_and_check_albert_for_pretraining(
model = TFAlbertForPreTraining(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(
list(result["prediction_logits"].shape), [self.batch_size, self.seq_length, self.vocab_size]
)
self.parent.assertListEqual(list(result["sop_logits"].shape), [self.batch_size, self.num_labels])
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, self.num_labels))

def create_and_check_albert_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFAlbertForMaskedLM(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.seq_length, self.vocab_size])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

def create_and_check_albert_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -169,16 +165,16 @@ def create_and_check_albert_for_sequence_classification(
model = TFAlbertForSequenceClassification(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.num_labels])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

def create_and_check_albert_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFAlbertForQuestionAnswering(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(list(result["start_logits"].shape), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].shape), [self.batch_size, self.seq_length])
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
26 changes: 11 additions & 15 deletions tests/test_modeling_tf_bert.py
@@ -135,10 +135,8 @@ def create_and_check_bert_model(

result = model(input_ids)

self.parent.assertListEqual(
list(result["last_hidden_state"].shape), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertListEqual(list(result["pooler_output"].shape), [self.batch_size, self.hidden_size])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

def create_and_check_bert_lm_head(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -165,26 +163,24 @@ def create_and_check_bert_for_masked_lm(
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.seq_length, self.vocab_size])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

def create_and_check_bert_for_next_sequence_prediction(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFBertForNextSentencePrediction(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, 2])
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

def create_and_check_bert_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFBertForPreTraining(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(
list(result["prediction_logits"].shape), [self.batch_size, self.seq_length, self.vocab_size]
)
self.parent.assertListEqual(list(result["seq_relationship_logits"].shape), [self.batch_size, 2])
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

def create_and_check_bert_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -198,7 +194,7 @@ def create_and_check_bert_for_sequence_classification(
}

result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.num_labels])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

def create_and_check_bert_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -214,7 +210,7 @@ def create_and_check_bert_for_multiple_choice(
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.num_choices])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

def create_and_check_bert_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -227,7 +223,7 @@ def create_and_check_bert_for_token_classification(
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.seq_length, self.num_labels])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

def create_and_check_bert_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -240,8 +236,8 @@ def create_and_check_bert_for_question_answering(
}

result = model(inputs)
self.parent.assertListEqual(list(result["start_logits"].shape), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].shape), [self.batch_size, self.seq_length])
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
6 changes: 2 additions & 4 deletions tests/test_modeling_tf_ctrl.py
@@ -119,15 +119,13 @@ def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask,

result = model(input_ids)

self.parent.assertListEqual(
list(result["last_hidden_state"].shape), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

def create_and_check_ctrl_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = TFCTRLLMHeadModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.seq_length, self.vocab_size])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
16 changes: 7 additions & 9 deletions tests/test_modeling_tf_distilbert.py
@@ -106,17 +106,15 @@ def create_and_check_distilbert_model(

result = model(inputs)

self.parent.assertListEqual(
list(result["last_hidden_state"].shape), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

def create_and_check_distilbert_for_masked_lm(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDistilBertForMaskedLM(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.seq_length, self.vocab_size])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

def create_and_check_distilbert_for_question_answering(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -127,8 +125,8 @@ def create_and_check_distilbert_for_question_answering(
"attention_mask": input_mask,
}
result = model(inputs)
self.parent.assertListEqual(list(result["start_logits"].shape), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].shape), [self.batch_size, self.seq_length])
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

def create_and_check_distilbert_for_sequence_classification(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -137,7 +135,7 @@ def create_and_check_distilbert_for_sequence_classification(
model = TFDistilBertForSequenceClassification(config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.num_labels])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

def create_and_check_distilbert_for_multiple_choice(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -151,7 +149,7 @@ def create_and_check_distilbert_for_multiple_choice(
"attention_mask": multiple_choice_input_mask,
}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.num_choices])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

def create_and_check_distilbert_for_token_classification(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
@@ -160,7 +158,7 @@ def create_and_check_distilbert_for_token_classification(
model = TFDistilBertForTokenClassification(config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask}
result = model(inputs)
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.seq_length, self.num_labels])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()