Shorten the conversation tests for speed + fixing position overflows #26960
First changed file (a model test suite):
@@ -85,7 +85,7 @@ def __init__(
         hidden_act="gelu",
         hidden_dropout_prob=0.1,
         attention_probs_dropout_prob=0.1,
-        max_position_embeddings=20,
+        max_position_embeddings=50,
         eos_token_id=2,
         pad_token_id=1,
         bos_token_id=0,
@@ -244,9 +244,6 @@ def is_pipeline_test_to_skip(
     ):
         if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
-        # TODO @Rocketnight1 to fix
-        if pipeline_test_casse_name == "ConversationalPipelineTests":
-            return True
        return False

    def setUp(self):
@@ -284,6 +281,10 @@ def test_generate_fp16(self):
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
+
+    @unittest.skip("Tiny random model has too few position embeddings for this.")
+    def test_pipeline_conversational(self):
+        pass
Reviewer: Let's put this back to …

Author: Will do!


 def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
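For context, the "position overflow" the skip message refers to comes from indexing a learned position-embedding table past its length: once the prompt plus generated tokens exceed max_position_embeddings, the lookup fails. A minimal sketch of that failure mode, assuming a plain torch.nn.Embedding table like the ones these tiny configs create (the sizes below are illustrative, not the repo's exact values):

```python
import torch

# A tiny position-embedding table like the old test config built
# (max_position_embeddings=20); dimensions are illustrative.
position_embeddings = torch.nn.Embedding(num_embeddings=20, embedding_dim=16)

# Generating past the table length asks for positions 20..24, which
# do not exist in the table, so the lookup raises on CPU.
position_ids = torch.arange(25)
try:
    position_embeddings(position_ids)
except IndexError as err:
    print(f"position overflow: {err}")
```

Bumping the tiny configs to max_position_embeddings=50 gives the conversational tests headroom even with multi-turn histories.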
Second changed file (another model test suite):
@@ -86,7 +86,7 @@ def __init__(
         hidden_act="gelu",
         hidden_dropout_prob=0.1,
         attention_probs_dropout_prob=0.1,
-        max_position_embeddings=32,
+        max_position_embeddings=50,
         eos_token_id=2,
         pad_token_id=1,
         bos_token_id=0,
@@ -397,3 +397,7 @@ def test_model_from_pretrained(self):
         input_ids = np.ones((1, 1)) * model.config.eos_token_id
         outputs = model(input_ids)
         self.assertIsNotNone(outputs)
+
+    @unittest.skip("Tiny random model has too few position embeddings for this.")
+    def test_pipeline_conversational(self):
+        pass
Reviewer: same
Third changed file (another model test suite):
@@ -53,7 +53,7 @@ def __init__(
         intermediate_size=37,
         hidden_dropout_prob=0.1,
         attention_probs_dropout_prob=0.1,
-        max_position_embeddings=20,
+        max_position_embeddings=50,
         eos_token_id=2,
         pad_token_id=1,
         bos_token_id=0,
@@ -209,14 +209,9 @@ def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

-    # TODO: Fix the failed tests when this model gets more usage
-    def is_pipeline_test_to_skip(
-        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
-    ):
-        # TODO @Rocketnight1 to fix
-        if pipeline_test_casse_name == "ConversationalPipelineTests":
-            return True
-        return False
+    @unittest.skip("Tiny random model has too few position embeddings for this.")
+    def test_pipeline_conversational(self):
+        pass
Reviewer: same


 @require_tokenizers
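The same swap appears in each model test file: the harness-specific hook is dropped in favor of a standard decorator. A minimal sketch of the two mechanisms, written outside the repository's actual test classes to show the shape of each (the class names here are hypothetical):

```python
import unittest

class OldStyleMixin:
    # The removed hook: the shared pipeline test harness called this with
    # the test-case name (note the repo's own "casse" spelling) and
    # silently filtered the run when it returned True.
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return pipeline_test_casse_name == "ConversationalPipelineTests"

class NewStyle(unittest.TestCase):
    # The replacement: unittest itself records the skip, and the reason
    # string shows up directly in the test report.
    @unittest.skip("Tiny random model has too few position embeddings for this.")
    def test_pipeline_conversational(self):
        pass
```

The decorator version makes the skip and its reason visible per test, where the hook hid the decision inside the harness.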
Fourth changed file (the conversational pipeline tests):
@@ -77,14 +77,14 @@ def get_test_pipeline(self, model, tokenizer, processor):

    def run_pipeline_test(self, conversation_agent, _):
        # Simple
-        outputs = conversation_agent(Conversation("Hi there!"), max_new_tokens=20)
+        outputs = conversation_agent(Conversation("Hi there!"), max_new_tokens=5)
Reviewer: ok
||
self.assertEqual( | ||
outputs, | ||
Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]), | ||
) | ||
|
||
# Single list | ||
outputs = conversation_agent([Conversation("Hi there!")], max_new_tokens=20) | ||
outputs = conversation_agent([Conversation("Hi there!")], max_new_tokens=5) | ||
self.assertEqual( | ||
outputs, | ||
Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]), | ||
|
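These tests drive the conversational pipeline end to end; lowering max_new_tokens from 20 to 5 only caps how much each call generates, which is where the speedup comes from. A hedged usage sketch of the same API outside the test harness (the checkpoint name is an assumption for illustration, not the tiny model CI actually loads):

```python
from transformers import Conversation, pipeline

# Checkpoint chosen purely for illustration.
chatbot = pipeline("conversational", model="facebook/blenderbot_small-90M")

conversation = Conversation("Hi there!")
# max_new_tokens bounds the reply length; the tests use 5 to stay fast.
conversation = chatbot(conversation, max_new_tokens=5)
print(conversation.messages[-1]["content"])  # the assistant's (short) reply
```

The assertions in the diff only check that the assistant message is some string (ANY(str)), so shortening the generation does not weaken what the tests verify.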
@@ -96,7 +96,7 @@ def run_pipeline_test(self, conversation_agent, _):
        self.assertEqual(len(conversation_1), 1)
        self.assertEqual(len(conversation_2), 1)

-        outputs = conversation_agent([conversation_1, conversation_2], max_new_tokens=20)
+        outputs = conversation_agent([conversation_1, conversation_2], max_new_tokens=5)
        self.assertEqual(outputs, [conversation_1, conversation_2])
        self.assertEqual(
            outputs,
@@ -118,7 +118,7 @@ def run_pipeline_test(self, conversation_agent, _):

        # One conversation with history
        conversation_2.add_message({"role": "user", "content": "Why do you recommend it?"})
-        outputs = conversation_agent(conversation_2, max_new_tokens=20)
+        outputs = conversation_agent(conversation_2, max_new_tokens=5)
        self.assertEqual(outputs, conversation_2)
        self.assertEqual(
            outputs,
Reviewer: This change won't be reflected in the tiny models uploaded to the Hub (hf-internal-testing), so the pipeline testing will still use the previous value. We still skip the test, so it is fine, but the change here has no effect.

Reviewer: (The above is just a comment, not a request to change.)
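To make that caveat concrete: the pipeline CI loads pre-built tiny checkpoints from the Hub rather than constructing configs locally, so the value baked into the uploaded config wins until the tiny model is regenerated. A minimal sketch, with an illustrative repo id under hf-internal-testing (the exact repo name is an assumption):

```python
from transformers import AutoConfig

# Repo id is illustrative; the point is that the config comes from the
# uploaded Hub checkpoint, not from the edits in this diff.
config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-BlenderbotModel")
print(config.max_position_embeddings)  # still the old value until re-uploaded
```

Hence the @unittest.skip remains the change that actually takes effect in CI, and the config bump only matters once the tiny checkpoints are rebuilt.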