
Shorten the conversation tests for speed + fixing position overflows #26960

Merged: 11 commits, Oct 31, 2023
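
For context on the "position overflows" in the title: learned position embeddings are a fixed-size lookup table, so once a conversation plus its generated tokens needs more positions than max_position_embeddings, the embedding lookup indexes out of range. A minimal sketch of that failure mode (values illustrative, not taken from the PR):

import torch

# Fixed-size table of learned position embeddings, as in the testers below.
max_position_embeddings = 20  # the old test value
position_embeddings = torch.nn.Embedding(max_position_embeddings, 16)

# A prompt plus max_new_tokens=20 of generation can need more than 20 positions.
position_ids = torch.arange(25)
position_embeddings(position_ids)  # raises IndexError: index out of range in self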
tests/models/blenderbot/test_modeling_blenderbot.py (1 addition, 1 deletion)
@@ -85,7 +85,7 @@ def __init__(
         hidden_act="gelu",
         hidden_dropout_prob=0.1,
         attention_probs_dropout_prob=0.1,
-        max_position_embeddings=20,
+        max_position_embeddings=50,
Collaborator:
This change won't be reflected in the tiny models uploaded to the Hub (hf-internal-testing), so the pipeline tests will still use the previous value. We still skip the test, so it is fine, but the change here has no effect.

Collaborator:
(The above is just a comment, not a request to change.)
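
To illustrate the reviewer's point (the exact repo name is an assumption): the pipeline tests load a tiny checkpoint from the Hub, whose stored config predates this PR, so the value edited above never reaches them.

from transformers import AutoConfig

# Repo name is an assumption for illustration; the tiny checkpoints live under
# the hf-internal-testing organization mentioned above.
config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-BlenderbotForConditionalGeneration")

# This value comes from the uploaded config, not from the tester __init__
# edited in this diff, so the 20 -> 50 bump is invisible to pipeline tests.
print(config.max_position_embeddings)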

         eos_token_id=2,
         pad_token_id=1,
         bos_token_id=0,
tests/models/blenderbot/test_modeling_flax_blenderbot.py (1 addition, 1 deletion)

@@ -87,7 +87,7 @@ def __init__(
         hidden_act="gelu",
         hidden_dropout_prob=0.1,
         attention_probs_dropout_prob=0.1,
-        max_position_embeddings=32,
+        max_position_embeddings=50,
         eos_token_id=2,
         pad_token_id=1,
         bos_token_id=0,
tests/models/blenderbot/test_modeling_tf_blenderbot.py (1 addition, 1 deletion)

@@ -53,7 +53,7 @@ def __init__(
         intermediate_size=37,
         hidden_dropout_prob=0.1,
         attention_probs_dropout_prob=0.1,
-        max_position_embeddings=20,
+        max_position_embeddings=50,
         eos_token_id=2,
         pad_token_id=1,
         bos_token_id=0,
@@ -85,7 +85,7 @@ def __init__(
         hidden_act="gelu",
         hidden_dropout_prob=0.1,
         attention_probs_dropout_prob=0.1,
-        max_position_embeddings=20,
+        max_position_embeddings=50,
         eos_token_id=2,
         pad_token_id=1,
         bos_token_id=0,
@@ -244,9 +244,6 @@ def is_pipeline_test_to_skip(
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
-        # TODO @Rocketnight1 to fix
-        if pipeline_test_casse_name == "ConversationalPipelineTests":
-            return True
        return False

    def setUp(self):

@@ -284,6 +281,10 @@ def test_generate_fp16(self):
         model.generate(input_ids, attention_mask=attention_mask)
         model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)

+    @unittest.skip("Tiny random model has too few position embeddings for this.")
+    def test_pipeline_conversational(self):
+        pass
Collaborator:
Let's put this back into is_pipeline_test_to_skip (above) and just remove "# TODO @Rocketnight1 to fix", replacing it with the comment "Tiny random model has too few position embeddings for this."

Member (author):
Will do!
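
A sketch of the requested follow-up, reconstructed from the is_pipeline_test_to_skip hunk above (not necessarily the final committed code):

def is_pipeline_test_to_skip(
    self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
    if pipeline_test_casse_name == "TextGenerationPipelineTests":
        return True
    # Tiny random model has too few position embeddings for this.
    if pipeline_test_casse_name == "ConversationalPipelineTests":
        return True
    return False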



def assert_tensors_close(a, b, atol=1e-12, prefix=""):
    """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
@@ -86,7 +86,7 @@ def __init__(
         hidden_act="gelu",
         hidden_dropout_prob=0.1,
         attention_probs_dropout_prob=0.1,
-        max_position_embeddings=32,
+        max_position_embeddings=50,
         eos_token_id=2,
         pad_token_id=1,
         bos_token_id=0,
@@ -397,3 +397,7 @@ def test_model_from_pretrained(self):
         input_ids = np.ones((1, 1)) * model.config.eos_token_id
         outputs = model(input_ids)
         self.assertIsNotNone(outputs)
+
+    @unittest.skip("Tiny random model has too few position embeddings for this.")
+    def test_pipeline_conversational(self):
+        pass
Collaborator:
Same as above.

@@ -53,7 +53,7 @@ def __init__(
         intermediate_size=37,
         hidden_dropout_prob=0.1,
         attention_probs_dropout_prob=0.1,
-        max_position_embeddings=20,
+        max_position_embeddings=50,
         eos_token_id=2,
         pad_token_id=1,
         bos_token_id=0,
@@ -209,14 +209,9 @@ def test_decoder_model_past_large_inputs(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
         self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

-    # TODO: Fix the failed tests when this model gets more usage
-    def is_pipeline_test_to_skip(
-        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
-    ):
-        # TODO @Rocketnight1 to fix
-        if pipeline_test_casse_name == "ConversationalPipelineTests":
-            return True
-        return False
+    @unittest.skip("Tiny random model has too few position embeddings for this.")
+    def test_pipeline_conversational(self):
+        pass
Collaborator:
Same as above.



@require_tokenizers
tests/pipelines/test_pipelines_conversational.py (4 additions, 4 deletions)

@@ -77,14 +77,14 @@ def get_test_pipeline(self, model, tokenizer, processor):

    def run_pipeline_test(self, conversation_agent, _):
        # Simple
-        outputs = conversation_agent(Conversation("Hi there!"), max_new_tokens=20)
+        outputs = conversation_agent(Conversation("Hi there!"), max_new_tokens=5)
Collaborator:
ok

        self.assertEqual(
            outputs,
            Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]),
        )

        # Single list
-        outputs = conversation_agent([Conversation("Hi there!")], max_new_tokens=20)
+        outputs = conversation_agent([Conversation("Hi there!")], max_new_tokens=5)
        self.assertEqual(
            outputs,
            Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]),
@@ -96,7 +96,7 @@ def run_pipeline_test(self, conversation_agent, _):
        self.assertEqual(len(conversation_1), 1)
        self.assertEqual(len(conversation_2), 1)

-        outputs = conversation_agent([conversation_1, conversation_2], max_new_tokens=20)
+        outputs = conversation_agent([conversation_1, conversation_2], max_new_tokens=5)
        self.assertEqual(outputs, [conversation_1, conversation_2])
        self.assertEqual(
            outputs,
@@ -118,7 +118,7 @@ def run_pipeline_test(self, conversation_agent, _):

        # One conversation with history
        conversation_2.add_message({"role": "user", "content": "Why do you recommend it?"})
-        outputs = conversation_agent(conversation_2, max_new_tokens=20)
+        outputs = conversation_agent(conversation_2, max_new_tokens=5)
        self.assertEqual(outputs, conversation_2)
        self.assertEqual(
            outputs,
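
For readers unfamiliar with the API under test, a hedged sketch of what run_pipeline_test exercises outside the harness (the checkpoint name is a stand-in; the test mixin supplies tiny random models instead):

from transformers import Conversation, pipeline

# Stand-in checkpoint; the test harness injects tiny random models.
conversation_agent = pipeline("conversational", model="facebook/blenderbot_small-90M")

conversation = Conversation("Hi there!")
conversation = conversation_agent(conversation, max_new_tokens=5)  # short, as in the test

# Multi-turn use mirrors the "One conversation with history" case above.
conversation.add_message({"role": "user", "content": "Why do you recommend it?"})
conversation = conversation_agent(conversation, max_new_tokens=5)
print(conversation)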
tests/test_pipeline_mixin.py (1 addition, 1 deletion)

@@ -313,7 +313,7 @@ def data(n):

        out = []
        if task == "conversational":
-            for item in pipeline(data(10), batch_size=4, max_new_tokens=20):
+            for item in pipeline(data(10), batch_size=4, max_new_tokens=5):
                out.append(item)
        else:
            for item in pipeline(data(10), batch_size=4):
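
For context, a hedged reconstruction of the generator this loop consumes (the real data(n) is defined earlier in test_pipeline_mixin.py and may differ; `pipeline` here is the already-built conversational pipeline object from the test):

# Assumed shape of the helper: one fresh Conversation per item, so
# batch_size=4 exercises batched conversational generation.
def data(n):
    for _ in range(n):
        yield Conversation("Hi there!")

out = []
for item in pipeline(data(10), batch_size=4, max_new_tokens=5):
    out.append(item)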