From f6f0de566ffdf7b68df9882e80cd07df086f3048 Mon Sep 17 00:00:00 2001
From: Yusuke Mori
Date: Thu, 5 Nov 2020 12:11:00 +0900
Subject: [PATCH 01/11] Simply insert T5Tokenizer's prepare_seq2seq_batch

---
 src/transformers/tokenization_prophetnet.py | 40 +++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/src/transformers/tokenization_prophetnet.py b/src/transformers/tokenization_prophetnet.py
index 6936c032f9208e..f16fd0d510d0cf 100644
--- a/src/transformers/tokenization_prophetnet.py
+++ b/src/transformers/tokenization_prophetnet.py
@@ -286,3 +286,43 @@ def build_inputs_with_special_tokens(
             return token_ids_0 + [self.sep_token_id]
         sep = [self.sep_token_id]
         return token_ids_0 + sep + token_ids_1 + sep
+
+    @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
+    def prepare_seq2seq_batch(
+        self,
+        src_texts: List[str],
+        tgt_texts: Optional[List[str]] = None,
+        max_length: Optional[int] = None,
+        max_target_length: Optional[int] = None,
+        padding: str = "longest",
+        return_tensors: str = None,
+        truncation: bool = True,
+        **kwargs,
+    ) -> BatchEncoding:
+        if max_length is None:
+            max_length = self.max_len
+        model_inputs = self(
+            src_texts,
+            add_special_tokens=True,
+            return_tensors=return_tensors,
+            max_length=max_length,
+            padding=padding,
+            truncation=truncation,
+            **kwargs,
+        )
+        if tgt_texts is None:
+            return model_inputs
+        # Process tgt_texts
+        if max_target_length is None:
+            max_target_length = max_length
+        labels_and_decoder_mask = self(
+            tgt_texts,
+            add_special_tokens=True,
+            return_tensors=return_tensors,
+            padding=padding,
+            max_length=max_target_length,
+            truncation=truncation,
+            **kwargs,
+        )
+        model_inputs["labels"] = labels_and_decoder_mask["input_ids"]
+        return model_inputs

From 67c5fe6014067adfa22451cd53933d4bd8dc1890 Mon Sep 17 00:00:00 2001
From: Yusuke Mori
Date: Thu, 5 Nov 2020 12:21:06 +0900
Subject: [PATCH 02/11] Update/Add some imports

---
 src/transformers/tokenization_prophetnet.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/transformers/tokenization_prophetnet.py b/src/transformers/tokenization_prophetnet.py
index f16fd0d510d0cf..fe97f17b73c307 100644
--- a/src/transformers/tokenization_prophetnet.py
+++ b/src/transformers/tokenization_prophetnet.py
@@ -17,8 +17,10 @@
 import os
 from typing import List, Optional, Tuple
 
+from .file_utils import add_start_docstrings
 from .tokenization_bert import BasicTokenizer, WordpieceTokenizer
-from .tokenization_utils import PreTrainedTokenizer
+from .tokenization_utils import BatchEncoding, PreTrainedTokenizer
+from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING
 from .utils import logging

From 266a85780c268d79f89bab72a32acec59e9bf567 Mon Sep 17 00:00:00 2001
From: Yusuke Mori
Date: Fri, 6 Nov 2020 02:38:18 +0900
Subject: [PATCH 03/11] Fix RuntimeError caused by '.view'

---
 examples/seq2seq/seq2seq_trainer.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/examples/seq2seq/seq2seq_trainer.py b/examples/seq2seq/seq2seq_trainer.py
index 805a73871fb22e..c0f1d2d9475abf 100644
--- a/examples/seq2seq/seq2seq_trainer.py
+++ b/examples/seq2seq/seq2seq_trainer.py
@@ -136,7 +136,9 @@ def _compute_loss(self, model, inputs, labels):
         if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
             # force training to ignore pad token
             logits = model(**inputs, use_cache=False)[0]
-            loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
+
+            # With ProphetNet, `logits.view(-1, logits.shape[-1])` raised a RuntimeError here, so use `.reshape` instead.
+            loss = self.loss_fn(logits.reshape(-1, logits.shape[-1]), labels.view(-1))
         else:
             # compute usual loss via models
             loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]

From 85c5a901bb4b20e8747e690a40876acdc0c20d80 Mon Sep 17 00:00:00 2001
From: Yusuke Mori
Date: Fri, 13 Nov 2020 17:10:22 +0900
Subject: [PATCH 04/11] Move the .view-related error avoidance from
 seq2seq_trainer into ProphetNet

---
 examples/seq2seq/seq2seq_trainer.py     | 4 +---
 src/transformers/modeling_prophetnet.py | 4 ++++
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/examples/seq2seq/seq2seq_trainer.py b/examples/seq2seq/seq2seq_trainer.py
index c0f1d2d9475abf..805a73871fb22e 100644
--- a/examples/seq2seq/seq2seq_trainer.py
+++ b/examples/seq2seq/seq2seq_trainer.py
@@ -136,9 +136,7 @@ def _compute_loss(self, model, inputs, labels):
         if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
             # force training to ignore pad token
             logits = model(**inputs, use_cache=False)[0]
-
-            # With ProphetNet, `logits.view(-1, logits.shape[-1])` raised a RuntimeError here, so use `.reshape` instead.
-            loss = self.loss_fn(logits.reshape(-1, logits.shape[-1]), labels.view(-1))
+            loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
         else:
             # compute usual loss via models
             loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
diff --git a/src/transformers/modeling_prophetnet.py b/src/transformers/modeling_prophetnet.py
index 96508c667da567..d3350407915956 100644
--- a/src/transformers/modeling_prophetnet.py
+++ b/src/transformers/modeling_prophetnet.py
@@ -1766,6 +1766,10 @@ def forward(
         logits = predict_logits[:, 0]
         logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
 
+        # To use .view in loss computation, make sure that logits is contiguous.
+        if not logits.is_contiguous():
+            logits = logits.contiguous()
+
         loss = None
         if labels is not None:
             loss = self._compute_loss(predict_logits, labels)

From 759c8bbc939951bac521876805eb162e054f9790 Mon Sep 17 00:00:00 2001
From: Yusuke Mori
Date: Fri, 13 Nov 2020 19:25:24 +0900
Subject: [PATCH 05/11] Update test_tokenization_prophetnet.py

---
 tests/test_tokenization_prophetnet.py | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/tests/test_tokenization_prophetnet.py b/tests/test_tokenization_prophetnet.py
index fc0954090896e2..538289fdeed3f2 100644
--- a/tests/test_tokenization_prophetnet.py
+++ b/tests/test_tokenization_prophetnet.py
@@ -189,3 +189,24 @@ def test_sequence_builders(self):
 
         assert encoded_sentence == text + [102]
         assert encoded_pair == text + [102] + text_2 + [102]
+
+    def test_prepare_seq2seq_batch(self):
+        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
+
+        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
+        tgt_text = [
+            "Summary of the text.",
+            "Another summary.",
+        ]
+        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
+        batch = tokenizer.prepare_seq2seq_batch(
+            src_text,
+            tgt_texts=tgt_text,
+            return_tensors='pt',
+        )
+        self.assertIsInstance(batch, BatchEncoding)
+        result = list(batch.input_ids.numpy()[0])
+        self.assertListEqual(expected_src_tokens, result)
+
+        self.assertEqual((2, 9), batch.input_ids.shape)
+        self.assertEqual((2, 9), batch.attention_mask.shape)
\ No newline at end of file

From 1a99c0b68a19bdd4989d3e16a0b84868cfd5c1bc Mon Sep 17 00:00:00 2001
From: Yusuke Mori
Date: Fri, 13 Nov 2020 19:33:24 +0900
Subject: [PATCH 06/11] Format the test code with black

---
 tests/test_tokenization_prophetnet.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/tests/test_tokenization_prophetnet.py b/tests/test_tokenization_prophetnet.py
index 538289fdeed3f2..566d4bdf034c01 100644
--- a/tests/test_tokenization_prophetnet.py
+++ b/tests/test_tokenization_prophetnet.py
@@ -199,14 +199,10 @@ def test_prepare_seq2seq_batch(self):
             "Another summary.",
         ]
         expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
-        batch = tokenizer.prepare_seq2seq_batch(
-            src_text,
-            tgt_texts=tgt_text,
-            return_tensors='pt',
-        )
+        batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, return_tensors="pt",)
         self.assertIsInstance(batch, BatchEncoding)
         result = list(batch.input_ids.numpy()[0])
         self.assertListEqual(expected_src_tokens, result)
 
         self.assertEqual((2, 9), batch.input_ids.shape)
-        self.assertEqual((2, 9), batch.attention_mask.shape)
\ No newline at end of file
+        self.assertEqual((2, 9), batch.attention_mask.shape)

From 25acd885823bbbbb3b25e7f19206cbc7754343c4 Mon Sep 17 00:00:00 2001
From: Yusuke Mori
Date: Fri, 13 Nov 2020 19:41:38 +0900
Subject: [PATCH 07/11] Re-format the test code

---
 tests/test_tokenization_prophetnet.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/tests/test_tokenization_prophetnet.py b/tests/test_tokenization_prophetnet.py
index 566d4bdf034c01..8bbdb74893ab6d 100644
--- a/tests/test_tokenization_prophetnet.py
+++ b/tests/test_tokenization_prophetnet.py
@@ -199,10 +199,15 @@ def test_prepare_seq2seq_batch(self):
             "Another summary.",
         ]
         expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
-        batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, return_tensors="pt",)
+        batch = tokenizer.prepare_seq2seq_batch(
+            src_text,
+            tgt_texts=tgt_text,
+            return_tensors='pt',
+        )
         self.assertIsInstance(batch, BatchEncoding)
         result = list(batch.input_ids.numpy()[0])
         self.assertListEqual(expected_src_tokens, result)
 
         self.assertEqual((2, 9), batch.input_ids.shape)
         self.assertEqual((2, 9), batch.attention_mask.shape)
+        
\ No newline at end of file

From 751623efb3c212238499901b06209533955d3230 Mon Sep 17 00:00:00 2001
From: Yusuke Mori
Date: Fri, 13 Nov 2020 19:55:13 +0900
Subject: [PATCH 08/11] Update test_tokenization_prophetnet.py

---
 tests/test_tokenization_prophetnet.py | 44 +++++++++++++--------------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/tests/test_tokenization_prophetnet.py b/tests/test_tokenization_prophetnet.py
index 8bbdb74893ab6d..88d67b0f58f033 100644
--- a/tests/test_tokenization_prophetnet.py
+++ b/tests/test_tokenization_prophetnet.py
@@ -150,6 +150,28 @@ def test_wordpiece_tokenizer(self):
 
         self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
 
+    @require_torch
+    def test_prepare_seq2seq_batch(self):
+        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
+
+        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
+        tgt_text = [
+            "Summary of the text.",
+            "Another summary.",
+        ]
+        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
+        batch = tokenizer.prepare_seq2seq_batch(
+            src_text,
+            tgt_texts=tgt_text,
+            return_tensors='pt',
+        )
+        self.assertIsInstance(batch, BatchEncoding)
+        result = list(batch.input_ids.numpy()[0])
+        self.assertListEqual(expected_src_tokens, result)
+
+        self.assertEqual((2, 9), batch.input_ids.shape)
+        self.assertEqual((2, 9), batch.attention_mask.shape)
+
     def test_is_whitespace(self):
         self.assertTrue(_is_whitespace(" "))
         self.assertTrue(_is_whitespace("\t"))
@@ -189,25 +211,3 @@ def test_sequence_builders(self):
 
         assert encoded_sentence == text + [102]
         assert encoded_pair == text + [102] + text_2 + [102]
-
-    def test_prepare_seq2seq_batch(self):
-        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
-
-        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
-        tgt_text = [
-            "Summary of the text.",
-            "Another summary.",
-        ]
-        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
-        batch = tokenizer.prepare_seq2seq_batch(
-            src_text,
-            tgt_texts=tgt_text,
-            return_tensors='pt',
-        )
-        self.assertIsInstance(batch, BatchEncoding)
-        result = list(batch.input_ids.numpy()[0])
-        self.assertListEqual(expected_src_tokens, result)
-
-        self.assertEqual((2, 9), batch.input_ids.shape)
-        self.assertEqual((2, 9), batch.attention_mask.shape)
-        

From a4fce355beadd2d5e4c83906a1213fee168c8024 Mon Sep 17 00:00:00 2001
From: Yusuke Mori
Date: Fri, 13 Nov 2020 20:00:06 +0900
Subject: [PATCH 09/11] Import require_torch in the test code

---
 tests/test_tokenization_prophetnet.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_tokenization_prophetnet.py b/tests/test_tokenization_prophetnet.py
index 88d67b0f58f033..12f1d5f43c5b06 100644
--- a/tests/test_tokenization_prophetnet.py
+++ b/tests/test_tokenization_prophetnet.py
@@ -17,7 +17,7 @@
 import os
 import unittest
 
-from transformers.testing_utils import slow
+from transformers.testing_utils import require_torch, slow
 from transformers.tokenization_bert import (
     BasicTokenizer,
     WordpieceTokenizer,

From f54b75e0bd0c3ed2f104e57720c2ec72aef3146e Mon Sep 17 00:00:00 2001
From: Yusuke Mori
Date: Fri, 13 Nov 2020 22:42:40 +0900
Subject: [PATCH 10/11] Import BatchEncoding in the test code

---
 tests/test_tokenization_prophetnet.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/test_tokenization_prophetnet.py b/tests/test_tokenization_prophetnet.py
index 12f1d5f43c5b06..52f45dd0af93a0 100644
--- a/tests/test_tokenization_prophetnet.py
+++ b/tests/test_tokenization_prophetnet.py
@@ -17,6 +17,7 @@
 import os
 import unittest
 
+from transformers import BatchEncoding
 from transformers.testing_utils import require_torch, slow
 from transformers.tokenization_bert import (
     BasicTokenizer,

From 80ca4d303cdb956a93ac1fa8f0ab9219ec53368e Mon Sep 17 00:00:00 2001
From: Yusuke Mori
Date: Fri, 13 Nov 2020 22:53:40 +0900
Subject: [PATCH 11/11] Re-format the test code on Colab

---
 tests/test_tokenization_prophetnet.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_tokenization_prophetnet.py b/tests/test_tokenization_prophetnet.py
index 52f45dd0af93a0..34f32c66ca6ad6 100644
--- a/tests/test_tokenization_prophetnet.py
+++ b/tests/test_tokenization_prophetnet.py
@@ -164,7 +164,7 @@ def test_prepare_seq2seq_batch(self):
         batch = tokenizer.prepare_seq2seq_batch(
             src_text,
             tgt_texts=tgt_text,
-            return_tensors='pt',
+            return_tensors="pt",
         )
         self.assertIsInstance(batch, BatchEncoding)
         result = list(batch.input_ids.numpy()[0])
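
Two short sketches for reviewers follow; neither is part of the patches themselves.

First, a minimal usage sketch of the `prepare_seq2seq_batch` method added in PATCH 01/02 and exercised by the tests above. It assumes a transformers checkout with these patches applied and PyTorch installed:

    from transformers import ProphetNetTokenizer

    tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")

    src_texts = ["A long paragraph for summarization."]
    tgt_texts = ["Summary of the text."]

    # Targets, when given, are tokenized like sources and stored under "labels".
    batch = tokenizer.prepare_seq2seq_batch(src_texts, tgt_texts=tgt_texts, return_tensors="pt")
    print(batch.input_ids.shape)       # (1, source length), padded to the longest source
    print(batch.attention_mask.shape)  # same shape, masks the source padding
    print(batch["labels"].shape)       # (1, target length)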
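Second, a standalone PyTorch sketch of the failure that PATCH 03/04 works around: `.view` requires a contiguous tensor, and the `logits = predict_logits[:, 0]` slice in modeling_prophetnet.py is not contiguous. The shapes below are stand-ins, not the model's real sizes:

    import torch

    # Stand-in for ProphetNet's predict_logits: (batch, ngram, sequence, vocab).
    predict_logits = torch.randn(2, 3, 5, 7)
    logits = predict_logits[:, 0]  # the slice taken in modeling_prophetnet.py
    print(logits.is_contiguous())  # False: the slice still uses the parent's strides

    try:
        logits.view(-1, logits.shape[-1])  # what the seq2seq_trainer loss did
    except RuntimeError as err:
        print("view failed:", err)

    print(logits.reshape(-1, logits.shape[-1]).shape)            # PATCH 03: copies as needed
    print(logits.contiguous().view(-1, logits.shape[-1]).shape)  # PATCH 04: make contiguous first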